mirror of
https://github.com/restic/restic.git
synced 2025-12-15 04:13:09 +00:00
Vendor dependencies for GCS
This commit is contained in:
416
vendor/cloud.google.com/go/bigtable/admin.go
generated
vendored
Normal file
416
vendor/cloud.google.com/go/bigtable/admin.go
generated
vendored
Normal file
@@ -0,0 +1,416 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package bigtable
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
btopt "cloud.google.com/go/bigtable/internal/option"
|
||||
"cloud.google.com/go/longrunning"
|
||||
lroauto "cloud.google.com/go/longrunning/autogen"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/option"
|
||||
gtransport "google.golang.org/api/transport/grpc"
|
||||
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// adminAddr is the gRPC endpoint of the Bigtable table admin service.
const adminAddr = "bigtableadmin.googleapis.com:443"

// AdminClient is a client type for performing admin operations within a specific instance.
type AdminClient struct {
	// conn is the underlying gRPC connection; closed by Close.
	conn *grpc.ClientConn
	// tClient is the generated table-admin RPC stub bound to conn.
	tClient btapb.BigtableTableAdminClient

	// project and instance identify the instance this client administers;
	// combined by instancePrefix() into the resource-name prefix.
	project, instance string

	// Metadata to be sent with each request.
	md metadata.MD
}
|
||||
|
||||
// NewAdminClient creates a new AdminClient for a given project and instance.
|
||||
func NewAdminClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*AdminClient, error) {
|
||||
o, err := btopt.DefaultClientOptions(adminAddr, AdminScope, clientUserAgent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
o = append(o, opts...)
|
||||
conn, err := gtransport.Dial(ctx, o...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("dialing: %v", err)
|
||||
}
|
||||
return &AdminClient{
|
||||
conn: conn,
|
||||
tClient: btapb.NewBigtableTableAdminClient(conn),
|
||||
project: project,
|
||||
instance: instance,
|
||||
md: metadata.Pairs(resourcePrefixHeader, fmt.Sprintf("projects/%s/instances/%s", project, instance)),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close closes the AdminClient by closing its underlying gRPC connection.
func (ac *AdminClient) Close() error {
	return ac.conn.Close()
}
|
||||
|
||||
func (ac *AdminClient) instancePrefix() string {
|
||||
return fmt.Sprintf("projects/%s/instances/%s", ac.project, ac.instance)
|
||||
}
|
||||
|
||||
// Tables returns a list of the tables in the instance.
|
||||
func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) {
|
||||
ctx = mergeOutgoingMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.ListTablesRequest{
|
||||
Parent: prefix,
|
||||
}
|
||||
res, err := ac.tClient.ListTables(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
names := make([]string, 0, len(res.Tables))
|
||||
for _, tbl := range res.Tables {
|
||||
names = append(names, strings.TrimPrefix(tbl.Name, prefix+"/tables/"))
|
||||
}
|
||||
return names, nil
|
||||
}
|
||||
|
||||
// TableConf contains all of the information necessary to create a table with column families.
type TableConf struct {
	// TableID is the short table name, relative to the instance.
	TableID string
	// SplitKeys are the row keys used to initially split the table into tablets.
	SplitKeys []string
	// Families is a map from family name to GCPolicy
	Families map[string]GCPolicy
}
|
||||
|
||||
// CreateTable creates a new table in the instance.
// This method may return before the table's creation is complete.
// It is shorthand for CreateTableFromConf with only TableID set.
func (ac *AdminClient) CreateTable(ctx context.Context, table string) error {
	return ac.CreateTableFromConf(ctx, &TableConf{TableID: table})
}

// CreatePresplitTable creates a new table in the instance.
// The list of row keys will be used to initially split the table into multiple tablets.
// Given two split keys, "s1" and "s2", three tablets will be created,
// spanning the key ranges: [, s1), [s1, s2), [s2, ).
// This method may return before the table's creation is complete.
func (ac *AdminClient) CreatePresplitTable(ctx context.Context, table string, splitKeys []string) error {
	return ac.CreateTableFromConf(ctx, &TableConf{TableID: table, SplitKeys: splitKeys})
}
|
||||
|
||||
// CreateTableFromConf creates a new table in the instance from the given configuration.
|
||||
func (ac *AdminClient) CreateTableFromConf(ctx context.Context, conf *TableConf) error {
|
||||
ctx = mergeOutgoingMetadata(ctx, ac.md)
|
||||
var req_splits []*btapb.CreateTableRequest_Split
|
||||
for _, split := range conf.SplitKeys {
|
||||
req_splits = append(req_splits, &btapb.CreateTableRequest_Split{[]byte(split)})
|
||||
}
|
||||
var tbl btapb.Table
|
||||
if conf.Families != nil {
|
||||
tbl.ColumnFamilies = make(map[string]*btapb.ColumnFamily)
|
||||
for fam, policy := range conf.Families {
|
||||
tbl.ColumnFamilies[fam] = &btapb.ColumnFamily{policy.proto()}
|
||||
}
|
||||
}
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.CreateTableRequest{
|
||||
Parent: prefix,
|
||||
TableId: conf.TableID,
|
||||
Table: &tbl,
|
||||
InitialSplits: req_splits,
|
||||
}
|
||||
_, err := ac.tClient.CreateTable(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
// CreateColumnFamily creates a new column family in a table.
|
||||
func (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family string) error {
|
||||
// TODO(dsymonds): Permit specifying gcexpr and any other family settings.
|
||||
ctx = mergeOutgoingMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.ModifyColumnFamiliesRequest{
|
||||
Name: prefix + "/tables/" + table,
|
||||
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
|
||||
Id: family,
|
||||
Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}},
|
||||
}},
|
||||
}
|
||||
_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteTable deletes a table and all of its data.
|
||||
func (ac *AdminClient) DeleteTable(ctx context.Context, table string) error {
|
||||
ctx = mergeOutgoingMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.DeleteTableRequest{
|
||||
Name: prefix + "/tables/" + table,
|
||||
}
|
||||
_, err := ac.tClient.DeleteTable(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteColumnFamily deletes a column family in a table and all of its data.
|
||||
func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family string) error {
|
||||
ctx = mergeOutgoingMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.ModifyColumnFamiliesRequest{
|
||||
Name: prefix + "/tables/" + table,
|
||||
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
|
||||
Id: family,
|
||||
Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Drop{true},
|
||||
}},
|
||||
}
|
||||
_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
// TableInfo represents information about a table.
type TableInfo struct {
	// DEPRECATED - This field is deprecated. Please use FamilyInfos instead.
	Families []string
	// FamilyInfos holds one entry per column family, including its GC policy.
	FamilyInfos []FamilyInfo
}

// FamilyInfo represents information about a column family.
type FamilyInfo struct {
	// Name is the column family name (without the trailing colon).
	Name string
	// GCPolicy is the family's GC rule rendered as a string (see GCRuleToString).
	GCPolicy string
}
|
||||
|
||||
// TableInfo retrieves information about a table.
|
||||
func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, error) {
|
||||
ctx = mergeOutgoingMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.GetTableRequest{
|
||||
Name: prefix + "/tables/" + table,
|
||||
}
|
||||
res, err := ac.tClient.GetTable(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ti := &TableInfo{}
|
||||
for name, fam := range res.ColumnFamilies {
|
||||
ti.Families = append(ti.Families, name)
|
||||
ti.FamilyInfos = append(ti.FamilyInfos, FamilyInfo{Name: name, GCPolicy: GCRuleToString(fam.GcRule)})
|
||||
}
|
||||
return ti, nil
|
||||
}
|
||||
|
||||
// SetGCPolicy specifies which cells in a column family should be garbage collected.
|
||||
// GC executes opportunistically in the background; table reads may return data
|
||||
// matching the GC policy.
|
||||
func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error {
|
||||
ctx = mergeOutgoingMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.ModifyColumnFamiliesRequest{
|
||||
Name: prefix + "/tables/" + table,
|
||||
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
|
||||
Id: family,
|
||||
Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{&btapb.ColumnFamily{GcRule: policy.proto()}},
|
||||
}},
|
||||
}
|
||||
_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
// DropRowRange permanently deletes a row range from the specified table.
|
||||
func (ac *AdminClient) DropRowRange(ctx context.Context, table, rowKeyPrefix string) error {
|
||||
ctx = mergeOutgoingMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.DropRowRangeRequest{
|
||||
Name: prefix + "/tables/" + table,
|
||||
Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte(rowKeyPrefix)},
|
||||
}
|
||||
_, err := ac.tClient.DropRowRange(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
// instanceAdminAddr is the gRPC endpoint of the Bigtable instance admin service.
// NOTE(review): identical to adminAddr above; kept separate so the two
// services could diverge without an interface change.
const instanceAdminAddr = "bigtableadmin.googleapis.com:443"

// InstanceAdminClient is a client type for performing admin operations on instances.
// These operations can be substantially more dangerous than those provided by AdminClient.
type InstanceAdminClient struct {
	// conn is the underlying gRPC connection; closed by Close.
	conn *grpc.ClientConn
	// iClient is the generated instance-admin RPC stub bound to conn.
	iClient btapb.BigtableInstanceAdminClient
	// lroClient polls long-running operations (used by CreateInstance).
	lroClient *lroauto.OperationsClient

	// project is the project whose instances this client administers.
	project string

	// Metadata to be sent with each request.
	md metadata.MD
}
|
||||
|
||||
// NewInstanceAdminClient creates a new InstanceAdminClient for a given project.
|
||||
func NewInstanceAdminClient(ctx context.Context, project string, opts ...option.ClientOption) (*InstanceAdminClient, error) {
|
||||
o, err := btopt.DefaultClientOptions(instanceAdminAddr, InstanceAdminScope, clientUserAgent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
o = append(o, opts...)
|
||||
conn, err := gtransport.Dial(ctx, o...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("dialing: %v", err)
|
||||
}
|
||||
|
||||
lroClient, err := lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
|
||||
if err != nil {
|
||||
// This error "should not happen", since we are just reusing old connection
|
||||
// and never actually need to dial.
|
||||
// If this does happen, we could leak conn. However, we cannot close conn:
|
||||
// If the user invoked the function with option.WithGRPCConn,
|
||||
// we would close a connection that's still in use.
|
||||
// TODO(pongad): investigate error conditions.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &InstanceAdminClient{
|
||||
conn: conn,
|
||||
iClient: btapb.NewBigtableInstanceAdminClient(conn),
|
||||
lroClient: lroClient,
|
||||
|
||||
project: project,
|
||||
md: metadata.Pairs(resourcePrefixHeader, "projects/"+project),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close closes the InstanceAdminClient by closing its underlying gRPC connection.
func (iac *InstanceAdminClient) Close() error {
	return iac.conn.Close()
}
|
||||
|
||||
// StorageType is the type of storage used for all tables in an instance
type StorageType int

const (
	// SSD maps to btapb.StorageType_SSD and is the default (see proto).
	SSD StorageType = iota
	// HDD maps to btapb.StorageType_HDD.
	HDD
)

// proto converts the StorageType to its wire representation.
// Any value other than HDD (including unrecognized ones) maps to SSD.
func (st StorageType) proto() btapb.StorageType {
	if st == HDD {
		return btapb.StorageType_HDD
	}
	return btapb.StorageType_SSD
}

// InstanceType is the type of the instance
type InstanceType int32

const (
	// PRODUCTION mirrors btapb.Instance_PRODUCTION.
	PRODUCTION InstanceType = InstanceType(btapb.Instance_PRODUCTION)
	// DEVELOPMENT mirrors btapb.Instance_DEVELOPMENT.
	DEVELOPMENT = InstanceType(btapb.Instance_DEVELOPMENT)
)

// InstanceInfo represents information about an instance
type InstanceInfo struct {
	Name        string // name of the instance
	DisplayName string // display name for UIs
}

// InstanceConf contains the information necessary to create an Instance
type InstanceConf struct {
	InstanceId, DisplayName, ClusterId, Zone string
	// NumNodes must not be specified for DEVELOPMENT instance types
	NumNodes     int32
	StorageType  StorageType
	InstanceType InstanceType
}

// instanceNameRegexp matches fully qualified instance resource names,
// capturing the project in group 1 and the instance id in group 2.
var instanceNameRegexp = regexp.MustCompile(`^projects/([^/]+)/instances/([a-z][-a-z0-9]*)$`)
|
||||
|
||||
// CreateInstance creates a new instance in the project.
|
||||
// This method will return when the instance has been created or when an error occurs.
|
||||
func (iac *InstanceAdminClient) CreateInstance(ctx context.Context, conf *InstanceConf) error {
|
||||
ctx = mergeOutgoingMetadata(ctx, iac.md)
|
||||
req := &btapb.CreateInstanceRequest{
|
||||
Parent: "projects/" + iac.project,
|
||||
InstanceId: conf.InstanceId,
|
||||
Instance: &btapb.Instance{DisplayName: conf.DisplayName, Type: btapb.Instance_Type(conf.InstanceType)},
|
||||
Clusters: map[string]*btapb.Cluster{
|
||||
conf.ClusterId: {
|
||||
ServeNodes: conf.NumNodes,
|
||||
DefaultStorageType: conf.StorageType.proto(),
|
||||
Location: "projects/" + iac.project + "/locations/" + conf.Zone,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
lro, err := iac.iClient.CreateInstance(ctx, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp := btapb.Instance{}
|
||||
return longrunning.InternalNewOperation(iac.lroClient, lro).Wait(ctx, &resp)
|
||||
}
|
||||
|
||||
// DeleteInstance deletes an instance from the project.
|
||||
func (iac *InstanceAdminClient) DeleteInstance(ctx context.Context, instanceId string) error {
|
||||
ctx = mergeOutgoingMetadata(ctx, iac.md)
|
||||
req := &btapb.DeleteInstanceRequest{"projects/" + iac.project + "/instances/" + instanceId}
|
||||
_, err := iac.iClient.DeleteInstance(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
// Instances returns a list of instances in the project.
|
||||
func (iac *InstanceAdminClient) Instances(ctx context.Context) ([]*InstanceInfo, error) {
|
||||
ctx = mergeOutgoingMetadata(ctx, iac.md)
|
||||
req := &btapb.ListInstancesRequest{
|
||||
Parent: "projects/" + iac.project,
|
||||
}
|
||||
res, err := iac.iClient.ListInstances(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var is []*InstanceInfo
|
||||
for _, i := range res.Instances {
|
||||
m := instanceNameRegexp.FindStringSubmatch(i.Name)
|
||||
if m == nil {
|
||||
return nil, fmt.Errorf("malformed instance name %q", i.Name)
|
||||
}
|
||||
is = append(is, &InstanceInfo{
|
||||
Name: m[2],
|
||||
DisplayName: i.DisplayName,
|
||||
})
|
||||
}
|
||||
return is, nil
|
||||
}
|
||||
|
||||
// InstanceInfo returns information about an instance.
|
||||
func (iac *InstanceAdminClient) InstanceInfo(ctx context.Context, instanceId string) (*InstanceInfo, error) {
|
||||
ctx = mergeOutgoingMetadata(ctx, iac.md)
|
||||
req := &btapb.GetInstanceRequest{
|
||||
Name: "projects/" + iac.project + "/instances/" + instanceId,
|
||||
}
|
||||
res, err := iac.iClient.GetInstance(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m := instanceNameRegexp.FindStringSubmatch(res.Name)
|
||||
if m == nil {
|
||||
return nil, fmt.Errorf("malformed instance name %q", res.Name)
|
||||
}
|
||||
return &InstanceInfo{
|
||||
Name: m[2],
|
||||
DisplayName: res.DisplayName,
|
||||
}, nil
|
||||
}
|
||||
178
vendor/cloud.google.com/go/bigtable/admin_test.go
generated
vendored
Normal file
178
vendor/cloud.google.com/go/bigtable/admin_test.go
generated
vendored
Normal file
@@ -0,0 +1,178 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigtable
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"fmt"
|
||||
"golang.org/x/net/context"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func TestAdminIntegration(t *testing.T) {
|
||||
testEnv, err := NewIntegrationEnv()
|
||||
if err != nil {
|
||||
t.Fatalf("IntegrationEnv: %v", err)
|
||||
}
|
||||
defer testEnv.Close()
|
||||
|
||||
timeout := 2 * time.Second
|
||||
if testEnv.Config().UseProd {
|
||||
timeout = 5 * time.Minute
|
||||
}
|
||||
ctx, _ := context.WithTimeout(context.Background(), timeout)
|
||||
|
||||
adminClient, err := testEnv.NewAdminClient()
|
||||
if err != nil {
|
||||
t.Fatalf("NewAdminClient: %v", err)
|
||||
}
|
||||
defer adminClient.Close()
|
||||
|
||||
iAdminClient, err := testEnv.NewInstanceAdminClient()
|
||||
if err != nil {
|
||||
t.Fatalf("NewInstanceAdminClient: %v", err)
|
||||
}
|
||||
if iAdminClient != nil {
|
||||
defer iAdminClient.Close()
|
||||
|
||||
iInfo, err := iAdminClient.InstanceInfo(ctx, adminClient.instance)
|
||||
if err != nil {
|
||||
t.Errorf("InstanceInfo: %v", err)
|
||||
}
|
||||
if iInfo.Name != adminClient.instance {
|
||||
t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance)
|
||||
}
|
||||
}
|
||||
|
||||
list := func() []string {
|
||||
tbls, err := adminClient.Tables(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("Fetching list of tables: %v", err)
|
||||
}
|
||||
sort.Strings(tbls)
|
||||
return tbls
|
||||
}
|
||||
containsAll := func(got, want []string) bool {
|
||||
gotSet := make(map[string]bool)
|
||||
|
||||
for _, s := range got {
|
||||
gotSet[s] = true
|
||||
}
|
||||
for _, s := range want {
|
||||
if !gotSet[s] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
defer adminClient.DeleteTable(ctx, "mytable")
|
||||
|
||||
if err := adminClient.CreateTable(ctx, "mytable"); err != nil {
|
||||
t.Fatalf("Creating table: %v", err)
|
||||
}
|
||||
|
||||
defer adminClient.DeleteTable(ctx, "myothertable")
|
||||
|
||||
if err := adminClient.CreateTable(ctx, "myothertable"); err != nil {
|
||||
t.Fatalf("Creating table: %v", err)
|
||||
}
|
||||
|
||||
if got, want := list(), []string{"myothertable", "mytable"}; !containsAll(got, want) {
|
||||
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
|
||||
}
|
||||
if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil {
|
||||
t.Fatalf("Deleting table: %v", err)
|
||||
}
|
||||
tables := list()
|
||||
if got, want := tables, []string{"mytable"}; !containsAll(got, want) {
|
||||
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
|
||||
}
|
||||
if got, unwanted := tables, []string{"myothertable"}; containsAll(got, unwanted) {
|
||||
t.Errorf("adminClient.Tables return %#v. unwanted %#v", got, unwanted)
|
||||
}
|
||||
|
||||
tblConf := TableConf{
|
||||
TableID: "conftable",
|
||||
Families: map[string]GCPolicy{
|
||||
"fam1": MaxVersionsPolicy(1),
|
||||
"fam2": MaxVersionsPolicy(2),
|
||||
},
|
||||
}
|
||||
if err := adminClient.CreateTableFromConf(ctx, &tblConf); err != nil {
|
||||
t.Fatalf("Creating table from TableConf: %v", err)
|
||||
}
|
||||
defer adminClient.DeleteTable(ctx, tblConf.TableID)
|
||||
|
||||
tblInfo, err := adminClient.TableInfo(ctx, tblConf.TableID)
|
||||
if err != nil {
|
||||
t.Fatalf("Getting table info: %v", err)
|
||||
}
|
||||
sort.Strings(tblInfo.Families)
|
||||
wantFams := []string{"fam1", "fam2"}
|
||||
if !reflect.DeepEqual(tblInfo.Families, wantFams) {
|
||||
t.Errorf("Column family mismatch, got %v, want %v", tblInfo.Families, wantFams)
|
||||
}
|
||||
|
||||
// Populate mytable and drop row ranges
|
||||
if err = adminClient.CreateColumnFamily(ctx, "mytable", "cf"); err != nil {
|
||||
t.Fatalf("Creating column family: %v", err)
|
||||
}
|
||||
|
||||
client, err := testEnv.NewClient()
|
||||
if err != nil {
|
||||
t.Fatalf("NewClient: %v", err)
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
tbl := client.Open("mytable")
|
||||
|
||||
prefixes := []string{"a", "b", "c"}
|
||||
for _, prefix := range prefixes {
|
||||
for i := 0; i < 5; i++ {
|
||||
mut := NewMutation()
|
||||
mut.Set("cf", "col", 0, []byte("1"))
|
||||
if err := tbl.Apply(ctx, fmt.Sprintf("%v-%v", prefix, i), mut); err != nil {
|
||||
t.Fatalf("Mutating row: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err = adminClient.DropRowRange(ctx, "mytable", "a"); err != nil {
|
||||
t.Errorf("DropRowRange a: %v", err)
|
||||
}
|
||||
if err = adminClient.DropRowRange(ctx, "mytable", "c"); err != nil {
|
||||
t.Errorf("DropRowRange c: %v", err)
|
||||
}
|
||||
if err = adminClient.DropRowRange(ctx, "mytable", "x"); err != nil {
|
||||
t.Errorf("DropRowRange x: %v", err)
|
||||
}
|
||||
|
||||
var gotRowCount int
|
||||
tbl.ReadRows(ctx, RowRange{}, func(row Row) bool {
|
||||
gotRowCount += 1
|
||||
if !strings.HasPrefix(row.Key(), "b") {
|
||||
t.Errorf("Invalid row after dropping range: %v", row)
|
||||
}
|
||||
return true
|
||||
})
|
||||
if gotRowCount != 5 {
|
||||
t.Errorf("Invalid row count after dropping range: got %v, want %v", gotRowCount, 5)
|
||||
}
|
||||
}
|
||||
782
vendor/cloud.google.com/go/bigtable/bigtable.go
generated
vendored
Normal file
782
vendor/cloud.google.com/go/bigtable/bigtable.go
generated
vendored
Normal file
@@ -0,0 +1,782 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package bigtable // import "cloud.google.com/go/bigtable"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/bigtable/internal/gax"
|
||||
btopt "cloud.google.com/go/bigtable/internal/option"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/option"
|
||||
gtransport "google.golang.org/api/transport/grpc"
|
||||
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// prodAddr is the gRPC endpoint of the Bigtable data service.
const prodAddr = "bigtable.googleapis.com:443"

// Client is a client for reading and writing data to tables in an instance.
//
// A Client is safe to use concurrently, except for its Close method.
type Client struct {
	// conn is the underlying gRPC connection; closed by Close.
	conn *grpc.ClientConn
	// client is the generated data-plane RPC stub bound to conn.
	client btpb.BigtableClient
	// project and instance identify the instance; combined by fullTableName.
	project, instance string
}
|
||||
|
||||
// NewClient creates a new Client for a given project and instance.
|
||||
func NewClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*Client, error) {
|
||||
o, err := btopt.DefaultClientOptions(prodAddr, Scope, clientUserAgent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Default to a small connection pool that can be overridden.
|
||||
o = append(o, option.WithGRPCConnectionPool(4))
|
||||
o = append(o, opts...)
|
||||
conn, err := gtransport.Dial(ctx, o...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("dialing: %v", err)
|
||||
}
|
||||
return &Client{
|
||||
conn: conn,
|
||||
client: btpb.NewBigtableClient(conn),
|
||||
project: project,
|
||||
instance: instance,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close closes the Client by closing its underlying gRPC connection.
func (c *Client) Close() error {
	return c.conn.Close()
}
|
||||
|
||||
// Retry configuration shared by the package's idempotent RPCs.
var (
	// idempotentRetryCodes are gRPC codes treated as transient failures.
	idempotentRetryCodes = []codes.Code{codes.DeadlineExceeded, codes.Unavailable, codes.Aborted}
	// isIdempotentRetryCode is a lookup set built from idempotentRetryCodes in init.
	isIdempotentRetryCode = make(map[codes.Code]bool)
	// retryOptions configures gax.Invoke: exponential backoff from 100ms to
	// 2000ms with multiplier 1.2, retrying on the codes above.
	retryOptions = []gax.CallOption{
		gax.WithDelayTimeoutSettings(100*time.Millisecond, 2000*time.Millisecond, 1.2),
		gax.WithRetryCodes(idempotentRetryCodes),
	}
)

// init populates the isIdempotentRetryCode lookup set once at package load.
func init() {
	for _, code := range idempotentRetryCodes {
		isIdempotentRetryCode[code] = true
	}
}
|
||||
|
||||
func (c *Client) fullTableName(table string) string {
|
||||
return fmt.Sprintf("projects/%s/instances/%s/tables/%s", c.project, c.instance, table)
|
||||
}
|
||||
|
||||
// A Table refers to a table.
//
// A Table is safe to use concurrently.
type Table struct {
	// c is the owning Client used for all RPCs.
	c *Client
	// table is the short table name, relative to the instance.
	table string

	// Metadata to be sent with each request.
	md metadata.MD
}

// Open opens a table.
// It performs no RPC; the table is not checked for existence.
func (c *Client) Open(table string) *Table {
	return &Table{
		c:     c,
		table: table,
		md:    metadata.Pairs(resourcePrefixHeader, c.fullTableName(table)),
	}
}
|
||||
|
||||
// TODO(dsymonds): Read method that returns a sequence of ReadItems.
|
||||
|
||||
// ReadRows reads rows from a table. f is called for each row.
// If f returns false, the stream is shut down and ReadRows returns.
// f owns its argument, and f is called serially in order by row key.
//
// By default, the yielded rows will contain all values in all cells.
// Use RowFilter to limit the cells returned.
func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ...ReadOption) error {
	ctx = mergeOutgoingMetadata(ctx, t.md)

	// prevRowKey tracks the last fully delivered row so a retried attempt
	// can resume after it instead of re-reading the whole set.
	var prevRowKey string
	err := gax.Invoke(ctx, func(ctx context.Context) error {
		// Rebuild the request on every attempt: arg may have been narrowed
		// by retainRowsAfter after a previous failure.
		req := &btpb.ReadRowsRequest{
			TableName: t.c.fullTableName(t.table),
			Rows:      arg.proto(),
		}
		for _, opt := range opts {
			opt.set(req)
		}
		ctx, cancel := context.WithCancel(ctx) // for aborting the stream
		defer cancel()

		stream, err := t.c.client.ReadRows(ctx, req)
		if err != nil {
			return err
		}
		cr := newChunkReader()
		for {
			res, err := stream.Recv()
			if err == io.EOF {
				break
			}
			if err != nil {
				// Reset arg for next Invoke call.
				arg = arg.retainRowsAfter(prevRowKey)
				return err
			}

			for _, cc := range res.Chunks {
				// cr assembles chunks into rows; a nil row means the
				// current row is not yet complete.
				row, err := cr.Process(cc)
				if err != nil {
					// No need to prepare for a retry, this is an unretryable error.
					return err
				}
				if row == nil {
					continue
				}
				prevRowKey = row.Key()
				if !f(row) {
					// Cancel and drain stream.
					cancel()
					for {
						if _, err := stream.Recv(); err != nil {
							// The stream has ended. We don't return an error
							// because the caller has intentionally interrupted the scan.
							return nil
						}
					}
				}
			}
			// NOTE(review): cr.Close() runs inside the receive loop here,
			// i.e. after every response message — confirm against upstream
			// before restructuring.
			if err := cr.Close(); err != nil {
				// No need to prepare for a retry, this is an unretryable error.
				return err
			}
		}
		return err
	}, retryOptions...)

	return err
}
|
||||
|
||||
// ReadRow is a convenience implementation of a single-row reader.
|
||||
// A missing row will return a zero-length map and a nil error.
|
||||
func (t *Table) ReadRow(ctx context.Context, row string, opts ...ReadOption) (Row, error) {
|
||||
var r Row
|
||||
err := t.ReadRows(ctx, SingleRow(row), func(rr Row) bool {
|
||||
r = rr
|
||||
return true
|
||||
}, opts...)
|
||||
return r, err
|
||||
}
|
||||
|
||||
// decodeFamilyProto adds the cell data from f to the given row.
|
||||
func decodeFamilyProto(r Row, row string, f *btpb.Family) {
|
||||
fam := f.Name // does not have colon
|
||||
for _, col := range f.Columns {
|
||||
for _, cell := range col.Cells {
|
||||
ri := ReadItem{
|
||||
Row: row,
|
||||
Column: fam + ":" + string(col.Qualifier),
|
||||
Timestamp: Timestamp(cell.TimestampMicros),
|
||||
Value: cell.Value,
|
||||
}
|
||||
r[fam] = append(r[fam], ri)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RowSet is a set of rows to be read. It is satisfied by RowList, RowRange and RowRangeList.
type RowSet interface {
	// proto converts the set to its wire representation.
	proto() *btpb.RowSet

	// retainRowsAfter returns a new RowSet that does not include the
	// given row key or any row key lexicographically less than it.
	// Used by ReadRows to resume after a partial scan.
	retainRowsAfter(lastRowKey string) RowSet

	// Valid reports whether this set can cover at least one row.
	valid() bool
}
|
||||
|
||||
// RowList is a sequence of row keys.
|
||||
type RowList []string
|
||||
|
||||
func (r RowList) proto() *btpb.RowSet {
|
||||
keys := make([][]byte, len(r))
|
||||
for i, row := range r {
|
||||
keys[i] = []byte(row)
|
||||
}
|
||||
return &btpb.RowSet{RowKeys: keys}
|
||||
}
|
||||
|
||||
func (r RowList) retainRowsAfter(lastRowKey string) RowSet {
|
||||
var retryKeys RowList
|
||||
for _, key := range r {
|
||||
if key > lastRowKey {
|
||||
retryKeys = append(retryKeys, key)
|
||||
}
|
||||
}
|
||||
return retryKeys
|
||||
}
|
||||
|
||||
func (r RowList) valid() bool {
|
||||
return len(r) > 0
|
||||
}
|
||||
|
||||
// A RowRange is a half-open interval [Start, Limit) encompassing
// all the rows with keys at least as large as Start, and less than Limit.
// (Bigtable string comparison is the same as Go's.)
// A RowRange can be unbounded, encompassing all keys at least as large as Start.
type RowRange struct {
	// start is the inclusive lower bound.
	start string
	// limit is the exclusive upper bound; "" means unbounded (see Unbounded).
	limit string
}

// NewRange returns the new RowRange [begin, end).
func NewRange(begin, end string) RowRange {
	return RowRange{
		start: begin,
		limit: end,
	}
}

// Unbounded tests whether a RowRange is unbounded.
// An empty limit is the sentinel for "no upper bound".
func (r RowRange) Unbounded() bool {
	return r.limit == ""
}
|
||||
|
||||
// Contains says whether the RowRange contains the key.
|
||||
func (r RowRange) Contains(row string) bool {
|
||||
return r.start <= row && (r.limit == "" || r.limit > row)
|
||||
}
|
||||
|
||||
// String provides a printable description of a RowRange.
|
||||
func (r RowRange) String() string {
|
||||
a := strconv.Quote(r.start)
|
||||
if r.Unbounded() {
|
||||
return fmt.Sprintf("[%s,∞)", a)
|
||||
}
|
||||
return fmt.Sprintf("[%s,%q)", a, r.limit)
|
||||
}
|
||||
|
||||
func (r RowRange) proto() *btpb.RowSet {
|
||||
rr := &btpb.RowRange{
|
||||
StartKey: &btpb.RowRange_StartKeyClosed{[]byte(r.start)},
|
||||
}
|
||||
if !r.Unbounded() {
|
||||
rr.EndKey = &btpb.RowRange_EndKeyOpen{[]byte(r.limit)}
|
||||
}
|
||||
return &btpb.RowSet{RowRanges: []*btpb.RowRange{rr}}
|
||||
}
|
||||
|
||||
func (r RowRange) retainRowsAfter(lastRowKey string) RowSet {
|
||||
if lastRowKey == "" || lastRowKey < r.start {
|
||||
return r
|
||||
}
|
||||
// Set the beginning of the range to the row after the last scanned.
|
||||
start := lastRowKey + "\x00"
|
||||
if r.Unbounded() {
|
||||
return InfiniteRange(start)
|
||||
}
|
||||
return NewRange(start, r.limit)
|
||||
}
|
||||
|
||||
func (r RowRange) valid() bool {
|
||||
return r.start < r.limit
|
||||
}
|
||||
|
||||
// RowRangeList is a sequence of RowRanges representing the union of the ranges.
// It implements RowSet, so it can be passed directly to ReadRows.
type RowRangeList []RowRange
|
||||
|
||||
func (r RowRangeList) proto() *btpb.RowSet {
|
||||
ranges := make([]*btpb.RowRange, len(r))
|
||||
for i, rr := range r {
|
||||
// RowRange.proto() returns a RowSet with a single element RowRange array
|
||||
ranges[i] = rr.proto().RowRanges[0]
|
||||
}
|
||||
return &btpb.RowSet{RowRanges: ranges}
|
||||
}
|
||||
|
||||
func (r RowRangeList) retainRowsAfter(lastRowKey string) RowSet {
|
||||
if lastRowKey == "" {
|
||||
return r
|
||||
}
|
||||
// Return a list of any range that has not yet been completely processed
|
||||
var ranges RowRangeList
|
||||
for _, rr := range r {
|
||||
retained := rr.retainRowsAfter(lastRowKey)
|
||||
if retained.valid() {
|
||||
ranges = append(ranges, retained.(RowRange))
|
||||
}
|
||||
}
|
||||
return ranges
|
||||
}
|
||||
|
||||
func (r RowRangeList) valid() bool {
|
||||
for _, rr := range r {
|
||||
if rr.valid() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// SingleRow returns a RowSet for reading a single row.
|
||||
func SingleRow(row string) RowSet {
|
||||
return RowList{row}
|
||||
}
|
||||
|
||||
// PrefixRange returns a RowRange consisting of all keys starting with the prefix.
|
||||
func PrefixRange(prefix string) RowRange {
|
||||
return RowRange{
|
||||
start: prefix,
|
||||
limit: prefixSuccessor(prefix),
|
||||
}
|
||||
}
|
||||
|
||||
// InfiniteRange returns the RowRange consisting of all keys at least as
|
||||
// large as start.
|
||||
func InfiniteRange(start string) RowRange {
|
||||
return RowRange{
|
||||
start: start,
|
||||
limit: "",
|
||||
}
|
||||
}
|
||||
|
||||
// prefixSuccessor returns the lexically smallest string greater than the
// prefix, if it exists, or "" otherwise. In either case, it is the string
// needed for the Limit of a RowRange.
func prefixSuccessor(prefix string) string {
	if prefix == "" {
		return "" // infinite range
	}
	// Walk backwards past trailing 0xff bytes; they cannot be incremented.
	i := len(prefix) - 1
	for i >= 0 && prefix[i] == '\xff' {
		i--
	}
	if i < 0 {
		// Every byte was 0xff: no finite successor exists.
		return ""
	}
	// Increment the last non-0xff byte and drop everything after it.
	succ := make([]byte, i+1)
	copy(succ, prefix[:i])
	succ[i] = prefix[i] + 1
	return string(succ)
}
|
||||
|
||||
// A ReadOption is an optional argument to ReadRows.
type ReadOption interface {
	// set applies the option to the outgoing ReadRows request.
	set(req *btpb.ReadRowsRequest)
}
|
||||
|
||||
// RowFilter returns a ReadOption that applies f to the contents of read rows.
|
||||
func RowFilter(f Filter) ReadOption { return rowFilter{f} }
|
||||
|
||||
type rowFilter struct{ f Filter }
|
||||
|
||||
func (rf rowFilter) set(req *btpb.ReadRowsRequest) { req.Filter = rf.f.proto() }
|
||||
|
||||
// LimitRows returns a ReadOption that will limit the number of rows to be read.
|
||||
func LimitRows(limit int64) ReadOption { return limitRows{limit} }
|
||||
|
||||
type limitRows struct{ limit int64 }
|
||||
|
||||
func (lr limitRows) set(req *btpb.ReadRowsRequest) { req.RowsLimit = lr.limit }
|
||||
|
||||
// mutationsAreRetryable returns true if all mutations are idempotent
|
||||
// and therefore retryable. A mutation is idempotent iff all cell timestamps
|
||||
// have an explicit timestamp set and do not rely on the timestamp being set on the server.
|
||||
func mutationsAreRetryable(muts []*btpb.Mutation) bool {
|
||||
serverTime := int64(ServerTime)
|
||||
for _, mut := range muts {
|
||||
setCell := mut.GetSetCell()
|
||||
if setCell != nil && setCell.TimestampMicros == serverTime {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Apply applies a Mutation to a specific row.
//
// An unconditional mutation is sent as a MutateRow RPC; a conditional one
// (built with NewCondMutation) is sent as CheckAndMutateRow. Either RPC is
// retried only when every mutation it carries is idempotent, i.e. none
// relies on a server-assigned timestamp.
func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error {
	ctx = mergeOutgoingMetadata(ctx, t.md)
	// after runs each ApplyOption callback against the RPC response.
	after := func(res proto.Message) {
		for _, o := range opts {
			o.after(res)
		}
	}

	var callOptions []gax.CallOption
	if m.cond == nil {
		// Unconditional mutation: plain MutateRow.
		req := &btpb.MutateRowRequest{
			TableName: t.c.fullTableName(t.table),
			RowKey:    []byte(row),
			Mutations: m.ops,
		}
		// Only retry when replaying the mutations cannot change the outcome.
		if mutationsAreRetryable(m.ops) {
			callOptions = retryOptions
		}
		var res *btpb.MutateRowResponse
		err := gax.Invoke(ctx, func(ctx context.Context) error {
			var err error
			res, err = t.c.client.MutateRow(ctx, req)
			return err
		}, callOptions...)
		if err == nil {
			after(res)
		}
		return err
	}

	// Conditional mutation: CheckAndMutateRow with true/false branches.
	req := &btpb.CheckAndMutateRowRequest{
		TableName:       t.c.fullTableName(t.table),
		RowKey:          []byte(row),
		PredicateFilter: m.cond.proto(),
	}
	if m.mtrue != nil {
		req.TrueMutations = m.mtrue.ops
	}
	if m.mfalse != nil {
		req.FalseMutations = m.mfalse.ops
	}
	// Both branches must be idempotent for the RPC to be retryable.
	if mutationsAreRetryable(req.TrueMutations) && mutationsAreRetryable(req.FalseMutations) {
		callOptions = retryOptions
	}
	var cmRes *btpb.CheckAndMutateRowResponse
	err := gax.Invoke(ctx, func(ctx context.Context) error {
		var err error
		cmRes, err = t.c.client.CheckAndMutateRow(ctx, req)
		return err
	}, callOptions...)
	if err == nil {
		after(cmRes)
	}
	return err
}
|
||||
|
||||
// An ApplyOption is an optional argument to Apply.
type ApplyOption interface {
	// after is invoked with the RPC response once the mutation succeeds.
	after(res proto.Message)
}

// applyAfterFunc adapts a plain function into an ApplyOption.
type applyAfterFunc func(res proto.Message)

// after implements ApplyOption by calling the function itself.
func (a applyAfterFunc) after(res proto.Message) { a(res) }
|
||||
|
||||
// GetCondMutationResult returns an ApplyOption that reports whether the conditional
|
||||
// mutation's condition matched.
|
||||
func GetCondMutationResult(matched *bool) ApplyOption {
|
||||
return applyAfterFunc(func(res proto.Message) {
|
||||
if res, ok := res.(*btpb.CheckAndMutateRowResponse); ok {
|
||||
*matched = res.PredicateMatched
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Mutation represents a set of changes for a single row of a table.
type Mutation struct {
	ops []*btpb.Mutation // accumulated operations, applied in order

	// for conditional mutations
	cond          Filter    // predicate deciding which branch applies
	mtrue, mfalse *Mutation // branch applied when cond matches / does not match
}
|
||||
|
||||
// NewMutation returns a new mutation.
|
||||
func NewMutation() *Mutation {
|
||||
return new(Mutation)
|
||||
}
|
||||
|
||||
// NewCondMutation returns a conditional mutation.
|
||||
// The given row filter determines which mutation is applied:
|
||||
// If the filter matches any cell in the row, mtrue is applied;
|
||||
// otherwise, mfalse is applied.
|
||||
// Either given mutation may be nil.
|
||||
func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation {
|
||||
return &Mutation{cond: cond, mtrue: mtrue, mfalse: mfalse}
|
||||
}
|
||||
|
||||
// Set sets a value in a specified column, with the given timestamp.
|
||||
// The timestamp will be truncated to millisecond granularity.
|
||||
// A timestamp of ServerTime means to use the server timestamp.
|
||||
func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) {
|
||||
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
|
||||
FamilyName: family,
|
||||
ColumnQualifier: []byte(column),
|
||||
TimestampMicros: int64(ts.TruncateToMilliseconds()),
|
||||
Value: value,
|
||||
}}})
|
||||
}
|
||||
|
||||
// DeleteCellsInColumn will delete all the cells whose columns are family:column.
|
||||
func (m *Mutation) DeleteCellsInColumn(family, column string) {
|
||||
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{
|
||||
FamilyName: family,
|
||||
ColumnQualifier: []byte(column),
|
||||
}}})
|
||||
}
|
||||
|
||||
// DeleteTimestampRange deletes all cells whose columns are family:column
|
||||
// and whose timestamps are in the half-open interval [start, end).
|
||||
// If end is zero, it will be interpreted as infinity.
|
||||
// The timestamps will be truncated to millisecond granularity.
|
||||
func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) {
|
||||
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{
|
||||
FamilyName: family,
|
||||
ColumnQualifier: []byte(column),
|
||||
TimeRange: &btpb.TimestampRange{
|
||||
StartTimestampMicros: int64(start.TruncateToMilliseconds()),
|
||||
EndTimestampMicros: int64(end.TruncateToMilliseconds()),
|
||||
},
|
||||
}}})
|
||||
}
|
||||
|
||||
// DeleteCellsInFamily will delete all the cells whose columns are family:*.
|
||||
func (m *Mutation) DeleteCellsInFamily(family string) {
|
||||
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{&btpb.Mutation_DeleteFromFamily{
|
||||
FamilyName: family,
|
||||
}}})
|
||||
}
|
||||
|
||||
// DeleteRow deletes the entire row.
|
||||
func (m *Mutation) DeleteRow() {
|
||||
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{&btpb.Mutation_DeleteFromRow{}}})
|
||||
}
|
||||
|
||||
// entryErr is a container that combines an entry with the error that was returned for it.
// Err may be nil if no error was returned for the Entry, or if the Entry has not yet been processed.
type entryErr struct {
	Entry *btpb.MutateRowsRequest_Entry // one row's key plus its mutations
	Err   error                         // latest per-entry outcome; overwritten on each attempt
}
|
||||
|
||||
// ApplyBulk applies multiple Mutations.
// Each mutation is individually applied atomically,
// but the set of mutations may be applied in any order.
//
// Two types of failures may occur. If the entire process
// fails, (nil, err) will be returned. If specific mutations
// fail to apply, ([]err, nil) will be returned, and the errors
// will correspond to the relevant rowKeys/muts arguments.
//
// Conditional mutations cannot be applied in bulk and providing one will result in an error.
func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutation, opts ...ApplyOption) ([]error, error) {
	ctx = mergeOutgoingMetadata(ctx, t.md)
	if len(rowKeys) != len(muts) {
		return nil, fmt.Errorf("mismatched rowKeys and mutation array lengths: %d, %d", len(rowKeys), len(muts))
	}

	// origEntries keeps the stable, caller-ordered view; the retry loop below
	// works on a shrinking slice aliasing the same *entryErr values, so
	// per-entry errors recorded during retries remain visible here.
	origEntries := make([]*entryErr, len(rowKeys))
	for i, key := range rowKeys {
		mut := muts[i]
		if mut.cond != nil {
			return nil, errors.New("conditional mutations cannot be applied in bulk")
		}
		origEntries[i] = &entryErr{Entry: &btpb.MutateRowsRequest_Entry{RowKey: []byte(key), Mutations: mut.ops}}
	}

	// entries will be reduced after each invocation to just what needs to be retried.
	entries := make([]*entryErr, len(rowKeys))
	copy(entries, origEntries)
	err := gax.Invoke(ctx, func(ctx context.Context) error {
		err := t.doApplyBulk(ctx, entries, opts...)
		if err != nil {
			// We want to retry the entire request with the current entries
			return err
		}
		entries = t.getApplyBulkRetries(entries)
		if len(entries) > 0 && len(idempotentRetryCodes) > 0 {
			// We have at least one mutation that needs to be retried.
			// Return an arbitrary error that is retryable according to callOptions.
			return grpc.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk")
		}
		return nil
	}, retryOptions...)

	if err != nil {
		return nil, err
	}

	// Accumulate all of the errors into an array to return, interspersed with nils for successful
	// entries. The absence of any errors means we should return nil.
	var errs []error
	var foundErr bool
	for _, entry := range origEntries {
		if entry.Err != nil {
			foundErr = true
		}
		errs = append(errs, entry.Err)
	}
	if foundErr {
		return errs, nil
	}
	return nil, nil
}
|
||||
|
||||
// getApplyBulkRetries returns the entries that need to be retried
|
||||
func (t *Table) getApplyBulkRetries(entries []*entryErr) []*entryErr {
|
||||
var retryEntries []*entryErr
|
||||
for _, entry := range entries {
|
||||
err := entry.Err
|
||||
if err != nil && isIdempotentRetryCode[grpc.Code(err)] && mutationsAreRetryable(entry.Entry.Mutations) {
|
||||
// There was an error and the entry is retryable.
|
||||
retryEntries = append(retryEntries, entry)
|
||||
}
|
||||
}
|
||||
return retryEntries
|
||||
}
|
||||
|
||||
// doApplyBulk does the work of a single ApplyBulk invocation
|
||||
func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...ApplyOption) error {
|
||||
after := func(res proto.Message) {
|
||||
for _, o := range opts {
|
||||
o.after(res)
|
||||
}
|
||||
}
|
||||
|
||||
entries := make([]*btpb.MutateRowsRequest_Entry, len(entryErrs))
|
||||
for i, entryErr := range entryErrs {
|
||||
entries[i] = entryErr.Entry
|
||||
}
|
||||
req := &btpb.MutateRowsRequest{
|
||||
TableName: t.c.fullTableName(t.table),
|
||||
Entries: entries,
|
||||
}
|
||||
stream, err := t.c.client.MutateRows(ctx, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for {
|
||||
res, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i, entry := range res.Entries {
|
||||
status := entry.Status
|
||||
if status.Code == int32(codes.OK) {
|
||||
entryErrs[i].Err = nil
|
||||
} else {
|
||||
entryErrs[i].Err = grpc.Errorf(codes.Code(status.Code), status.Message)
|
||||
}
|
||||
}
|
||||
after(res)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Timestamp is in units of microseconds since 1 January 1970.
|
||||
type Timestamp int64
|
||||
|
||||
// ServerTime is a specific Timestamp that may be passed to (*Mutation).Set.
|
||||
// It indicates that the server's timestamp should be used.
|
||||
const ServerTime Timestamp = -1
|
||||
|
||||
// Time converts a time.Time into a Timestamp.
|
||||
func Time(t time.Time) Timestamp { return Timestamp(t.UnixNano() / 1e3) }
|
||||
|
||||
// Now returns the Timestamp representation of the current time on the client.
|
||||
func Now() Timestamp { return Time(time.Now()) }
|
||||
|
||||
// Time converts a Timestamp into a time.Time.
|
||||
func (ts Timestamp) Time() time.Time { return time.Unix(0, int64(ts)*1e3) }
|
||||
|
||||
// TruncateToMilliseconds truncates a Timestamp to millisecond granularity,
|
||||
// which is currently the only granularity supported.
|
||||
func (ts Timestamp) TruncateToMilliseconds() Timestamp {
|
||||
if ts == ServerTime {
|
||||
return ts
|
||||
}
|
||||
return ts - ts%1000
|
||||
}
|
||||
|
||||
// ApplyReadModifyWrite applies a ReadModifyWrite to a specific row.
// It returns the newly written cells.
func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) {
	ctx = mergeOutgoingMetadata(ctx, t.md)
	req := &btpb.ReadModifyWriteRowRequest{
		TableName: t.c.fullTableName(t.table),
		RowKey:    []byte(row),
		Rules:     m.ops,
	}
	res, err := t.c.client.ReadModifyWriteRow(ctx, req)
	if err != nil {
		return nil, err
	}
	if res.Row == nil {
		return nil, errors.New("unable to apply ReadModifyWrite: res.Row=nil")
	}
	// Re-shape the protobuf row into the client Row type, one family at a time.
	r := make(Row)
	for _, fam := range res.Row.Families { // res.Row is a *btpb.Row, fam is a *btpb.Family
		decodeFamilyProto(r, row, fam)
	}
	return r, nil
}
|
||||
|
||||
// ReadModifyWrite represents a set of operations on a single row of a table.
// It is like Mutation but for non-idempotent changes.
// When applied, these operations operate on the latest values of the row's cells,
// and result in a new value being written to the relevant cell with a timestamp
// that is max(existing timestamp, current server time).
//
// The application of a ReadModifyWrite is atomic; concurrent ReadModifyWrites will
// be executed serially by the server.
type ReadModifyWrite struct {
	ops []*btpb.ReadModifyWriteRule // rules applied in order by the server
}
|
||||
|
||||
// NewReadModifyWrite returns a new ReadModifyWrite.
|
||||
func NewReadModifyWrite() *ReadModifyWrite { return new(ReadModifyWrite) }
|
||||
|
||||
// AppendValue appends a value to a specific cell's value.
|
||||
// If the cell is unset, it will be treated as an empty value.
|
||||
func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) {
|
||||
m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
|
||||
FamilyName: family,
|
||||
ColumnQualifier: []byte(column),
|
||||
Rule: &btpb.ReadModifyWriteRule_AppendValue{v},
|
||||
})
|
||||
}
|
||||
|
||||
// Increment interprets the value in a specific cell as a 64-bit big-endian signed integer,
|
||||
// and adds a value to it. If the cell is unset, it will be treated as zero.
|
||||
// If the cell is set and is not an 8-byte value, the entire ApplyReadModifyWrite
|
||||
// operation will fail.
|
||||
func (m *ReadModifyWrite) Increment(family, column string, delta int64) {
|
||||
m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
|
||||
FamilyName: family,
|
||||
ColumnQualifier: []byte(column),
|
||||
Rule: &btpb.ReadModifyWriteRule_IncrementAmount{delta},
|
||||
})
|
||||
}
|
||||
|
||||
// mergeOutgoingMetadata returns a context populated by the existing outgoing metadata,
|
||||
// if any, joined with internal metadata.
|
||||
func mergeOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context {
|
||||
mdCopy, _ := metadata.FromOutgoingContext(ctx)
|
||||
return metadata.NewOutgoingContext(ctx, metadata.Join(mdCopy, md))
|
||||
}
|
||||
906
vendor/cloud.google.com/go/bigtable/bigtable_test.go
generated
vendored
Normal file
906
vendor/cloud.google.com/go/bigtable/bigtable_test.go
generated
vendored
Normal file
@@ -0,0 +1,906 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package bigtable
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func TestPrefix(t *testing.T) {
|
||||
tests := []struct {
|
||||
prefix, succ string
|
||||
}{
|
||||
{"", ""},
|
||||
{"\xff", ""}, // when used, "" means Infinity
|
||||
{"x\xff", "y"},
|
||||
{"\xfe", "\xff"},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
got := prefixSuccessor(tc.prefix)
|
||||
if got != tc.succ {
|
||||
t.Errorf("prefixSuccessor(%q) = %q, want %s", tc.prefix, got, tc.succ)
|
||||
continue
|
||||
}
|
||||
r := PrefixRange(tc.prefix)
|
||||
if tc.succ == "" && r.limit != "" {
|
||||
t.Errorf("PrefixRange(%q) got limit %q", tc.prefix, r.limit)
|
||||
}
|
||||
if tc.succ != "" && r.limit != tc.succ {
|
||||
t.Errorf("PrefixRange(%q) got limit %q, want %q", tc.prefix, r.limit, tc.succ)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientIntegration(t *testing.T) {
|
||||
start := time.Now()
|
||||
lastCheckpoint := start
|
||||
checkpoint := func(s string) {
|
||||
n := time.Now()
|
||||
t.Logf("[%s] %v since start, %v since last checkpoint", s, n.Sub(start), n.Sub(lastCheckpoint))
|
||||
lastCheckpoint = n
|
||||
}
|
||||
|
||||
testEnv, err := NewIntegrationEnv()
|
||||
if err != nil {
|
||||
t.Fatalf("IntegrationEnv: %v", err)
|
||||
}
|
||||
|
||||
timeout := 30 * time.Second
|
||||
if testEnv.Config().UseProd {
|
||||
timeout = 5 * time.Minute
|
||||
t.Logf("Running test against production")
|
||||
} else {
|
||||
t.Logf("bttest.Server running on %s", testEnv.Config().AdminEndpoint)
|
||||
}
|
||||
ctx, _ := context.WithTimeout(context.Background(), timeout)
|
||||
|
||||
client, err := testEnv.NewClient()
|
||||
if err != nil {
|
||||
t.Fatalf("Client: %v", err)
|
||||
}
|
||||
defer client.Close()
|
||||
checkpoint("dialed Client")
|
||||
|
||||
adminClient, err := testEnv.NewAdminClient()
|
||||
if err != nil {
|
||||
t.Fatalf("AdminClient: %v", err)
|
||||
}
|
||||
defer adminClient.Close()
|
||||
checkpoint("dialed AdminClient")
|
||||
|
||||
table := testEnv.Config().Table
|
||||
|
||||
// Delete the table at the end of the test.
|
||||
// Do this even before creating the table so that if this is running
|
||||
// against production and CreateTable fails there's a chance of cleaning it up.
|
||||
defer adminClient.DeleteTable(ctx, table)
|
||||
|
||||
if err := adminClient.CreateTable(ctx, table); err != nil {
|
||||
t.Fatalf("Creating table: %v", err)
|
||||
}
|
||||
checkpoint("created table")
|
||||
if err := adminClient.CreateColumnFamily(ctx, table, "follows"); err != nil {
|
||||
t.Fatalf("Creating column family: %v", err)
|
||||
}
|
||||
checkpoint(`created "follows" column family`)
|
||||
|
||||
tbl := client.Open(table)
|
||||
|
||||
// Insert some data.
|
||||
initialData := map[string][]string{
|
||||
"wmckinley": {"tjefferson"},
|
||||
"gwashington": {"jadams"},
|
||||
"tjefferson": {"gwashington", "jadams"}, // wmckinley set conditionally below
|
||||
"jadams": {"gwashington", "tjefferson"},
|
||||
}
|
||||
for row, ss := range initialData {
|
||||
mut := NewMutation()
|
||||
for _, name := range ss {
|
||||
mut.Set("follows", name, 0, []byte("1"))
|
||||
}
|
||||
if err := tbl.Apply(ctx, row, mut); err != nil {
|
||||
t.Errorf("Mutating row %q: %v", row, err)
|
||||
}
|
||||
}
|
||||
checkpoint("inserted initial data")
|
||||
|
||||
// Do a conditional mutation with a complex filter.
|
||||
mutTrue := NewMutation()
|
||||
mutTrue.Set("follows", "wmckinley", 0, []byte("1"))
|
||||
filter := ChainFilters(ColumnFilter("gwash[iz].*"), ValueFilter("."))
|
||||
mut := NewCondMutation(filter, mutTrue, nil)
|
||||
if err := tbl.Apply(ctx, "tjefferson", mut); err != nil {
|
||||
t.Errorf("Conditionally mutating row: %v", err)
|
||||
}
|
||||
// Do a second condition mutation with a filter that does not match,
|
||||
// and thus no changes should be made.
|
||||
mutTrue = NewMutation()
|
||||
mutTrue.DeleteRow()
|
||||
filter = ColumnFilter("snoop.dogg")
|
||||
mut = NewCondMutation(filter, mutTrue, nil)
|
||||
if err := tbl.Apply(ctx, "tjefferson", mut); err != nil {
|
||||
t.Errorf("Conditionally mutating row: %v", err)
|
||||
}
|
||||
checkpoint("did two conditional mutations")
|
||||
|
||||
// Fetch a row.
|
||||
row, err := tbl.ReadRow(ctx, "jadams")
|
||||
if err != nil {
|
||||
t.Fatalf("Reading a row: %v", err)
|
||||
}
|
||||
wantRow := Row{
|
||||
"follows": []ReadItem{
|
||||
{Row: "jadams", Column: "follows:gwashington", Value: []byte("1")},
|
||||
{Row: "jadams", Column: "follows:tjefferson", Value: []byte("1")},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(row, wantRow) {
|
||||
t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow)
|
||||
}
|
||||
checkpoint("tested ReadRow")
|
||||
|
||||
// Do a bunch of reads with filters.
|
||||
readTests := []struct {
|
||||
desc string
|
||||
rr RowSet
|
||||
filter Filter // may be nil
|
||||
limit ReadOption // may be nil
|
||||
|
||||
// We do the read, grab all the cells, turn them into "<row>-<col>-<val>",
|
||||
// and join with a comma.
|
||||
want string
|
||||
}{
|
||||
{
|
||||
desc: "read all, unfiltered",
|
||||
rr: RowRange{},
|
||||
want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1",
|
||||
},
|
||||
{
|
||||
desc: "read with InfiniteRange, unfiltered",
|
||||
rr: InfiniteRange("tjefferson"),
|
||||
want: "tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1",
|
||||
},
|
||||
{
|
||||
desc: "read with NewRange, unfiltered",
|
||||
rr: NewRange("gargamel", "hubbard"),
|
||||
want: "gwashington-jadams-1",
|
||||
},
|
||||
{
|
||||
desc: "read with PrefixRange, unfiltered",
|
||||
rr: PrefixRange("jad"),
|
||||
want: "jadams-gwashington-1,jadams-tjefferson-1",
|
||||
},
|
||||
{
|
||||
desc: "read with SingleRow, unfiltered",
|
||||
rr: SingleRow("wmckinley"),
|
||||
want: "wmckinley-tjefferson-1",
|
||||
},
|
||||
{
|
||||
desc: "read all, with ColumnFilter",
|
||||
rr: RowRange{},
|
||||
filter: ColumnFilter(".*j.*"), // matches "jadams" and "tjefferson"
|
||||
want: "gwashington-jadams-1,jadams-tjefferson-1,tjefferson-jadams-1,wmckinley-tjefferson-1",
|
||||
},
|
||||
{
|
||||
desc: "read range, with ColumnRangeFilter",
|
||||
rr: RowRange{},
|
||||
filter: ColumnRangeFilter("follows", "h", "k"),
|
||||
want: "gwashington-jadams-1,tjefferson-jadams-1",
|
||||
},
|
||||
{
|
||||
desc: "read range from empty, with ColumnRangeFilter",
|
||||
rr: RowRange{},
|
||||
filter: ColumnRangeFilter("follows", "", "u"),
|
||||
want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,wmckinley-tjefferson-1",
|
||||
},
|
||||
{
|
||||
desc: "read range from start to empty, with ColumnRangeFilter",
|
||||
rr: RowRange{},
|
||||
filter: ColumnRangeFilter("follows", "h", ""),
|
||||
want: "gwashington-jadams-1,jadams-tjefferson-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1",
|
||||
},
|
||||
{
|
||||
desc: "read with RowKeyFilter",
|
||||
rr: RowRange{},
|
||||
filter: RowKeyFilter(".*wash.*"),
|
||||
want: "gwashington-jadams-1",
|
||||
},
|
||||
{
|
||||
desc: "read with RowKeyFilter, no matches",
|
||||
rr: RowRange{},
|
||||
filter: RowKeyFilter(".*xxx.*"),
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
desc: "read with FamilyFilter, no matches",
|
||||
rr: RowRange{},
|
||||
filter: FamilyFilter(".*xxx.*"),
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
desc: "read with ColumnFilter + row limit",
|
||||
rr: RowRange{},
|
||||
filter: ColumnFilter(".*j.*"), // matches "jadams" and "tjefferson"
|
||||
limit: LimitRows(2),
|
||||
want: "gwashington-jadams-1,jadams-tjefferson-1",
|
||||
},
|
||||
{
|
||||
desc: "read all, strip values",
|
||||
rr: RowRange{},
|
||||
filter: StripValueFilter(),
|
||||
want: "gwashington-jadams-,jadams-gwashington-,jadams-tjefferson-,tjefferson-gwashington-,tjefferson-jadams-,tjefferson-wmckinley-,wmckinley-tjefferson-",
|
||||
},
|
||||
{
|
||||
desc: "read with ColumnFilter + row limit + strip values",
|
||||
rr: RowRange{},
|
||||
filter: ChainFilters(ColumnFilter(".*j.*"), StripValueFilter()), // matches "jadams" and "tjefferson"
|
||||
limit: LimitRows(2),
|
||||
want: "gwashington-jadams-,jadams-tjefferson-",
|
||||
},
|
||||
{
|
||||
desc: "read with condition, strip values on true",
|
||||
rr: RowRange{},
|
||||
filter: ConditionFilter(ColumnFilter(".*j.*"), StripValueFilter(), nil),
|
||||
want: "gwashington-jadams-,jadams-gwashington-,jadams-tjefferson-,tjefferson-gwashington-,tjefferson-jadams-,tjefferson-wmckinley-,wmckinley-tjefferson-",
|
||||
},
|
||||
{
|
||||
desc: "read with condition, strip values on false",
|
||||
rr: RowRange{},
|
||||
filter: ConditionFilter(ColumnFilter(".*xxx.*"), nil, StripValueFilter()),
|
||||
want: "gwashington-jadams-,jadams-gwashington-,jadams-tjefferson-,tjefferson-gwashington-,tjefferson-jadams-,tjefferson-wmckinley-,wmckinley-tjefferson-",
|
||||
},
|
||||
{
|
||||
desc: "read with ValueRangeFilter + row limit",
|
||||
rr: RowRange{},
|
||||
filter: ValueRangeFilter([]byte("1"), []byte("5")), // matches our value of "1"
|
||||
limit: LimitRows(2),
|
||||
want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1",
|
||||
},
|
||||
{
|
||||
desc: "read with ValueRangeFilter, no match on exclusive end",
|
||||
rr: RowRange{},
|
||||
filter: ValueRangeFilter([]byte("0"), []byte("1")), // no match
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
desc: "read with ValueRangeFilter, no matches",
|
||||
rr: RowRange{},
|
||||
filter: ValueRangeFilter([]byte("3"), []byte("5")), // matches nothing
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
desc: "read with InterleaveFilter, no matches on all filters",
|
||||
rr: RowRange{},
|
||||
filter: InterleaveFilters(ColumnFilter(".*x.*"), ColumnFilter(".*z.*")),
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
desc: "read with InterleaveFilter, no duplicate cells",
|
||||
rr: RowRange{},
|
||||
filter: InterleaveFilters(ColumnFilter(".*g.*"), ColumnFilter(".*j.*")),
|
||||
want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,wmckinley-tjefferson-1",
|
||||
},
|
||||
{
|
||||
desc: "read with InterleaveFilter, with duplicate cells",
|
||||
rr: RowRange{},
|
||||
filter: InterleaveFilters(ColumnFilter(".*g.*"), ColumnFilter(".*g.*")),
|
||||
want: "jadams-gwashington-1,jadams-gwashington-1,tjefferson-gwashington-1,tjefferson-gwashington-1",
|
||||
},
|
||||
{
|
||||
desc: "read with a RowRangeList and no filter",
|
||||
rr: RowRangeList{NewRange("gargamel", "hubbard"), InfiniteRange("wmckinley")},
|
||||
want: "gwashington-jadams-1,wmckinley-tjefferson-1",
|
||||
},
|
||||
{
|
||||
desc: "chain that excludes rows and matches nothing, in a condition",
|
||||
rr: RowRange{},
|
||||
filter: ConditionFilter(ChainFilters(ColumnFilter(".*j.*"), ColumnFilter(".*mckinley.*")), StripValueFilter(), nil),
|
||||
want: "",
|
||||
},
|
||||
}
|
||||
for _, tc := range readTests {
|
||||
var opts []ReadOption
|
||||
if tc.filter != nil {
|
||||
opts = append(opts, RowFilter(tc.filter))
|
||||
}
|
||||
if tc.limit != nil {
|
||||
opts = append(opts, tc.limit)
|
||||
}
|
||||
var elt []string
|
||||
err := tbl.ReadRows(context.Background(), tc.rr, func(r Row) bool {
|
||||
for _, ris := range r {
|
||||
for _, ri := range ris {
|
||||
elt = append(elt, formatReadItem(ri))
|
||||
}
|
||||
}
|
||||
return true
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
t.Errorf("%s: %v", tc.desc, err)
|
||||
continue
|
||||
}
|
||||
if got := strings.Join(elt, ","); got != tc.want {
|
||||
t.Errorf("%s: wrong reads.\n got %q\nwant %q", tc.desc, got, tc.want)
|
||||
}
|
||||
}
|
||||
// Read a RowList
|
||||
var elt []string
|
||||
keys := RowList{"wmckinley", "gwashington", "jadams"}
|
||||
want := "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,wmckinley-tjefferson-1"
|
||||
err = tbl.ReadRows(ctx, keys, func(r Row) bool {
|
||||
for _, ris := range r {
|
||||
for _, ri := range ris {
|
||||
elt = append(elt, formatReadItem(ri))
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
t.Errorf("read RowList: %v", err)
|
||||
}
|
||||
|
||||
if got := strings.Join(elt, ","); got != want {
|
||||
t.Errorf("bulk read: wrong reads.\n got %q\nwant %q", got, want)
|
||||
}
|
||||
checkpoint("tested ReadRows in a few ways")
|
||||
|
||||
// Do a scan and stop part way through.
|
||||
// Verify that the ReadRows callback doesn't keep running.
|
||||
stopped := false
|
||||
err = tbl.ReadRows(ctx, InfiniteRange(""), func(r Row) bool {
|
||||
if r.Key() < "h" {
|
||||
return true
|
||||
}
|
||||
if !stopped {
|
||||
stopped = true
|
||||
return false
|
||||
}
|
||||
t.Errorf("ReadRows kept scanning to row %q after being told to stop", r.Key())
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
t.Errorf("Partial ReadRows: %v", err)
|
||||
}
|
||||
checkpoint("did partial ReadRows test")
|
||||
|
||||
// Delete a row and check it goes away.
|
||||
mut = NewMutation()
|
||||
mut.DeleteRow()
|
||||
if err := tbl.Apply(ctx, "wmckinley", mut); err != nil {
|
||||
t.Errorf("Apply DeleteRow: %v", err)
|
||||
}
|
||||
row, err = tbl.ReadRow(ctx, "wmckinley")
|
||||
if err != nil {
|
||||
t.Fatalf("Reading a row after DeleteRow: %v", err)
|
||||
}
|
||||
if len(row) != 0 {
|
||||
t.Fatalf("Read non-zero row after DeleteRow: %v", row)
|
||||
}
|
||||
checkpoint("exercised DeleteRow")
|
||||
|
||||
// Check ReadModifyWrite.
|
||||
|
||||
if err := adminClient.CreateColumnFamily(ctx, table, "counter"); err != nil {
|
||||
t.Fatalf("Creating column family: %v", err)
|
||||
}
|
||||
|
||||
appendRMW := func(b []byte) *ReadModifyWrite {
|
||||
rmw := NewReadModifyWrite()
|
||||
rmw.AppendValue("counter", "likes", b)
|
||||
return rmw
|
||||
}
|
||||
incRMW := func(n int64) *ReadModifyWrite {
|
||||
rmw := NewReadModifyWrite()
|
||||
rmw.Increment("counter", "likes", n)
|
||||
return rmw
|
||||
}
|
||||
rmwSeq := []struct {
|
||||
desc string
|
||||
rmw *ReadModifyWrite
|
||||
want []byte
|
||||
}{
|
||||
{
|
||||
desc: "append #1",
|
||||
rmw: appendRMW([]byte{0, 0, 0}),
|
||||
want: []byte{0, 0, 0},
|
||||
},
|
||||
{
|
||||
desc: "append #2",
|
||||
rmw: appendRMW([]byte{0, 0, 0, 0, 17}), // the remaining 40 bits to make a big-endian 17
|
||||
want: []byte{0, 0, 0, 0, 0, 0, 0, 17},
|
||||
},
|
||||
{
|
||||
desc: "increment",
|
||||
rmw: incRMW(8),
|
||||
want: []byte{0, 0, 0, 0, 0, 0, 0, 25},
|
||||
},
|
||||
}
|
||||
for _, step := range rmwSeq {
|
||||
row, err := tbl.ApplyReadModifyWrite(ctx, "gwashington", step.rmw)
|
||||
if err != nil {
|
||||
t.Fatalf("ApplyReadModifyWrite %+v: %v", step.rmw, err)
|
||||
}
|
||||
clearTimestamps(row)
|
||||
wantRow := Row{"counter": []ReadItem{{Row: "gwashington", Column: "counter:likes", Value: step.want}}}
|
||||
if !reflect.DeepEqual(row, wantRow) {
|
||||
t.Fatalf("After %s,\n got %v\nwant %v", step.desc, row, wantRow)
|
||||
}
|
||||
}
|
||||
checkpoint("tested ReadModifyWrite")
|
||||
|
||||
// Test arbitrary timestamps more thoroughly.
|
||||
if err := adminClient.CreateColumnFamily(ctx, table, "ts"); err != nil {
|
||||
t.Fatalf("Creating column family: %v", err)
|
||||
}
|
||||
const numVersions = 4
|
||||
mut = NewMutation()
|
||||
for i := 0; i < numVersions; i++ {
|
||||
// Timestamps are used in thousands because the server
|
||||
// only permits that granularity.
|
||||
mut.Set("ts", "col", Timestamp(i*1000), []byte(fmt.Sprintf("val-%d", i)))
|
||||
mut.Set("ts", "col2", Timestamp(i*1000), []byte(fmt.Sprintf("val-%d", i)))
|
||||
}
|
||||
if err := tbl.Apply(ctx, "testrow", mut); err != nil {
|
||||
t.Fatalf("Mutating row: %v", err)
|
||||
}
|
||||
r, err := tbl.ReadRow(ctx, "testrow")
|
||||
if err != nil {
|
||||
t.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
wantRow = Row{"ts": []ReadItem{
|
||||
// These should be returned in descending timestamp order.
|
||||
{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
|
||||
{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
|
||||
{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
|
||||
{Row: "testrow", Column: "ts:col", Timestamp: 0, Value: []byte("val-0")},
|
||||
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
|
||||
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
|
||||
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
|
||||
{Row: "testrow", Column: "ts:col2", Timestamp: 0, Value: []byte("val-0")},
|
||||
}}
|
||||
if !reflect.DeepEqual(r, wantRow) {
|
||||
t.Errorf("Cell with multiple versions,\n got %v\nwant %v", r, wantRow)
|
||||
}
|
||||
// Do the same read, but filter to the latest two versions.
|
||||
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2)))
|
||||
if err != nil {
|
||||
t.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
wantRow = Row{"ts": []ReadItem{
|
||||
{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
|
||||
{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
|
||||
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
|
||||
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
|
||||
}}
|
||||
if !reflect.DeepEqual(r, wantRow) {
|
||||
t.Errorf("Cell with multiple versions and LatestNFilter(2),\n got %v\nwant %v", r, wantRow)
|
||||
}
|
||||
// Check cell offset / limit
|
||||
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(CellsPerRowLimitFilter(3)))
|
||||
if err != nil {
|
||||
t.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
wantRow = Row{"ts": []ReadItem{
|
||||
{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
|
||||
{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
|
||||
{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
|
||||
}}
|
||||
if !reflect.DeepEqual(r, wantRow) {
|
||||
t.Errorf("Cell with multiple versions and CellsPerRowLimitFilter(3),\n got %v\nwant %v", r, wantRow)
|
||||
}
|
||||
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(CellsPerRowOffsetFilter(3)))
|
||||
if err != nil {
|
||||
t.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
wantRow = Row{"ts": []ReadItem{
|
||||
{Row: "testrow", Column: "ts:col", Timestamp: 0, Value: []byte("val-0")},
|
||||
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
|
||||
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
|
||||
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
|
||||
{Row: "testrow", Column: "ts:col2", Timestamp: 0, Value: []byte("val-0")},
|
||||
}}
|
||||
if !reflect.DeepEqual(r, wantRow) {
|
||||
t.Errorf("Cell with multiple versions and CellsPerRowOffsetFilter(3),\n got %v\nwant %v", r, wantRow)
|
||||
}
|
||||
// Check timestamp range filtering (with truncation)
|
||||
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(TimestampRangeFilterMicros(1001, 3000)))
|
||||
if err != nil {
|
||||
t.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
wantRow = Row{"ts": []ReadItem{
|
||||
{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
|
||||
{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
|
||||
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
|
||||
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
|
||||
}}
|
||||
if !reflect.DeepEqual(r, wantRow) {
|
||||
t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 3000),\n got %v\nwant %v", r, wantRow)
|
||||
}
|
||||
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(TimestampRangeFilterMicros(1000, 0)))
|
||||
if err != nil {
|
||||
t.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
wantRow = Row{"ts": []ReadItem{
|
||||
{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
|
||||
{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
|
||||
{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
|
||||
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
|
||||
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
|
||||
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
|
||||
}}
|
||||
if !reflect.DeepEqual(r, wantRow) {
|
||||
t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 0),\n got %v\nwant %v", r, wantRow)
|
||||
}
|
||||
// Delete non-existing cells, no such column family in this row
|
||||
// Should not delete anything
|
||||
if err := adminClient.CreateColumnFamily(ctx, table, "non-existing"); err != nil {
|
||||
t.Fatalf("Creating column family: %v", err)
|
||||
}
|
||||
mut = NewMutation()
|
||||
mut.DeleteTimestampRange("non-existing", "col", 2000, 3000) // half-open interval
|
||||
if err := tbl.Apply(ctx, "testrow", mut); err != nil {
|
||||
t.Fatalf("Mutating row: %v", err)
|
||||
}
|
||||
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(3)))
|
||||
if err != nil {
|
||||
t.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(r, wantRow) {
|
||||
t.Errorf("Cell was deleted unexpectly,\n got %v\nwant %v", r, wantRow)
|
||||
}
|
||||
// Delete non-existing cells, no such column in this column family
|
||||
// Should not delete anything
|
||||
mut = NewMutation()
|
||||
mut.DeleteTimestampRange("ts", "non-existing", 2000, 3000) // half-open interval
|
||||
if err := tbl.Apply(ctx, "testrow", mut); err != nil {
|
||||
t.Fatalf("Mutating row: %v", err)
|
||||
}
|
||||
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(3)))
|
||||
if err != nil {
|
||||
t.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(r, wantRow) {
|
||||
t.Errorf("Cell was deleted unexpectly,\n got %v\nwant %v", r, wantRow)
|
||||
}
|
||||
// Delete the cell with timestamp 2000 and repeat the last read,
|
||||
// checking that we get ts 3000 and ts 1000.
|
||||
mut = NewMutation()
|
||||
mut.DeleteTimestampRange("ts", "col", 2001, 3000) // half-open interval
|
||||
if err := tbl.Apply(ctx, "testrow", mut); err != nil {
|
||||
t.Fatalf("Mutating row: %v", err)
|
||||
}
|
||||
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2)))
|
||||
if err != nil {
|
||||
t.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
wantRow = Row{"ts": []ReadItem{
|
||||
{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
|
||||
{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
|
||||
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
|
||||
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
|
||||
}}
|
||||
if !reflect.DeepEqual(r, wantRow) {
|
||||
t.Errorf("Cell with multiple versions and LatestNFilter(2), after deleting timestamp 2000,\n got %v\nwant %v", r, wantRow)
|
||||
}
|
||||
checkpoint("tested multiple versions in a cell")
|
||||
|
||||
// Check DeleteCellsInFamily
|
||||
if err := adminClient.CreateColumnFamily(ctx, table, "status"); err != nil {
|
||||
t.Fatalf("Creating column family: %v", err)
|
||||
}
|
||||
|
||||
mut = NewMutation()
|
||||
mut.Set("status", "start", 0, []byte("1"))
|
||||
mut.Set("status", "end", 0, []byte("2"))
|
||||
mut.Set("ts", "col", 0, []byte("3"))
|
||||
if err := tbl.Apply(ctx, "row1", mut); err != nil {
|
||||
t.Errorf("Mutating row: %v", err)
|
||||
}
|
||||
if err := tbl.Apply(ctx, "row2", mut); err != nil {
|
||||
t.Errorf("Mutating row: %v", err)
|
||||
}
|
||||
|
||||
mut = NewMutation()
|
||||
mut.DeleteCellsInFamily("status")
|
||||
if err := tbl.Apply(ctx, "row1", mut); err != nil {
|
||||
t.Errorf("Delete cf: %v", err)
|
||||
}
|
||||
|
||||
// ColumnFamily removed
|
||||
r, err = tbl.ReadRow(ctx, "row1")
|
||||
if err != nil {
|
||||
t.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
wantRow = Row{"ts": []ReadItem{
|
||||
{Row: "row1", Column: "ts:col", Timestamp: 0, Value: []byte("3")},
|
||||
}}
|
||||
if !reflect.DeepEqual(r, wantRow) {
|
||||
t.Errorf("column family was not deleted.\n got %v\n want %v", r, wantRow)
|
||||
}
|
||||
|
||||
// ColumnFamily not removed
|
||||
r, err = tbl.ReadRow(ctx, "row2")
|
||||
if err != nil {
|
||||
t.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
wantRow = Row{
|
||||
"ts": []ReadItem{
|
||||
{Row: "row2", Column: "ts:col", Timestamp: 0, Value: []byte("3")},
|
||||
},
|
||||
"status": []ReadItem{
|
||||
{Row: "row2", Column: "status:end", Timestamp: 0, Value: []byte("2")},
|
||||
{Row: "row2", Column: "status:start", Timestamp: 0, Value: []byte("1")},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(r, wantRow) {
|
||||
t.Errorf("Column family was deleted unexpectly.\n got %v\n want %v", r, wantRow)
|
||||
}
|
||||
checkpoint("tested family delete")
|
||||
|
||||
// Check DeleteCellsInColumn
|
||||
mut = NewMutation()
|
||||
mut.Set("status", "start", 0, []byte("1"))
|
||||
mut.Set("status", "middle", 0, []byte("2"))
|
||||
mut.Set("status", "end", 0, []byte("3"))
|
||||
if err := tbl.Apply(ctx, "row3", mut); err != nil {
|
||||
t.Errorf("Mutating row: %v", err)
|
||||
}
|
||||
mut = NewMutation()
|
||||
mut.DeleteCellsInColumn("status", "middle")
|
||||
if err := tbl.Apply(ctx, "row3", mut); err != nil {
|
||||
t.Errorf("Delete column: %v", err)
|
||||
}
|
||||
r, err = tbl.ReadRow(ctx, "row3")
|
||||
if err != nil {
|
||||
t.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
wantRow = Row{
|
||||
"status": []ReadItem{
|
||||
{Row: "row3", Column: "status:end", Timestamp: 0, Value: []byte("3")},
|
||||
{Row: "row3", Column: "status:start", Timestamp: 0, Value: []byte("1")},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(r, wantRow) {
|
||||
t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow)
|
||||
}
|
||||
mut = NewMutation()
|
||||
mut.DeleteCellsInColumn("status", "start")
|
||||
if err := tbl.Apply(ctx, "row3", mut); err != nil {
|
||||
t.Errorf("Delete column: %v", err)
|
||||
}
|
||||
r, err = tbl.ReadRow(ctx, "row3")
|
||||
if err != nil {
|
||||
t.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
wantRow = Row{
|
||||
"status": []ReadItem{
|
||||
{Row: "row3", Column: "status:end", Timestamp: 0, Value: []byte("3")},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(r, wantRow) {
|
||||
t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow)
|
||||
}
|
||||
mut = NewMutation()
|
||||
mut.DeleteCellsInColumn("status", "end")
|
||||
if err := tbl.Apply(ctx, "row3", mut); err != nil {
|
||||
t.Errorf("Delete column: %v", err)
|
||||
}
|
||||
r, err = tbl.ReadRow(ctx, "row3")
|
||||
if err != nil {
|
||||
t.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
if len(r) != 0 {
|
||||
t.Errorf("Delete column: got %v, want empty row", r)
|
||||
}
|
||||
// Add same cell after delete
|
||||
mut = NewMutation()
|
||||
mut.Set("status", "end", 0, []byte("3"))
|
||||
if err := tbl.Apply(ctx, "row3", mut); err != nil {
|
||||
t.Errorf("Mutating row: %v", err)
|
||||
}
|
||||
r, err = tbl.ReadRow(ctx, "row3")
|
||||
if err != nil {
|
||||
t.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(r, wantRow) {
|
||||
t.Errorf("Column was not deleted correctly.\n got %v\n want %v", r, wantRow)
|
||||
}
|
||||
checkpoint("tested column delete")
|
||||
|
||||
// Do highly concurrent reads/writes.
|
||||
// TODO(dsymonds): Raise this to 1000 when https://github.com/grpc/grpc-go/issues/205 is resolved.
|
||||
const maxConcurrency = 100
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < maxConcurrency; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
switch r := rand.Intn(100); { // r ∈ [0,100)
|
||||
case 0 <= r && r < 30:
|
||||
// Do a read.
|
||||
_, err := tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(1)))
|
||||
if err != nil {
|
||||
t.Errorf("Concurrent read: %v", err)
|
||||
}
|
||||
case 30 <= r && r < 100:
|
||||
// Do a write.
|
||||
mut := NewMutation()
|
||||
mut.Set("ts", "col", 0, []byte("data"))
|
||||
if err := tbl.Apply(ctx, "testrow", mut); err != nil {
|
||||
t.Errorf("Concurrent write: %v", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
checkpoint("tested high concurrency")
|
||||
|
||||
// Large reads, writes and scans.
|
||||
bigBytes := make([]byte, 3<<20) // 3 MB is large, but less than current gRPC max of 4 MB.
|
||||
nonsense := []byte("lorem ipsum dolor sit amet, ")
|
||||
fill(bigBytes, nonsense)
|
||||
mut = NewMutation()
|
||||
mut.Set("ts", "col", 0, bigBytes)
|
||||
if err := tbl.Apply(ctx, "bigrow", mut); err != nil {
|
||||
t.Errorf("Big write: %v", err)
|
||||
}
|
||||
r, err = tbl.ReadRow(ctx, "bigrow")
|
||||
if err != nil {
|
||||
t.Errorf("Big read: %v", err)
|
||||
}
|
||||
wantRow = Row{"ts": []ReadItem{
|
||||
{Row: "bigrow", Column: "ts:col", Value: bigBytes},
|
||||
}}
|
||||
if !reflect.DeepEqual(r, wantRow) {
|
||||
t.Errorf("Big read returned incorrect bytes: %v", r)
|
||||
}
|
||||
// Now write 1000 rows, each with 82 KB values, then scan them all.
|
||||
medBytes := make([]byte, 82<<10)
|
||||
fill(medBytes, nonsense)
|
||||
sem := make(chan int, 50) // do up to 50 mutations at a time.
|
||||
for i := 0; i < 1000; i++ {
|
||||
mut := NewMutation()
|
||||
mut.Set("ts", "big-scan", 0, medBytes)
|
||||
row := fmt.Sprintf("row-%d", i)
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
defer func() { <-sem }()
|
||||
sem <- 1
|
||||
if err := tbl.Apply(ctx, row, mut); err != nil {
|
||||
t.Errorf("Preparing large scan: %v", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
n := 0
|
||||
err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool {
|
||||
for _, ris := range r {
|
||||
for _, ri := range ris {
|
||||
n += len(ri.Value)
|
||||
}
|
||||
}
|
||||
return true
|
||||
}, RowFilter(ColumnFilter("big-scan")))
|
||||
if err != nil {
|
||||
t.Errorf("Doing large scan: %v", err)
|
||||
}
|
||||
if want := 1000 * len(medBytes); n != want {
|
||||
t.Errorf("Large scan returned %d bytes, want %d", n, want)
|
||||
}
|
||||
// Scan a subset of the 1000 rows that we just created, using a LimitRows ReadOption.
|
||||
rc := 0
|
||||
wantRc := 3
|
||||
err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool {
|
||||
rc++
|
||||
return true
|
||||
}, LimitRows(int64(wantRc)))
|
||||
if rc != wantRc {
|
||||
t.Errorf("Scan with row limit returned %d rows, want %d", rc, wantRc)
|
||||
}
|
||||
checkpoint("tested big read/write/scan")
|
||||
|
||||
// Test bulk mutations
|
||||
if err := adminClient.CreateColumnFamily(ctx, table, "bulk"); err != nil {
|
||||
t.Fatalf("Creating column family: %v", err)
|
||||
}
|
||||
bulkData := map[string][]string{
|
||||
"red sox": {"2004", "2007", "2013"},
|
||||
"patriots": {"2001", "2003", "2004", "2014"},
|
||||
"celtics": {"1981", "1984", "1986", "2008"},
|
||||
}
|
||||
var rowKeys []string
|
||||
var muts []*Mutation
|
||||
for row, ss := range bulkData {
|
||||
mut := NewMutation()
|
||||
for _, name := range ss {
|
||||
mut.Set("bulk", name, 0, []byte("1"))
|
||||
}
|
||||
rowKeys = append(rowKeys, row)
|
||||
muts = append(muts, mut)
|
||||
}
|
||||
status, err := tbl.ApplyBulk(ctx, rowKeys, muts)
|
||||
if err != nil {
|
||||
t.Fatalf("Bulk mutating rows %q: %v", rowKeys, err)
|
||||
}
|
||||
if status != nil {
|
||||
t.Errorf("non-nil errors: %v", err)
|
||||
}
|
||||
checkpoint("inserted bulk data")
|
||||
|
||||
// Read each row back
|
||||
for rowKey, ss := range bulkData {
|
||||
row, err := tbl.ReadRow(ctx, rowKey)
|
||||
if err != nil {
|
||||
t.Fatalf("Reading a bulk row: %v", err)
|
||||
}
|
||||
var wantItems []ReadItem
|
||||
for _, val := range ss {
|
||||
wantItems = append(wantItems, ReadItem{Row: rowKey, Column: "bulk:" + val, Value: []byte("1")})
|
||||
}
|
||||
wantRow := Row{"bulk": wantItems}
|
||||
if !reflect.DeepEqual(row, wantRow) {
|
||||
t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow)
|
||||
}
|
||||
}
|
||||
checkpoint("tested reading from bulk insert")
|
||||
|
||||
// Test bulk write errors.
|
||||
// Note: Setting timestamps as ServerTime makes sure the mutations are not retried on error.
|
||||
badMut := NewMutation()
|
||||
badMut.Set("badfamily", "col", ServerTime, nil)
|
||||
badMut2 := NewMutation()
|
||||
badMut2.Set("badfamily2", "goodcol", ServerTime, []byte("1"))
|
||||
status, err = tbl.ApplyBulk(ctx, []string{"badrow", "badrow2"}, []*Mutation{badMut, badMut2})
|
||||
if err != nil {
|
||||
t.Fatalf("Bulk mutating rows %q: %v", rowKeys, err)
|
||||
}
|
||||
if status == nil {
|
||||
t.Errorf("No errors for bad bulk mutation")
|
||||
} else if status[0] == nil || status[1] == nil {
|
||||
t.Errorf("No error for bad bulk mutation")
|
||||
}
|
||||
}
|
||||
|
||||
func formatReadItem(ri ReadItem) string {
|
||||
// Use the column qualifier only to make the test data briefer.
|
||||
col := ri.Column[strings.Index(ri.Column, ":")+1:]
|
||||
return fmt.Sprintf("%s-%s-%s", ri.Row, col, ri.Value)
|
||||
}
|
||||
|
||||
// fill overwrites all of b with repeated copies of sub, truncating the
// final copy if len(b) is not a multiple of len(sub).
//
// The previous version stopped once fewer than len(sub) bytes remained,
// silently leaving the tail of b zeroed, and spun forever when sub was
// empty; both are fixed here.
func fill(b, sub []byte) {
	if len(sub) == 0 {
		// Nothing to repeat; bail out rather than loop on zero-byte copies.
		return
	}
	for len(b) > 0 {
		n := copy(b, sub)
		b = b[n:]
	}
}
|
||||
|
||||
func clearTimestamps(r Row) {
|
||||
for _, ris := range r {
|
||||
for i := range ris {
|
||||
ris[i].Timestamp = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
83
vendor/cloud.google.com/go/bigtable/bttest/example_test.go
generated
vendored
Normal file
83
vendor/cloud.google.com/go/bigtable/bttest/example_test.go
generated
vendored
Normal file
@@ -0,0 +1,83 @@
|
||||
/*
|
||||
Copyright 2016 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package bttest_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"cloud.google.com/go/bigtable"
|
||||
"cloud.google.com/go/bigtable/bttest"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// ExampleNewServer demonstrates the in-memory Bigtable emulator: start a
// bttest server, connect an admin client and a data client to it over
// gRPC, write a single cell, and read it back.
func ExampleNewServer() {

	// Start the emulator on an OS-assigned localhost port.
	srv, err := bttest.NewServer("127.0.0.1:0")

	if err != nil {
		log.Fatalln(err)
	}

	ctx := context.Background()

	// The emulator serves plaintext gRPC, hence WithInsecure.
	conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
	if err != nil {
		log.Fatalln(err)
	}

	// Project/instance names are arbitrary for the emulator.
	proj, instance := "proj", "instance"

	// Point the admin client at the emulator via the shared connection.
	adminClient, err := bigtable.NewAdminClient(ctx, proj, instance, option.WithGRPCConn(conn))
	if err != nil {
		log.Fatalln(err)
	}

	// Create the table and a column family to write into.
	if err = adminClient.CreateTable(ctx, "example"); err != nil {
		log.Fatalln(err)
	}

	if err = adminClient.CreateColumnFamily(ctx, "example", "links"); err != nil {
		log.Fatalln(err)
	}

	// The data client reuses the same gRPC connection.
	client, err := bigtable.NewClient(ctx, proj, instance, option.WithGRPCConn(conn))
	if err != nil {
		log.Fatalln(err)
	}
	tbl := client.Open("example")

	// Write one cell: links:golang.org = "Gophers!" in row "com.google.cloud".
	mut := bigtable.NewMutation()
	mut.Set("links", "golang.org", bigtable.Now(), []byte("Gophers!"))
	if err = tbl.Apply(ctx, "com.google.cloud", mut); err != nil {
		log.Fatalln(err)
	}

	// Read the row back and print its cells; compare with Output below.
	if row, err := tbl.ReadRow(ctx, "com.google.cloud"); err != nil {
		log.Fatalln(err)
	} else {
		for _, column := range row["links"] {
			fmt.Println(column.Column)
			fmt.Println(string(column.Value))
		}
	}

	// Output:
	// links:golang.org
	// Gophers!
}
|
||||
1272
vendor/cloud.google.com/go/bigtable/bttest/inmem.go
generated
vendored
Normal file
1272
vendor/cloud.google.com/go/bigtable/bttest/inmem.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
517
vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go
generated
vendored
Normal file
517
vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go
generated
vendored
Normal file
@@ -0,0 +1,517 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bttest
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
|
||||
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
|
||||
"google.golang.org/grpc"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// TestConcurrentMutationsReadModifyAndGC hammers one in-memory table with
// concurrent MutateRow, ReadModifyWriteRow and GC calls to shake out data
// races and deadlocks in the fake server. The workload runs until the
// 50ms context expires; the test fails if the goroutines have not all
// drained within 1s after that.
func TestConcurrentMutationsReadModifyAndGC(t *testing.T) {
	s := &server{
		tables: make(map[string]*table),
	}
	// ctx doubles as the stop signal for the worker goroutines below.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	if _, err := s.CreateTable(
		ctx,
		&btapb.CreateTableRequest{Parent: "cluster", TableId: "t"}); err != nil {
		t.Fatal(err)
	}
	const name = `cluster/tables/t`
	// Grab the table's internal state directly so gc() can be driven below.
	tbl := s.tables[name]
	// Create a column family "cf"...
	req := &btapb.ModifyColumnFamiliesRequest{
		Name: name,
		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
			Id: "cf",
			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}},
		}},
	}
	_, err := s.ModifyColumnFamilies(ctx, req)
	if err != nil {
		t.Fatal(err)
	}
	// ...then give it a "keep only the newest version" GC rule so that
	// gc() below actually has versions to collect.
	req = &btapb.ModifyColumnFamiliesRequest{
		Name: name,
		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
			Id: "cf",
			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{&btapb.ColumnFamily{
				GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}},
			}},
		}},
	}
	if _, err := s.ModifyColumnFamilies(ctx, req); err != nil {
		t.Fatal(err)
	}

	var wg sync.WaitGroup
	var ts int64
	// ms builds a single SetCell mutation for cf:col. The atomic counter
	// gives each call a distinct, strictly increasing timestamp; no Value
	// is set — only the cell versions matter for this test.
	ms := func() []*btpb.Mutation {
		return []*btpb.Mutation{{
			Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
				FamilyName: "cf",
				ColumnQualifier: []byte(`col`),
				TimestampMicros: atomic.AddInt64(&ts, 1000),
			}},
		}}
	}

	// rmw builds an increment of cf:col on a random row in [0,100).
	rmw := func() *btpb.ReadModifyWriteRowRequest {
		return &btpb.ReadModifyWriteRowRequest{
			TableName: name,
			RowKey: []byte(fmt.Sprint(rand.Intn(100))),
			Rules: []*btpb.ReadModifyWriteRule{{
				FamilyName: "cf",
				ColumnQualifier: []byte("col"),
				Rule: &btpb.ReadModifyWriteRule_IncrementAmount{1},
			}},
		}
	}
	// Launch 100 of each worker kind: a MutateRow loop, a
	// ReadModifyWriteRow loop, and a one-shot GC pass. Return values are
	// discarded — only races and deadlocks are under test here.
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for ctx.Err() == nil {
				req := &btpb.MutateRowRequest{
					TableName: name,
					RowKey: []byte(fmt.Sprint(rand.Intn(100))),
					Mutations: ms(),
				}
				s.MutateRow(ctx, req)
			}
		}()
		wg.Add(1)
		go func() {
			defer wg.Done()
			for ctx.Err() == nil {
				_, _ = s.ReadModifyWriteRow(ctx, rmw())
			}
		}()

		wg.Add(1)
		go func() {
			defer wg.Done()
			tbl.gc()
		}()
	}
	// Wait in a goroutine so a deadline can be imposed via select: if the
	// workers deadlock, the timeout branch fails the test instead of
	// hanging it.
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(1 * time.Second):
		t.Error("Concurrent mutations and GCs haven't completed after 1s")
	}
}
|
||||
|
||||
func TestCreateTableWithFamily(t *testing.T) {
|
||||
// The Go client currently doesn't support creating a table with column families
|
||||
// in one operation but it is allowed by the API. This must still be supported by the
|
||||
// fake server so this test lives here instead of in the main bigtable
|
||||
// integration test.
|
||||
s := &server{
|
||||
tables: make(map[string]*table),
|
||||
}
|
||||
ctx := context.Background()
|
||||
newTbl := btapb.Table{
|
||||
ColumnFamilies: map[string]*btapb.ColumnFamily{
|
||||
"cf1": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{123}}},
|
||||
"cf2": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{456}}},
|
||||
},
|
||||
}
|
||||
cTbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
|
||||
if err != nil {
|
||||
t.Fatalf("Creating table: %v", err)
|
||||
}
|
||||
tbl, err := s.GetTable(ctx, &btapb.GetTableRequest{Name: cTbl.Name})
|
||||
if err != nil {
|
||||
t.Fatalf("Getting table: %v", err)
|
||||
}
|
||||
cf := tbl.ColumnFamilies["cf1"]
|
||||
if cf == nil {
|
||||
t.Fatalf("Missing col family cf1")
|
||||
}
|
||||
if got, want := cf.GcRule.GetMaxNumVersions(), int32(123); got != want {
|
||||
t.Errorf("Invalid MaxNumVersions: wanted:%d, got:%d", want, got)
|
||||
}
|
||||
cf = tbl.ColumnFamilies["cf2"]
|
||||
if cf == nil {
|
||||
t.Fatalf("Missing col family cf2")
|
||||
}
|
||||
if got, want := cf.GcRule.GetMaxNumVersions(), int32(456); got != want {
|
||||
t.Errorf("Invalid MaxNumVersions: wanted:%d, got:%d", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
// MockSampleRowKeysServer is an in-memory stand-in for the server-side
// SampleRowKeys stream: Send records each response in responses instead
// of writing to a network stream, so tests can inspect what the server
// emitted. The remaining stream methods come from the embedded
// grpc.ServerStream, which is left at its zero value here (they are not
// expected to be called by the code under test).
type MockSampleRowKeysServer struct {
	responses []*btpb.SampleRowKeysResponse
	grpc.ServerStream
}
|
||||
|
||||
// Send appends resp to s.responses for later inspection; it never fails.
func (s *MockSampleRowKeysServer) Send(resp *btpb.SampleRowKeysResponse) error {
	s.responses = append(s.responses, resp)
	return nil
}
|
||||
|
||||
// TestSampleRowKeys populates a table with 1000 fixed-size rows, then
// checks that the fake server's SampleRowKeys streams at least one
// response and that the final response carries the byte offset of the
// last row.
func TestSampleRowKeys(t *testing.T) {
	s := &server{
		tables: make(map[string]*table),
	}
	ctx := context.Background()
	newTbl := btapb.Table{
		ColumnFamilies: map[string]*btapb.ColumnFamily{
			"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
		},
	}
	tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
	if err != nil {
		t.Fatalf("Creating table: %v", err)
	}

	// Populate the table
	// Every row gets one cell of the same value so that each row
	// contributes exactly len(val) bytes to the offset accounting.
	val := []byte("value")
	rowCount := 1000
	for i := 0; i < rowCount; i++ {
		req := &btpb.MutateRowRequest{
			TableName: tbl.Name,
			RowKey: []byte("row-" + strconv.Itoa(i)),
			Mutations: []*btpb.Mutation{{
				Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
					FamilyName: "cf",
					ColumnQualifier: []byte("col"),
					TimestampMicros: 0,
					Value: val,
				}},
			}},
		}
		if _, err := s.MutateRow(ctx, req); err != nil {
			t.Fatalf("Populating table: %v", err)
		}
	}

	// Capture the streamed responses via the mock stream defined above.
	mock := &MockSampleRowKeysServer{}
	if err := s.SampleRowKeys(&btpb.SampleRowKeysRequest{TableName: tbl.Name}, mock); err != nil {
		t.Errorf("SampleRowKeys error: %v", err)
	}
	if len(mock.responses) == 0 {
		t.Fatal("Response count: got 0, want > 0")
	}
	// Make sure the offset of the final response is the offset of the final row
	got := mock.responses[len(mock.responses)-1].OffsetBytes
	// Row i starts at offset i*len(val), so the last of rowCount rows is
	// expected at (rowCount-1)*len(val).
	want := int64((rowCount - 1) * len(val))
	if got != want {
		t.Errorf("Invalid offset: got %d, want %d", got, want)
	}
}
|
||||
|
||||
func TestDropRowRange(t *testing.T) {
|
||||
s := &server{
|
||||
tables: make(map[string]*table),
|
||||
}
|
||||
ctx := context.Background()
|
||||
newTbl := btapb.Table{
|
||||
ColumnFamilies: map[string]*btapb.ColumnFamily{
|
||||
"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
|
||||
},
|
||||
}
|
||||
tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
|
||||
if err != nil {
|
||||
t.Fatalf("Creating table: %v", err)
|
||||
}
|
||||
|
||||
tbl := s.tables[tblInfo.Name]
|
||||
|
||||
// Populate the table
|
||||
prefixes := []string{"AAA", "BBB", "CCC", "DDD"}
|
||||
count := 3
|
||||
doWrite := func() {
|
||||
for _, prefix := range prefixes {
|
||||
for i := 0; i < count; i++ {
|
||||
req := &btpb.MutateRowRequest{
|
||||
TableName: tblInfo.Name,
|
||||
RowKey: []byte(prefix + strconv.Itoa(i)),
|
||||
Mutations: []*btpb.Mutation{{
|
||||
Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
|
||||
FamilyName: "cf",
|
||||
ColumnQualifier: []byte("col"),
|
||||
TimestampMicros: 0,
|
||||
Value: []byte{},
|
||||
}},
|
||||
}},
|
||||
}
|
||||
if _, err := s.MutateRow(ctx, req); err != nil {
|
||||
t.Fatalf("Populating table: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
doWrite()
|
||||
tblSize := len(tbl.rows)
|
||||
req := &btapb.DropRowRangeRequest{
|
||||
Name: tblInfo.Name,
|
||||
Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("AAA")},
|
||||
}
|
||||
if _, err = s.DropRowRange(ctx, req); err != nil {
|
||||
t.Fatalf("Dropping first range: %v", err)
|
||||
}
|
||||
got, want := len(tbl.rows), tblSize-count
|
||||
if got != want {
|
||||
t.Errorf("Row count after first drop: got %d (%v), want %d", got, tbl.rows, want)
|
||||
}
|
||||
|
||||
req = &btapb.DropRowRangeRequest{
|
||||
Name: tblInfo.Name,
|
||||
Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("DDD")},
|
||||
}
|
||||
if _, err = s.DropRowRange(ctx, req); err != nil {
|
||||
t.Fatalf("Dropping second range: %v", err)
|
||||
}
|
||||
got, want = len(tbl.rows), tblSize-(2*count)
|
||||
if got != want {
|
||||
t.Errorf("Row count after second drop: got %d (%v), want %d", got, tbl.rows, want)
|
||||
}
|
||||
|
||||
req = &btapb.DropRowRangeRequest{
|
||||
Name: tblInfo.Name,
|
||||
Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("XXX")},
|
||||
}
|
||||
if _, err = s.DropRowRange(ctx, req); err != nil {
|
||||
t.Fatalf("Dropping invalid range: %v", err)
|
||||
}
|
||||
got, want = len(tbl.rows), tblSize-(2*count)
|
||||
if got != want {
|
||||
t.Errorf("Row count after invalid drop: got %d (%v), want %d", got, tbl.rows, want)
|
||||
}
|
||||
|
||||
req = &btapb.DropRowRangeRequest{
|
||||
Name: tblInfo.Name,
|
||||
Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{true},
|
||||
}
|
||||
if _, err = s.DropRowRange(ctx, req); err != nil {
|
||||
t.Fatalf("Dropping all data: %v", err)
|
||||
}
|
||||
got, want = len(tbl.rows), 0
|
||||
if got != want {
|
||||
t.Errorf("Row count after drop all: got %d, want %d", got, want)
|
||||
}
|
||||
|
||||
// Test that we can write rows, delete some and then write them again.
|
||||
count = 1
|
||||
doWrite()
|
||||
|
||||
req = &btapb.DropRowRangeRequest{
|
||||
Name: tblInfo.Name,
|
||||
Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{true},
|
||||
}
|
||||
if _, err = s.DropRowRange(ctx, req); err != nil {
|
||||
t.Fatalf("Dropping all data: %v", err)
|
||||
}
|
||||
got, want = len(tbl.rows), 0
|
||||
if got != want {
|
||||
t.Errorf("Row count after drop all: got %d, want %d", got, want)
|
||||
}
|
||||
|
||||
doWrite()
|
||||
got, want = len(tbl.rows), len(prefixes)
|
||||
if got != want {
|
||||
t.Errorf("Row count after rewrite: got %d, want %d", got, want)
|
||||
}
|
||||
|
||||
req = &btapb.DropRowRangeRequest{
|
||||
Name: tblInfo.Name,
|
||||
Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("BBB")},
|
||||
}
|
||||
if _, err = s.DropRowRange(ctx, req); err != nil {
|
||||
t.Fatalf("Dropping range: %v", err)
|
||||
}
|
||||
doWrite()
|
||||
got, want = len(tbl.rows), len(prefixes)
|
||||
if got != want {
|
||||
t.Errorf("Row count after drop range: got %d, want %d", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
// MockReadRowsServer is a fake Bigtable ReadRows stream server that records
// every response sent through it, so tests can inspect the chunk order.
type MockReadRowsServer struct {
	responses []*btpb.ReadRowsResponse
	grpc.ServerStream
}

// Send captures resp in s.responses and always reports success.
func (s *MockReadRowsServer) Send(resp *btpb.ReadRowsResponse) error {
	s.responses = append(s.responses, resp)
	return nil
}
|
||||
|
||||
func TestReadRowsOrder(t *testing.T) {
|
||||
s := &server{
|
||||
tables: make(map[string]*table),
|
||||
}
|
||||
ctx := context.Background()
|
||||
newTbl := btapb.Table{
|
||||
ColumnFamilies: map[string]*btapb.ColumnFamily{
|
||||
"cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
|
||||
},
|
||||
}
|
||||
tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
|
||||
if err != nil {
|
||||
t.Fatalf("Creating table: %v", err)
|
||||
}
|
||||
count := 3
|
||||
mcf := func(i int) *btapb.ModifyColumnFamiliesRequest {
|
||||
return &btapb.ModifyColumnFamiliesRequest{
|
||||
Name: tblInfo.Name,
|
||||
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
|
||||
Id: "cf" + strconv.Itoa(i),
|
||||
Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}},
|
||||
}},
|
||||
}
|
||||
}
|
||||
for i := 1; i <= count; i++ {
|
||||
_, err = s.ModifyColumnFamilies(ctx, mcf(i))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
// Populate the table
|
||||
for fc := 0; fc < count; fc++ {
|
||||
for cc := count; cc > 0; cc-- {
|
||||
for tc := 0; tc < count; tc++ {
|
||||
req := &btpb.MutateRowRequest{
|
||||
TableName: tblInfo.Name,
|
||||
RowKey: []byte("row"),
|
||||
Mutations: []*btpb.Mutation{{
|
||||
Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
|
||||
FamilyName: "cf" + strconv.Itoa(fc),
|
||||
ColumnQualifier: []byte("col" + strconv.Itoa(cc)),
|
||||
TimestampMicros: int64((tc + 1) * 1000),
|
||||
Value: []byte{},
|
||||
}},
|
||||
}},
|
||||
}
|
||||
if _, err := s.MutateRow(ctx, req); err != nil {
|
||||
t.Fatalf("Populating table: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
req := &btpb.ReadRowsRequest{
|
||||
TableName: tblInfo.Name,
|
||||
Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}},
|
||||
}
|
||||
mock := &MockReadRowsServer{}
|
||||
if err = s.ReadRows(req, mock); err != nil {
|
||||
t.Errorf("ReadRows error: %v", err)
|
||||
}
|
||||
if len(mock.responses) == 0 {
|
||||
t.Fatal("Response count: got 0, want > 0")
|
||||
}
|
||||
if len(mock.responses[0].Chunks) != 27 {
|
||||
t.Fatal("Chunk count: got %d, want 27", len(mock.responses[0].Chunks))
|
||||
}
|
||||
testOrder := func(ms *MockReadRowsServer) {
|
||||
var prevFam, prevCol string
|
||||
var prevTime int64
|
||||
for _, cc := range ms.responses[0].Chunks {
|
||||
if prevFam == "" {
|
||||
prevFam = cc.FamilyName.Value
|
||||
prevCol = string(cc.Qualifier.Value)
|
||||
prevTime = cc.TimestampMicros
|
||||
continue
|
||||
}
|
||||
if cc.FamilyName.Value < prevFam {
|
||||
t.Errorf("Family order is not correct: got %s < %s", cc.FamilyName.Value, prevFam)
|
||||
} else if cc.FamilyName.Value == prevFam {
|
||||
if string(cc.Qualifier.Value) < prevCol {
|
||||
t.Errorf("Column order is not correct: got %s < %s", string(cc.Qualifier.Value), prevCol)
|
||||
} else if string(cc.Qualifier.Value) == prevCol {
|
||||
if cc.TimestampMicros > prevTime {
|
||||
t.Errorf("cell order is not correct: got %d > %d", cc.TimestampMicros, prevTime)
|
||||
}
|
||||
}
|
||||
}
|
||||
prevFam = cc.FamilyName.Value
|
||||
prevCol = string(cc.Qualifier.Value)
|
||||
prevTime = cc.TimestampMicros
|
||||
}
|
||||
}
|
||||
testOrder(mock)
|
||||
|
||||
// Read with interleave filter
|
||||
inter := &btpb.RowFilter_Interleave{}
|
||||
fnr := &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{"1"}}
|
||||
cqr := &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte("2")}}
|
||||
inter.Filters = append(inter.Filters, fnr, cqr)
|
||||
req = &btpb.ReadRowsRequest{
|
||||
TableName: tblInfo.Name,
|
||||
Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}},
|
||||
Filter: &btpb.RowFilter{
|
||||
Filter: &btpb.RowFilter_Interleave_{inter},
|
||||
},
|
||||
}
|
||||
mock = &MockReadRowsServer{}
|
||||
if err = s.ReadRows(req, mock); err != nil {
|
||||
t.Errorf("ReadRows error: %v", err)
|
||||
}
|
||||
if len(mock.responses) == 0 {
|
||||
t.Fatal("Response count: got 0, want > 0")
|
||||
}
|
||||
if len(mock.responses[0].Chunks) != 18 {
|
||||
t.Fatal("Chunk count: got %d, want 18", len(mock.responses[0].Chunks))
|
||||
}
|
||||
testOrder(mock)
|
||||
|
||||
// Check order after ReadModifyWriteRow
|
||||
rmw := func(i int) *btpb.ReadModifyWriteRowRequest {
|
||||
return &btpb.ReadModifyWriteRowRequest{
|
||||
TableName: tblInfo.Name,
|
||||
RowKey: []byte("row"),
|
||||
Rules: []*btpb.ReadModifyWriteRule{{
|
||||
FamilyName: "cf3",
|
||||
ColumnQualifier: []byte("col" + strconv.Itoa(i)),
|
||||
Rule: &btpb.ReadModifyWriteRule_IncrementAmount{1},
|
||||
}},
|
||||
}
|
||||
}
|
||||
for i := count; i > 0; i-- {
|
||||
s.ReadModifyWriteRow(ctx, rmw(i))
|
||||
}
|
||||
req = &btpb.ReadRowsRequest{
|
||||
TableName: tblInfo.Name,
|
||||
Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}},
|
||||
}
|
||||
mock = &MockReadRowsServer{}
|
||||
if err = s.ReadRows(req, mock); err != nil {
|
||||
t.Errorf("ReadRows error: %v", err)
|
||||
}
|
||||
if len(mock.responses) == 0 {
|
||||
t.Fatal("Response count: got 0, want > 0")
|
||||
}
|
||||
if len(mock.responses[0].Chunks) != 30 {
|
||||
t.Fatal("Chunk count: got %d, want 30", len(mock.responses[0].Chunks))
|
||||
}
|
||||
testOrder(mock)
|
||||
}
|
||||
816
vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go
generated
vendored
Normal file
816
vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go
generated
vendored
Normal file
@@ -0,0 +1,816 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
// Command docs are in cbtdoc.go.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/bigtable"
|
||||
"cloud.google.com/go/bigtable/internal/cbtconfig"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
var (
	oFlag = flag.String("o", "", "if set, redirect stdout to this file")

	// Lazily-initialized shared state; see getClient and friends below.
	config              *cbtconfig.Config
	client              *bigtable.Client
	adminClient         *bigtable.AdminClient
	instanceAdminClient *bigtable.InstanceAdminClient

	// Build metadata placeholders, intended to be overridden at link time.
	version      = "<unknown version>"
	revision     = "<unknown revision>"
	revisionDate = "<unknown revision date>"
)
|
||||
|
||||
func getCredentialOpts(opts []option.ClientOption) []option.ClientOption {
|
||||
if ts := config.TokenSource; ts != nil {
|
||||
opts = append(opts, option.WithTokenSource(ts))
|
||||
}
|
||||
if tlsCreds := config.TLSCreds; tlsCreds != nil {
|
||||
opts = append(opts, option.WithGRPCDialOption(grpc.WithTransportCredentials(tlsCreds)))
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
func getClient() *bigtable.Client {
|
||||
if client == nil {
|
||||
var opts []option.ClientOption
|
||||
if ep := config.DataEndpoint; ep != "" {
|
||||
opts = append(opts, option.WithEndpoint(ep))
|
||||
}
|
||||
opts = getCredentialOpts(opts)
|
||||
var err error
|
||||
client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, opts...)
|
||||
if err != nil {
|
||||
log.Fatalf("Making bigtable.Client: %v", err)
|
||||
}
|
||||
}
|
||||
return client
|
||||
}
|
||||
|
||||
func getAdminClient() *bigtable.AdminClient {
|
||||
if adminClient == nil {
|
||||
var opts []option.ClientOption
|
||||
if ep := config.AdminEndpoint; ep != "" {
|
||||
opts = append(opts, option.WithEndpoint(ep))
|
||||
}
|
||||
opts = getCredentialOpts(opts)
|
||||
var err error
|
||||
adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance, opts...)
|
||||
if err != nil {
|
||||
log.Fatalf("Making bigtable.AdminClient: %v", err)
|
||||
}
|
||||
}
|
||||
return adminClient
|
||||
}
|
||||
|
||||
func getInstanceAdminClient() *bigtable.InstanceAdminClient {
|
||||
if instanceAdminClient == nil {
|
||||
var opts []option.ClientOption
|
||||
if ep := config.AdminEndpoint; ep != "" {
|
||||
opts = append(opts, option.WithEndpoint(ep))
|
||||
}
|
||||
opts = getCredentialOpts(opts)
|
||||
var err error
|
||||
instanceAdminClient, err = bigtable.NewInstanceAdminClient(context.Background(), config.Project, opts...)
|
||||
if err != nil {
|
||||
log.Fatalf("Making bigtable.InstanceAdminClient: %v", err)
|
||||
}
|
||||
}
|
||||
return instanceAdminClient
|
||||
}
|
||||
|
||||
// main dispatches a single cbt subcommand: it loads the config file,
// registers and parses flags, optionally redirects stdout to the -o file,
// then runs the command matching the first positional argument.
func main() {
	var err error
	config, err = cbtconfig.Load()
	if err != nil {
		log.Fatal(err)
	}
	config.RegisterFlags()

	flag.Usage = func() { usage(os.Stderr) }
	flag.Parse()
	if flag.NArg() == 0 {
		usage(os.Stderr)
		os.Exit(1)
	}

	if *oFlag != "" {
		// Redirect stdout for the remainder of the run; the deferred Close
		// surfaces any buffered write error as a fatal exit.
		f, err := os.Create(*oFlag)
		if err != nil {
			log.Fatal(err)
		}
		defer func() {
			if err := f.Close(); err != nil {
				log.Fatal(err)
			}
		}()
		os.Stdout = f
	}

	ctx := context.Background()
	for _, cmd := range commands {
		if cmd.Name == flag.Arg(0) {
			// Each command declares which config fields it needs; verify
			// them before running so errors are reported up front.
			if err := config.CheckFlags(cmd.Required); err != nil {
				log.Fatal(err)
			}
			cmd.do(ctx, flag.Args()[1:]...)
			return
		}
	}
	log.Fatalf("Unknown command %q", flag.Arg(0))
}
|
||||
|
||||
// usage writes the top-level usage message — program synopsis, flag
// defaults, and the generated command summary — to w.
func usage(w io.Writer) {
	fmt.Fprintf(w, "Usage: %s [flags] <command> ...\n", os.Args[0])
	flag.CommandLine.SetOutput(w)
	flag.CommandLine.PrintDefaults()
	fmt.Fprintf(w, "\n%s", cmdSummary)
}
|
||||
|
||||
var cmdSummary string // generated in init, below

// init builds cmdSummary: a tab-aligned table of "cbt <name>  <desc>"
// lines for every command, followed by the config-file help text.
func init() {
	var buf bytes.Buffer
	tw := tabwriter.NewWriter(&buf, 10, 8, 4, '\t', 0)
	for _, cmd := range commands {
		fmt.Fprintf(tw, "cbt %s\t%s\n", cmd.Name, cmd.Desc)
	}
	tw.Flush()
	buf.WriteString(configHelp)
	cmdSummary = buf.String()
}
|
||||
|
||||
// configHelp documents the cbt config file format and the tool version;
// it is appended to cmdSummary in init above.
var configHelp = `
For convenience, values of the -project, -instance, -creds,
-admin-endpoint and -data-endpoint flags may be specified in
` + cbtconfig.Filename() + ` in this format:
	project = my-project-123
	instance = my-instance
	creds = path-to-account-key.json
	admin-endpoint = hostname:port
	data-endpoint = hostname:port
All values are optional, and all will be overridden by flags.

cbt ` + version + ` ` + revision + ` ` + revisionDate + `
`
|
||||
|
||||
// commands is the dispatch table for cbt subcommands. Name is matched
// against the first CLI argument, do runs the command, Usage is printed by
// `cbt help <command>`, and Required names the config flags the command
// needs (checked in main before dispatch).
var commands = []struct {
	Name, Desc string
	do         func(context.Context, ...string)
	Usage      string
	Required   cbtconfig.RequiredFlags
}{
	{
		Name:     "count",
		Desc:     "Count rows in a table",
		do:       doCount,
		Usage:    "cbt count <table>",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name:     "createfamily",
		Desc:     "Create a column family",
		do:       doCreateFamily,
		Usage:    "cbt createfamily <table> <family>",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name: "createtable",
		Desc: "Create a table",
		do:   doCreateTable,
		Usage: "cbt createtable <table> [initial_splits...]\n" +
			"  initial_splits=row		A row key to be used to initially split the table " +
			"into multiple tablets. Can be repeated to create multiple splits.",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name:     "deletefamily",
		Desc:     "Delete a column family",
		do:       doDeleteFamily,
		Usage:    "cbt deletefamily <table> <family>",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name:     "deleterow",
		Desc:     "Delete a row",
		do:       doDeleteRow,
		Usage:    "cbt deleterow <table> <row>",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name:     "deletetable",
		Desc:     "Delete a table",
		do:       doDeleteTable,
		Usage:    "cbt deletetable <table>",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name:     "doc",
		Desc:     "Print godoc-suitable documentation for cbt",
		do:       doDoc,
		Usage:    "cbt doc",
		Required: cbtconfig.NoneRequired,
	},
	{
		Name:     "help",
		Desc:     "Print help text",
		do:       doHelp,
		Usage:    "cbt help [command]",
		Required: cbtconfig.NoneRequired,
	},
	{
		Name:     "listinstances",
		Desc:     "List instances in a project",
		do:       doListInstances,
		Usage:    "cbt listinstances",
		Required: cbtconfig.ProjectRequired,
	},
	{
		Name:     "lookup",
		Desc:     "Read from a single row",
		do:       doLookup,
		Usage:    "cbt lookup <table> <row>",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name: "ls",
		Desc: "List tables and column families",
		do:   doLS,
		Usage: "cbt ls			List tables\n" +
			"cbt ls <table>		List column families in <table>",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name:     "mddoc",
		Desc:     "Print documentation for cbt in Markdown format",
		do:       doMDDoc,
		Usage:    "cbt mddoc",
		Required: cbtconfig.NoneRequired,
	},
	{
		Name: "read",
		Desc: "Read rows",
		do:   doRead,
		Usage: "cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [count=<n>]\n" +
			"  start=<row>		Start reading at this row\n" +
			"  end=<row>		Stop reading before this row\n" +
			"  prefix=<prefix>	Read rows with this prefix\n" +
			"  count=<n>		Read only this many rows\n",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name: "set",
		Desc: "Set value of a cell",
		do:   doSet,
		Usage: "cbt set <table> <row> family:column=val[@ts] ...\n" +
			"  family:column=val[@ts] may be repeated to set multiple cells.\n" +
			"\n" +
			"  ts is an optional integer timestamp.\n" +
			"  If it cannot be parsed, the `@ts` part will be\n" +
			"  interpreted as part of the value.",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name: "setgcpolicy",
		Desc: "Set the GC policy for a column family",
		do:   doSetGCPolicy,
		Usage: "cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> )\n" +
			"\n" +
			`  maxage=<d>		Maximum timestamp age to preserve (e.g. "1h", "4d")` + "\n" +
			"  maxversions=<n>	Maximum number of versions to preserve",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name:     "version",
		Desc:     "Print the current cbt version",
		do:       doVersion,
		Usage:    "cbt version",
		Required: cbtconfig.NoneRequired,
	},
}
|
||||
|
||||
func doCount(ctx context.Context, args ...string) {
|
||||
if len(args) != 1 {
|
||||
log.Fatal("usage: cbt count <table>")
|
||||
}
|
||||
tbl := getClient().Open(args[0])
|
||||
|
||||
n := 0
|
||||
err := tbl.ReadRows(ctx, bigtable.InfiniteRange(""), func(_ bigtable.Row) bool {
|
||||
n++
|
||||
return true
|
||||
}, bigtable.RowFilter(bigtable.StripValueFilter()))
|
||||
if err != nil {
|
||||
log.Fatalf("Reading rows: %v", err)
|
||||
}
|
||||
fmt.Println(n)
|
||||
}
|
||||
|
||||
func doCreateFamily(ctx context.Context, args ...string) {
|
||||
if len(args) != 2 {
|
||||
log.Fatal("usage: cbt createfamily <table> <family>")
|
||||
}
|
||||
err := getAdminClient().CreateColumnFamily(ctx, args[0], args[1])
|
||||
if err != nil {
|
||||
log.Fatalf("Creating column family: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func doCreateTable(ctx context.Context, args ...string) {
|
||||
if len(args) < 1 {
|
||||
log.Fatal("usage: cbt createtable <table> [initial_splits...]")
|
||||
}
|
||||
var err error
|
||||
if len(args) > 1 {
|
||||
splits := args[1:]
|
||||
err = getAdminClient().CreatePresplitTable(ctx, args[0], splits)
|
||||
} else {
|
||||
err = getAdminClient().CreateTable(ctx, args[0])
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("Creating table: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func doDeleteFamily(ctx context.Context, args ...string) {
|
||||
if len(args) != 2 {
|
||||
log.Fatal("usage: cbt deletefamily <table> <family>")
|
||||
}
|
||||
err := getAdminClient().DeleteColumnFamily(ctx, args[0], args[1])
|
||||
if err != nil {
|
||||
log.Fatalf("Deleting column family: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func doDeleteRow(ctx context.Context, args ...string) {
|
||||
if len(args) != 2 {
|
||||
log.Fatal("usage: cbt deleterow <table> <row>")
|
||||
}
|
||||
tbl := getClient().Open(args[0])
|
||||
mut := bigtable.NewMutation()
|
||||
mut.DeleteRow()
|
||||
if err := tbl.Apply(ctx, args[1], mut); err != nil {
|
||||
log.Fatalf("Deleting row: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func doDeleteTable(ctx context.Context, args ...string) {
|
||||
if len(args) != 1 {
|
||||
log.Fatalf("Can't do `cbt deletetable %s`", args)
|
||||
}
|
||||
err := getAdminClient().DeleteTable(ctx, args[0])
|
||||
if err != nil {
|
||||
log.Fatalf("Deleting table: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Indirection for the doc/help/mddoc commands, wired up in the init below,
// to break circular dependencies: the commands table references these
// functions, and their real implementations reference the commands table.
var (
	doDocFn   func(ctx context.Context, args ...string)
	doHelpFn  func(ctx context.Context, args ...string)
	doMDDocFn func(ctx context.Context, args ...string)
)

func init() {
	doDocFn = doDocReal
	doHelpFn = doHelpReal
	doMDDocFn = doMDDocReal
}

// Thin wrappers that forward to the function variables above.
func doDoc(ctx context.Context, args ...string)   { doDocFn(ctx, args...) }
func doHelp(ctx context.Context, args ...string)  { doHelpFn(ctx, args...) }
func doMDDoc(ctx context.Context, args ...string) { doMDDocFn(ctx, args...) }
|
||||
|
||||
// docFlags returns the subset of registered flags that belong in the
// generated documentation, in a fixed order. It exits if any expected
// flag has not been registered.
func docFlags() []*flag.Flag {
	// Only include specific flags, in a specific order.
	names := []string{"project", "instance", "creds"}
	flags := make([]*flag.Flag, 0, len(names))
	for _, name := range names {
		f := flag.Lookup(name)
		if f == nil {
			log.Fatalf("Flag not linked: -%s", name)
		}
		flags = append(flags, f)
	}
	return flags
}
|
||||
|
||||
func doDocReal(ctx context.Context, args ...string) {
|
||||
data := map[string]interface{}{
|
||||
"Commands": commands,
|
||||
"Flags": docFlags(),
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := docTemplate.Execute(&buf, data); err != nil {
|
||||
log.Fatalf("Bad doc template: %v", err)
|
||||
}
|
||||
out, err := format.Source(buf.Bytes())
|
||||
if err != nil {
|
||||
log.Fatalf("Bad doc output: %v", err)
|
||||
}
|
||||
os.Stdout.Write(out)
|
||||
}
|
||||
|
||||
// indentLines prefixes every line of s (including empty ones) with ind.
func indentLines(s, ind string) string {
	lines := strings.Split(s, "\n")
	for i := range lines {
		lines[i] = ind + lines[i]
	}
	return strings.Join(lines, "\n")
}
|
||||
|
||||
// docTemplate renders the generated cbtdoc.go source (license header,
// go:generate directive, and a package doc comment listing every command
// and flag). Its output is gofmt-formatted by doDocReal before writing.
var docTemplate = template.Must(template.New("doc").Funcs(template.FuncMap{
	"indent": indentLines,
}).
	Parse(`
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED.
// Run "go generate" to regenerate.
//go:generate go run cbt.go -o cbtdoc.go doc

/*
Cbt is a tool for doing basic interactions with Cloud Bigtable.

Usage:

	cbt [options] command [arguments]

The commands are:
{{range .Commands}}
	{{printf "%-25s %s" .Name .Desc}}{{end}}

Use "cbt help <command>" for more information about a command.

The options are:
{{range .Flags}}
	-{{.Name}} string
		{{.Usage}}{{end}}

{{range .Commands}}
{{.Desc}}

Usage:
{{indent .Usage "\t"}}



{{end}}
*/
package main
`))
|
||||
|
||||
func doHelpReal(ctx context.Context, args ...string) {
|
||||
if len(args) == 0 {
|
||||
usage(os.Stdout)
|
||||
return
|
||||
}
|
||||
for _, cmd := range commands {
|
||||
if cmd.Name == args[0] {
|
||||
fmt.Println(cmd.Usage)
|
||||
return
|
||||
}
|
||||
}
|
||||
log.Fatalf("Don't know command %q", args[0])
|
||||
}
|
||||
|
||||
func doListInstances(ctx context.Context, args ...string) {
|
||||
if len(args) != 0 {
|
||||
log.Fatalf("usage: cbt listinstances")
|
||||
}
|
||||
is, err := getInstanceAdminClient().Instances(ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Getting list of instances: %v", err)
|
||||
}
|
||||
tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0)
|
||||
fmt.Fprintf(tw, "Instance Name\tInfo\n")
|
||||
fmt.Fprintf(tw, "-------------\t----\n")
|
||||
for _, i := range is {
|
||||
fmt.Fprintf(tw, "%s\t%s\n", i.Name, i.DisplayName)
|
||||
}
|
||||
tw.Flush()
|
||||
}
|
||||
|
||||
func doLookup(ctx context.Context, args ...string) {
|
||||
if len(args) != 2 {
|
||||
log.Fatalf("usage: cbt lookup <table> <row>")
|
||||
}
|
||||
table, row := args[0], args[1]
|
||||
tbl := getClient().Open(table)
|
||||
r, err := tbl.ReadRow(ctx, row)
|
||||
if err != nil {
|
||||
log.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
printRow(r)
|
||||
}
|
||||
|
||||
func printRow(r bigtable.Row) {
|
||||
fmt.Println(strings.Repeat("-", 40))
|
||||
fmt.Println(r.Key())
|
||||
|
||||
var fams []string
|
||||
for fam := range r {
|
||||
fams = append(fams, fam)
|
||||
}
|
||||
sort.Strings(fams)
|
||||
for _, fam := range fams {
|
||||
ris := r[fam]
|
||||
sort.Sort(byColumn(ris))
|
||||
for _, ri := range ris {
|
||||
ts := time.Unix(0, int64(ri.Timestamp)*1e3)
|
||||
fmt.Printf(" %-40s @ %s\n", ri.Column, ts.Format("2006/01/02-15:04:05.000000"))
|
||||
fmt.Printf(" %q\n", ri.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// byColumn sorts ReadItems lexicographically by column name.
type byColumn []bigtable.ReadItem

func (b byColumn) Len() int           { return len(b) }
func (b byColumn) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column }

// byFamilyName sorts FamilyInfos lexicographically by family name.
type byFamilyName []bigtable.FamilyInfo

func (b byFamilyName) Len() int           { return len(b) }
func (b byFamilyName) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byFamilyName) Less(i, j int) bool { return b[i].Name < b[j].Name }
|
||||
|
||||
func doLS(ctx context.Context, args ...string) {
|
||||
switch len(args) {
|
||||
default:
|
||||
log.Fatalf("Can't do `cbt ls %s`", args)
|
||||
case 0:
|
||||
tables, err := getAdminClient().Tables(ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Getting list of tables: %v", err)
|
||||
}
|
||||
sort.Strings(tables)
|
||||
for _, table := range tables {
|
||||
fmt.Println(table)
|
||||
}
|
||||
case 1:
|
||||
table := args[0]
|
||||
ti, err := getAdminClient().TableInfo(ctx, table)
|
||||
if err != nil {
|
||||
log.Fatalf("Getting table info: %v", err)
|
||||
}
|
||||
sort.Sort(byFamilyName(ti.FamilyInfos))
|
||||
tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0)
|
||||
fmt.Fprintf(tw, "Family Name\tGC Policy\n")
|
||||
fmt.Fprintf(tw, "-----------\t---------\n")
|
||||
for _, fam := range ti.FamilyInfos {
|
||||
fmt.Fprintf(tw, "%s\t%s\n", fam.Name, fam.GCPolicy)
|
||||
}
|
||||
tw.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
func doMDDocReal(ctx context.Context, args ...string) {
|
||||
data := map[string]interface{}{
|
||||
"Commands": commands,
|
||||
"Flags": docFlags(),
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := mddocTemplate.Execute(&buf, data); err != nil {
|
||||
log.Fatalf("Bad mddoc template: %v", err)
|
||||
}
|
||||
io.Copy(os.Stdout, &buf)
|
||||
}
|
||||
|
||||
// mddocTemplate renders the cbt reference in Markdown: the command list,
// the flag list, and one "## <desc>" section per command with its usage.
var mddocTemplate = template.Must(template.New("mddoc").Funcs(template.FuncMap{
	"indent": indentLines,
}).
	Parse(`
Cbt is a tool for doing basic interactions with Cloud Bigtable.

Usage:

	cbt [options] command [arguments]

The commands are:
{{range .Commands}}
	{{printf "%-25s %s" .Name .Desc}}{{end}}

Use "cbt help <command>" for more information about a command.

The options are:
{{range .Flags}}
	-{{.Name}} string
		{{.Usage}}{{end}}

{{range .Commands}}
## {{.Desc}}

{{indent .Usage "\t"}}



{{end}}
`))
|
||||
|
||||
func doRead(ctx context.Context, args ...string) {
|
||||
if len(args) < 1 {
|
||||
log.Fatalf("usage: cbt read <table> [args ...]")
|
||||
}
|
||||
tbl := getClient().Open(args[0])
|
||||
|
||||
parsed := make(map[string]string)
|
||||
for _, arg := range args[1:] {
|
||||
i := strings.Index(arg, "=")
|
||||
if i < 0 {
|
||||
log.Fatalf("Bad arg %q", arg)
|
||||
}
|
||||
key, val := arg[:i], arg[i+1:]
|
||||
switch key {
|
||||
default:
|
||||
log.Fatalf("Unknown arg key %q", key)
|
||||
case "limit":
|
||||
// Be nicer; we used to support this, but renamed it to "end".
|
||||
log.Fatalf("Unknown arg key %q; did you mean %q?", key, "end")
|
||||
case "start", "end", "prefix", "count":
|
||||
parsed[key] = val
|
||||
}
|
||||
}
|
||||
if (parsed["start"] != "" || parsed["end"] != "") && parsed["prefix"] != "" {
|
||||
log.Fatal(`"start"/"end" may not be mixed with "prefix"`)
|
||||
}
|
||||
|
||||
var rr bigtable.RowRange
|
||||
if start, end := parsed["start"], parsed["end"]; end != "" {
|
||||
rr = bigtable.NewRange(start, end)
|
||||
} else if start != "" {
|
||||
rr = bigtable.InfiniteRange(start)
|
||||
}
|
||||
if prefix := parsed["prefix"]; prefix != "" {
|
||||
rr = bigtable.PrefixRange(prefix)
|
||||
}
|
||||
|
||||
var opts []bigtable.ReadOption
|
||||
if count := parsed["count"]; count != "" {
|
||||
n, err := strconv.ParseInt(count, 0, 64)
|
||||
if err != nil {
|
||||
log.Fatalf("Bad count %q: %v", count, err)
|
||||
}
|
||||
opts = append(opts, bigtable.LimitRows(n))
|
||||
}
|
||||
|
||||
// TODO(dsymonds): Support filters.
|
||||
err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool {
|
||||
printRow(r)
|
||||
return true
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
log.Fatalf("Reading rows: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// setArg matches one `cbt set` argument: family:column=value (the value
// may still carry an optional @timestamp suffix, parsed separately).
var setArg = regexp.MustCompile(`([^:]+):([^=]*)=(.*)`)
|
||||
|
||||
func doSet(ctx context.Context, args ...string) {
|
||||
if len(args) < 3 {
|
||||
log.Fatalf("usage: cbt set <table> <row> family:[column]=val[@ts] ...")
|
||||
}
|
||||
tbl := getClient().Open(args[0])
|
||||
row := args[1]
|
||||
mut := bigtable.NewMutation()
|
||||
for _, arg := range args[2:] {
|
||||
m := setArg.FindStringSubmatch(arg)
|
||||
if m == nil {
|
||||
log.Fatalf("Bad set arg %q", arg)
|
||||
}
|
||||
val := m[3]
|
||||
ts := bigtable.Now()
|
||||
if i := strings.LastIndex(val, "@"); i >= 0 {
|
||||
// Try parsing a timestamp.
|
||||
n, err := strconv.ParseInt(val[i+1:], 0, 64)
|
||||
if err == nil {
|
||||
val = val[:i]
|
||||
ts = bigtable.Timestamp(n)
|
||||
}
|
||||
}
|
||||
mut.Set(m[1], m[2], ts, []byte(val))
|
||||
}
|
||||
if err := tbl.Apply(ctx, row, mut); err != nil {
|
||||
log.Fatalf("Applying mutation: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// doSetGCPolicy implements the "setgcpolicy" command: it sets the GC policy
// of one column family to either a maximum cell age ("maxage=<d>") or a
// maximum number of versions ("maxversions=<n>"). It exits the process via
// log.Fatal* on any error.
func doSetGCPolicy(ctx context.Context, args ...string) {
	if len(args) < 3 {
		log.Fatalf("usage: cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> )")
	}
	table := args[0]
	fam := args[1]

	var pol bigtable.GCPolicy
	switch p := args[2]; {
	case strings.HasPrefix(p, "maxage="):
		d, err := parseDuration(p[7:]) // strip the "maxage=" prefix
		if err != nil {
			log.Fatal(err)
		}
		pol = bigtable.MaxAgePolicy(d)
	case strings.HasPrefix(p, "maxversions="):
		// Parsed with a 16-bit limit, so the version count is capped at 65535.
		n, err := strconv.ParseUint(p[12:], 10, 16) // strip the "maxversions=" prefix
		if err != nil {
			log.Fatal(err)
		}
		pol = bigtable.MaxVersionsPolicy(int(n))
	default:
		log.Fatalf("Bad GC policy %q", p)
	}
	if err := getAdminClient().SetGCPolicy(ctx, table, fam, pol); err != nil {
		log.Fatalf("Setting GC policy: %v", err)
	}
}
|
||||
|
||||
// parseDuration parses a duration string.
|
||||
// It is similar to Go's time.ParseDuration, except with a different set of supported units,
|
||||
// and only simple formats supported.
|
||||
func parseDuration(s string) (time.Duration, error) {
|
||||
// [0-9]+[a-z]+
|
||||
|
||||
// Split [0-9]+ from [a-z]+.
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if c < '0' || c > '9' {
|
||||
break
|
||||
}
|
||||
}
|
||||
ds, u := s[:i], s[i:]
|
||||
if ds == "" || u == "" {
|
||||
return 0, fmt.Errorf("invalid duration %q", s)
|
||||
}
|
||||
// Parse them.
|
||||
d, err := strconv.ParseUint(ds, 10, 32)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("invalid duration %q: %v", s, err)
|
||||
}
|
||||
unit, ok := unitMap[u]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("unknown unit %q in duration %q", u, s)
|
||||
}
|
||||
if d > uint64((1<<63-1)/unit) {
|
||||
// overflow
|
||||
return 0, fmt.Errorf("invalid duration %q overflows", s)
|
||||
}
|
||||
return time.Duration(d) * unit, nil
|
||||
}
|
||||
|
||||
var unitMap = map[string]time.Duration{
|
||||
"ms": time.Millisecond,
|
||||
"s": time.Second,
|
||||
"m": time.Minute,
|
||||
"h": time.Hour,
|
||||
"d": 24 * time.Hour,
|
||||
}
|
||||
|
||||
func doVersion(ctx context.Context, args ...string) {
|
||||
fmt.Printf("%s %s %s\n", version, revision, revisionDate)
|
||||
}
|
||||
59
vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go
generated
vendored
Normal file
59
vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go
generated
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestParseDuration exercises parseDuration with valid inputs (checking the
// exact resulting time.Duration) and with invalid inputs (expecting an error).
func TestParseDuration(t *testing.T) {
	tests := []struct {
		in string
		// out or fail are mutually exclusive
		out  time.Duration
		fail bool
	}{
		{in: "10ms", out: 10 * time.Millisecond},
		{in: "3s", out: 3 * time.Second},
		{in: "60m", out: 60 * time.Minute},
		{in: "12h", out: 12 * time.Hour},
		{in: "7d", out: 168 * time.Hour},

		{in: "", fail: true},
		{in: "0", fail: true},       // bare number, no unit
		{in: "7ns", fail: true},     // unsupported unit
		{in: "14mo", fail: true},    // unsupported unit
		{in: "3.5h", fail: true},    // fractional values not supported
		{in: "106752d", fail: true}, // overflow
	}
	for _, tc := range tests {
		got, err := parseDuration(tc.in)
		if !tc.fail && err != nil {
			t.Errorf("parseDuration(%q) unexpectedly failed: %v", tc.in, err)
			continue
		}
		if tc.fail && err == nil {
			t.Errorf("parseDuration(%q) did not fail", tc.in)
			continue
		}
		if tc.fail {
			// Expected failure confirmed; no value to compare.
			continue
		}
		if got != tc.out {
			t.Errorf("parseDuration(%q) = %v, want %v", tc.in, got, tc.out)
		}
	}
}
|
||||
191
vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go
generated
vendored
Normal file
191
vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go
generated
vendored
Normal file
@@ -0,0 +1,191 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED.
|
||||
// Run "go generate" to regenerate.
|
||||
//go:generate go run cbt.go -o cbtdoc.go doc
|
||||
|
||||
/*
|
||||
Cbt is a tool for doing basic interactions with Cloud Bigtable.
|
||||
|
||||
Usage:
|
||||
|
||||
cbt [options] command [arguments]
|
||||
|
||||
The commands are:
|
||||
|
||||
count Count rows in a table
|
||||
createfamily Create a column family
|
||||
createtable Create a table
|
||||
deletefamily Delete a column family
|
||||
deleterow Delete a row
|
||||
deletetable Delete a table
|
||||
doc Print godoc-suitable documentation for cbt
|
||||
help Print help text
|
||||
listinstances List instances in a project
|
||||
lookup Read from a single row
|
||||
ls List tables and column families
|
||||
mddoc Print documentation for cbt in Markdown format
|
||||
read Read rows
|
||||
set Set value of a cell
|
||||
setgcpolicy Set the GC policy for a column family
|
||||
|
||||
Use "cbt help <command>" for more information about a command.
|
||||
|
||||
The options are:
|
||||
|
||||
-project string
|
||||
project ID
|
||||
-instance string
|
||||
Cloud Bigtable instance
|
||||
-creds string
|
||||
if set, use application credentials in this file
|
||||
|
||||
|
||||
Count rows in a table
|
||||
|
||||
Usage:
|
||||
cbt count <table>
|
||||
|
||||
|
||||
|
||||
|
||||
Create a column family
|
||||
|
||||
Usage:
|
||||
cbt createfamily <table> <family>
|
||||
|
||||
|
||||
|
||||
|
||||
Create a table
|
||||
|
||||
Usage:
|
||||
cbt createtable <table>
|
||||
|
||||
|
||||
|
||||
|
||||
Delete a column family
|
||||
|
||||
Usage:
|
||||
cbt deletefamily <table> <family>
|
||||
|
||||
|
||||
|
||||
|
||||
Delete a row
|
||||
|
||||
Usage:
|
||||
cbt deleterow <table> <row>
|
||||
|
||||
|
||||
|
||||
|
||||
Delete a table
|
||||
|
||||
Usage:
|
||||
cbt deletetable <table>
|
||||
|
||||
|
||||
|
||||
|
||||
Print godoc-suitable documentation for cbt
|
||||
|
||||
Usage:
|
||||
cbt doc
|
||||
|
||||
|
||||
|
||||
|
||||
Print help text
|
||||
|
||||
Usage:
|
||||
cbt help [command]
|
||||
|
||||
|
||||
|
||||
|
||||
List instances in a project
|
||||
|
||||
Usage:
|
||||
cbt listinstances
|
||||
|
||||
|
||||
|
||||
|
||||
Read from a single row
|
||||
|
||||
Usage:
|
||||
cbt lookup <table> <row>
|
||||
|
||||
|
||||
|
||||
|
||||
List tables and column families
|
||||
|
||||
Usage:
|
||||
cbt ls List tables
|
||||
cbt ls <table> List column families in <table>
|
||||
|
||||
|
||||
|
||||
|
||||
Print documentation for cbt in Markdown format
|
||||
|
||||
Usage:
|
||||
cbt mddoc
|
||||
|
||||
|
||||
|
||||
|
||||
Read rows
|
||||
|
||||
Usage:
|
||||
cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [count=<n>]
|
||||
start=<row> Start reading at this row
|
||||
end=<row> Stop reading before this row
|
||||
prefix=<prefix> Read rows with this prefix
|
||||
count=<n> Read only this many rows
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Set value of a cell
|
||||
|
||||
Usage:
|
||||
cbt set <table> <row> family:column=val[@ts] ...
|
||||
family:column=val[@ts] may be repeated to set multiple cells.
|
||||
|
||||
ts is an optional integer timestamp.
|
||||
If it cannot be parsed, the `@ts` part will be
|
||||
interpreted as part of the value.
|
||||
|
||||
|
||||
|
||||
|
||||
Set the GC policy for a column family
|
||||
|
||||
Usage:
|
||||
cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> )
|
||||
|
||||
maxage=<d> Maximum timestamp age to preserve (e.g. "1h", "4d")
|
||||
maxversions=<n> Maximum number of versions to preserve
|
||||
|
||||
|
||||
|
||||
|
||||
*/
|
||||
package main
|
||||
44
vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go
generated
vendored
Normal file
44
vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
cbtemulator launches the in-memory Cloud Bigtable server on the given address.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"cloud.google.com/go/bigtable/bttest"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// Command-line flags choosing the local address to serve the emulator on.
var (
	host = flag.String("host", "localhost", "the address to bind to on the local machine")
	port = flag.Int("port", 9000, "the port number to bind to on the local machine")
)

// main starts the in-memory Cloud Bigtable server on host:port and then
// blocks forever; the process must be terminated externally.
func main() {
	grpc.EnableTracing = false
	flag.Parse()
	srv, err := bttest.NewServer(fmt.Sprintf("%s:%d", *host, *port))
	if err != nil {
		log.Fatalf("failed to start emulator: %v", err)
	}

	fmt.Printf("Cloud Bigtable emulator running on %s\n", srv.Addr)
	select {} // block forever
}
|
||||
198
vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go
generated
vendored
Normal file
198
vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go
generated
vendored
Normal file
@@ -0,0 +1,198 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Loadtest does some load testing through the Go client library for Cloud Bigtable.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/bigtable"
|
||||
"cloud.google.com/go/bigtable/internal/cbtconfig"
|
||||
"cloud.google.com/go/bigtable/internal/stat"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/option"
|
||||
)
|
||||
|
||||
var (
	// Flags controlling the shape of the load test.
	runFor = flag.Duration("run_for", 5*time.Second,
		"how long to run the load test for; 0 to run forever until SIGTERM")
	scratchTable = flag.String("scratch_table", "loadtest-scratch", "name of table to use; should not already exist")
	csvOutput    = flag.String("csv_output", "",
		"output path for statistics in .csv format. If this file already exists it will be overwritten.")
	poolSize = flag.Int("pool_size", 1, "size of the gRPC connection pool to use for the data client")
	reqCount = flag.Int("req_count", 100, "number of concurrent requests")

	// Shared clients/config, initialized once in main.
	config      *cbtconfig.Config
	client      *bigtable.Client
	adminClient *bigtable.AdminClient
)
|
||||
|
||||
// main runs a read/write load test against a freshly created scratch table:
// it fires off a mix of reads and writes (5 of 10 operations are writes, per
// the switch below) with at most -req_count outstanding at once, records
// per-operation latency, then prints aggregate statistics and optionally
// writes them to a CSV file. The scratch table is deleted on exit and on
// interrupt.
func main() {
	var err error
	config, err = cbtconfig.Load()
	if err != nil {
		log.Fatal(err)
	}
	config.RegisterFlags()

	flag.Parse()
	if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil {
		log.Fatal(err)
	}
	if config.Creds != "" {
		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
	}
	if flag.NArg() != 0 {
		flag.Usage()
		os.Exit(1)
	}

	var options []option.ClientOption
	if *poolSize > 1 {
		options = append(options, option.WithGRPCConnectionPool(*poolSize))
	}

	var csvFile *os.File
	if *csvOutput != "" {
		csvFile, err = os.Create(*csvOutput)
		if err != nil {
			log.Fatalf("creating csv output file: %v", err)
		}
		defer csvFile.Close()
		log.Printf("Writing statistics to %q ...", *csvOutput)
	}

	log.Printf("Dialing connections...")
	client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, options...)
	if err != nil {
		log.Fatalf("Making bigtable.Client: %v", err)
	}
	defer client.Close()
	adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance)
	if err != nil {
		log.Fatalf("Making bigtable.AdminClient: %v", err)
	}
	defer adminClient.Close()

	// Create a scratch table.
	log.Printf("Setting up scratch table...")
	if err := adminClient.CreateTable(context.Background(), *scratchTable); err != nil {
		log.Fatalf("Making scratch table %q: %v", *scratchTable, err)
	}
	if err := adminClient.CreateColumnFamily(context.Background(), *scratchTable, "f"); err != nil {
		log.Fatalf("Making scratch table column family: %v", err)
	}
	// Upon a successful run, delete the table. Don't bother checking for errors.
	defer adminClient.DeleteTable(context.Background(), *scratchTable)

	// Also delete the table on SIGTERM.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		s := <-c
		log.Printf("Caught %v, cleaning scratch table.", s)
		adminClient.DeleteTable(context.Background(), *scratchTable)
		os.Exit(1)
	}()

	log.Printf("Starting load test... (run for %v)", *runFor)
	tbl := client.Open(*scratchTable)
	sem := make(chan int, *reqCount) // limit the number of requests happening at once
	var reads, writes stats
	stopTime := time.Now().Add(*runFor)
	var wg sync.WaitGroup
	for time.Now().Before(stopTime) || *runFor == 0 {
		sem <- 1 // blocks once *reqCount operations are in flight
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-sem }()

			ok := true
			opStart := time.Now()
			// Points at whichever aggregate (reads or writes) this
			// operation should be recorded into; set in the switch below.
			var stats *stats
			defer func() {
				stats.Record(ok, time.Since(opStart))
			}()

			row := fmt.Sprintf("row%d", rand.Intn(100)) // operate on 1 of 100 rows

			switch rand.Intn(10) {
			default:
				// read (cases 5-9, i.e. half of the operations)
				stats = &reads
				_, err := tbl.ReadRow(context.Background(), row, bigtable.RowFilter(bigtable.LatestNFilter(1)))
				if err != nil {
					log.Printf("Error doing read: %v", err)
					ok = false
				}
			case 0, 1, 2, 3, 4:
				// write (the other half of the operations)
				stats = &writes
				mut := bigtable.NewMutation()
				mut.Set("f", "col", bigtable.Now(), bytes.Repeat([]byte("0"), 1<<10)) // 1 KB write
				if err := tbl.Apply(context.Background(), row, mut); err != nil {
					log.Printf("Error doing mutation: %v", err)
					ok = false
				}
			}
		}()
	}
	wg.Wait()

	readsAgg := stat.NewAggregate("reads", reads.ds, reads.tries-reads.ok)
	writesAgg := stat.NewAggregate("writes", writes.ds, writes.tries-writes.ok)
	log.Printf("Reads (%d ok / %d tries):\n%v", reads.ok, reads.tries, readsAgg)
	log.Printf("Writes (%d ok / %d tries):\n%v", writes.ok, writes.tries, writesAgg)

	if csvFile != nil {
		stat.WriteCSV([]*stat.Aggregate{readsAgg, writesAgg}, csvFile)
	}
}
|
||||
|
||||
// allStats counts completed operations across all goroutines; it is read and
// updated atomically.
var allStats int64 // atomic

// stats accumulates the attempt count, success count, and per-operation
// latencies for one class of operation. All fields are guarded by mu, so it
// is safe for concurrent use by the worker goroutines.
type stats struct {
	mu        sync.Mutex
	tries, ok int
	ds        []time.Duration
}

// Record notes one completed operation: whether it succeeded and how long it
// took. Every 1000th operation overall (across all stats instances) it also
// logs a progress line.
func (s *stats) Record(ok bool, d time.Duration) {
	s.mu.Lock()
	s.tries++
	if ok {
		s.ok++
	}
	s.ds = append(s.ds, d)
	s.mu.Unlock()

	// Progress reporting happens outside the lock.
	if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 {
		log.Printf("Progress: done %d ops", n)
	}
}
|
||||
155
vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go
generated
vendored
Normal file
155
vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go
generated
vendored
Normal file
@@ -0,0 +1,155 @@
|
||||
/*
|
||||
Copyright 2016 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Scantest does scan-related load testing against Cloud Bigtable. The logic here
|
||||
mimics a similar test written using the Java client.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/bigtable"
|
||||
"cloud.google.com/go/bigtable/internal/cbtconfig"
|
||||
"cloud.google.com/go/bigtable/internal/stat"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var (
	// Flags controlling the shape of the scan test.
	runFor   = flag.Duration("run_for", 5*time.Second, "how long to run the load test for")
	numScans = flag.Int("concurrent_scans", 1, "number of concurrent scans")
	rowLimit = flag.Int("row_limit", 10000, "max number of records per scan")

	// Shared config/client, initialized once in main.
	config *cbtconfig.Config
	client *bigtable.Client
)
|
||||
|
||||
// main runs a scan load test against the named (pre-existing) table: it keeps
// up to -concurrent_scans row scans in flight, each starting at a random
// "user<N>" key and reading at most -row_limit rows, until -run_for elapses;
// it then prints scan-time and throughput statistics.
func main() {
	flag.Usage = func() {
		fmt.Printf("Usage: scantest [options] <table_name>\n\n")
		flag.PrintDefaults()
	}

	var err error
	config, err = cbtconfig.Load()
	if err != nil {
		log.Fatal(err)
	}
	config.RegisterFlags()

	flag.Parse()
	if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil {
		log.Fatal(err)
	}
	if config.Creds != "" {
		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
	}
	if flag.NArg() != 1 {
		flag.Usage()
		os.Exit(1)
	}

	table := flag.Arg(0)

	log.Printf("Dialing connections...")
	client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance)
	if err != nil {
		log.Fatalf("Making bigtable.Client: %v", err)
	}
	defer client.Close()

	log.Printf("Starting scan test... (run for %v)", *runFor)
	tbl := client.Open(table)
	sem := make(chan int, *numScans) // limit the number of requests happening at once
	var scans stats

	stopTime := time.Now().Add(*runFor)
	var wg sync.WaitGroup
	for time.Now().Before(stopTime) {
		sem <- 1 // blocks once *numScans scans are in flight
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-sem }()

			ok := true
			opStart := time.Now()
			defer func() {
				scans.Record(ok, time.Since(opStart))
			}()

			// Start at a random row key
			key := fmt.Sprintf("user%d", rand.Int63())
			limit := bigtable.LimitRows(int64(*rowLimit))
			noop := func(bigtable.Row) bool { return true }
			if err := tbl.ReadRows(context.Background(), bigtable.NewRange(key, ""), noop, limit); err != nil {
				log.Printf("Error during scan: %v", err)
				ok = false
			}
		}()
	}
	wg.Wait()

	agg := stat.NewAggregate("scans", scans.ds, scans.tries-scans.ok)
	log.Printf("Scans (%d ok / %d tries):\nscan times:\n%v\nthroughput (rows/second):\n%v",
		scans.ok, scans.tries, agg, throughputString(agg))
}
|
||||
|
||||
func throughputString(agg *stat.Aggregate) string {
|
||||
var buf bytes.Buffer
|
||||
tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding
|
||||
rowLimitF := float64(*rowLimit)
|
||||
fmt.Fprintf(
|
||||
tw,
|
||||
"min:\t%.2f\nmedian:\t%.2f\nmax:\t%.2f\n",
|
||||
rowLimitF/agg.Max.Seconds(),
|
||||
rowLimitF/agg.Median.Seconds(),
|
||||
rowLimitF/agg.Min.Seconds())
|
||||
tw.Flush()
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// allStats counts completed operations across all goroutines; it is read and
// updated atomically.
var allStats int64 // atomic

// stats accumulates the attempt count, success count, and per-operation
// latencies for the scans. All fields are guarded by mu, so it is safe for
// concurrent use by the worker goroutines.
type stats struct {
	mu        sync.Mutex
	tries, ok int
	ds        []time.Duration
}

// Record notes one completed operation: whether it succeeded and how long it
// took. Every 1000th operation overall it also logs a progress line.
func (s *stats) Record(ok bool, d time.Duration) {
	s.mu.Lock()
	s.tries++
	if ok {
		s.ok++
	}
	s.ds = append(s.ds, d)
	s.mu.Unlock()

	// Progress reporting happens outside the lock.
	if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 {
		log.Printf("Progress: done %d ops", n)
	}
}
|
||||
125
vendor/cloud.google.com/go/bigtable/doc.go
generated
vendored
Normal file
125
vendor/cloud.google.com/go/bigtable/doc.go
generated
vendored
Normal file
@@ -0,0 +1,125 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Package bigtable is an API to Google Cloud Bigtable.
|
||||
|
||||
See https://cloud.google.com/bigtable/docs/ for general product documentation.
|
||||
|
||||
Setup and Credentials
|
||||
|
||||
Use NewClient or NewAdminClient to create a client that can be used to access
|
||||
the data or admin APIs respectively. Both require credentials that have permission
|
||||
to access the Cloud Bigtable API.
|
||||
|
||||
If your program is run on Google App Engine or Google Compute Engine, using the Application Default Credentials
|
||||
(https://developers.google.com/accounts/docs/application-default-credentials)
|
||||
is the simplest option. Those credentials will be used by default when NewClient or NewAdminClient are called.
|
||||
|
||||
To use alternate credentials, pass them to NewClient or NewAdminClient using option.WithTokenSource.
|
||||
For instance, you can use service account credentials by visiting
|
||||
https://cloud.google.com/console/project/MYPROJECT/apiui/credential,
|
||||
creating a new OAuth "Client ID", storing the JSON key somewhere accessible, and writing
|
||||
jsonKey, err := ioutil.ReadFile(pathToKeyFile)
|
||||
...
|
||||
config, err := google.JWTConfigFromJSON(jsonKey, bigtable.Scope) // or bigtable.AdminScope, etc.
|
||||
...
|
||||
client, err := bigtable.NewClient(ctx, project, instance, option.WithTokenSource(config.TokenSource(ctx)))
|
||||
...
|
||||
Here, `google` means the golang.org/x/oauth2/google package
|
||||
and `option` means the google.golang.org/api/option package.
|
||||
|
||||
Reading
|
||||
|
||||
The principal way to read from a Bigtable is to use the ReadRows method on *Table.
|
||||
A RowRange specifies a contiguous portion of a table. A Filter may be provided through
|
||||
RowFilter to limit or transform the data that is returned.
|
||||
tbl := client.Open("mytable")
|
||||
...
|
||||
// Read all the rows starting with "com.google.",
|
||||
// but only fetch the columns in the "links" family.
|
||||
rr := bigtable.PrefixRange("com.google.")
|
||||
err := tbl.ReadRows(ctx, rr, func(r Row) bool {
|
||||
// do something with r
|
||||
return true // keep going
|
||||
}, bigtable.RowFilter(bigtable.FamilyFilter("links")))
|
||||
...
|
||||
|
||||
To read a single row, use the ReadRow helper method.
|
||||
r, err := tbl.ReadRow(ctx, "com.google.cloud") // "com.google.cloud" is the entire row key
|
||||
...
|
||||
|
||||
Writing
|
||||
|
||||
This API exposes two distinct forms of writing to a Bigtable: a Mutation and a ReadModifyWrite.
|
||||
The former expresses idempotent operations.
|
||||
The latter expresses non-idempotent operations and returns the new values of updated cells.
|
||||
These operations are performed by creating a Mutation or ReadModifyWrite (with NewMutation or NewReadModifyWrite),
|
||||
building up one or more operations on that, and then using the Apply or ApplyReadModifyWrite
|
||||
methods on a Table.
|
||||
|
||||
For instance, to set a couple of cells in a table,
|
||||
tbl := client.Open("mytable")
|
||||
mut := bigtable.NewMutation()
|
||||
mut.Set("links", "maps.google.com", bigtable.Now(), []byte("1"))
|
||||
mut.Set("links", "golang.org", bigtable.Now(), []byte("1"))
|
||||
err := tbl.Apply(ctx, "com.google.cloud", mut)
|
||||
...
|
||||
|
||||
To increment an encoded value in one cell,
|
||||
tbl := client.Open("mytable")
|
||||
rmw := bigtable.NewReadModifyWrite()
|
||||
rmw.Increment("links", "golang.org", 12) // add 12 to the cell in column "links:golang.org"
|
||||
r, err := tbl.ApplyReadModifyWrite(ctx, "com.google.cloud", rmw)
|
||||
...
|
||||
|
||||
Retries
|
||||
|
||||
If a read or write operation encounters a transient error it will be retried until a successful
|
||||
response, an unretryable error or the context deadline is reached. Non-idempotent writes (where
|
||||
the timestamp is set to ServerTime) will not be retried. In the case of ReadRows, retried calls
|
||||
will not re-scan rows that have already been processed.
|
||||
|
||||
Authentication
|
||||
|
||||
See examples of authorization and authentication at
|
||||
https://godoc.org/cloud.google.com/go#pkg-examples.
|
||||
|
||||
*/
|
||||
package bigtable // import "cloud.google.com/go/bigtable"
|
||||
|
||||
// Scope constants for authentication credentials.
// These should be used when using credential creation functions such as oauth.NewServiceAccountFromFile.
const (
	// Scope is the OAuth scope for Cloud Bigtable data operations.
	Scope = "https://www.googleapis.com/auth/bigtable.data"
	// ReadonlyScope is the OAuth scope for Cloud Bigtable read-only data operations.
	ReadonlyScope = "https://www.googleapis.com/auth/bigtable.readonly"

	// AdminScope is the OAuth scope for Cloud Bigtable table admin operations.
	AdminScope = "https://www.googleapis.com/auth/bigtable.admin.table"

	// InstanceAdminScope is the OAuth scope for Cloud Bigtable instance (and cluster) admin operations.
	InstanceAdminScope = "https://www.googleapis.com/auth/bigtable.admin.cluster"
)

// clientUserAgent identifies the version of this package.
// It should be bumped upon significant changes only.
const clientUserAgent = "cbt-go/20160628"

// resourcePrefixHeader is the name of the metadata header used to indicate
// the resource being operated on.
const resourcePrefixHeader = "google-cloud-resource-prefix"
|
||||
221
vendor/cloud.google.com/go/bigtable/export_test.go
generated
vendored
Normal file
221
vendor/cloud.google.com/go/bigtable/export_test.go
generated
vendored
Normal file
@@ -0,0 +1,221 @@
|
||||
/*
|
||||
Copyright 2016 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package bigtable
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/bigtable/bttest"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// legacyUseProd holds the value of the deprecated -use_prod flag
// ("proj,instance,table").
var legacyUseProd string

// integrationConfig is populated from the -it.* command line flags.
var integrationConfig IntegrationTestConfig

// init registers the integration-test command line flags, binding the -it.*
// flags directly to fields of integrationConfig.
func init() {
	c := &integrationConfig

	flag.BoolVar(&c.UseProd, "it.use-prod", false, "Use remote bigtable instead of local emulator")
	flag.StringVar(&c.AdminEndpoint, "it.admin-endpoint", "", "Admin api host and port")
	flag.StringVar(&c.DataEndpoint, "it.data-endpoint", "", "Data api host and port")
	flag.StringVar(&c.Project, "it.project", "", "Project to use for integration test")
	flag.StringVar(&c.Instance, "it.instance", "", "Bigtable instance to use")
	flag.StringVar(&c.Cluster, "it.cluster", "", "Bigtable cluster to use")
	flag.StringVar(&c.Table, "it.table", "", "Bigtable table to create")

	// Backwards compat
	flag.StringVar(&legacyUseProd, "use_prod", "", `DEPRECATED: if set to "proj,instance,table", run integration test against production`)

}
|
||||
|
||||
// IntegrationTestConfig contains parameters to pick and setup a IntegrationEnv for testing
type IntegrationTestConfig struct {
	UseProd       bool   // run against production rather than the local emulator
	AdminEndpoint string // admin API host and port (empty for the default)
	DataEndpoint  string // data API host and port (empty for the default)
	Project       string
	Instance      string
	Cluster       string
	Table         string
}
|
||||
|
||||
// IntegrationEnv represents a testing environment.
// The environment can be implemented using production or an emulator
type IntegrationEnv interface {
	// Config returns the configuration this environment was built from.
	Config() IntegrationTestConfig
	NewAdminClient() (*AdminClient, error)
	// NewInstanceAdminClient will return nil if instance administration is unsupported in this environment
	NewInstanceAdminClient() (*InstanceAdminClient, error)
	NewClient() (*Client, error)
	Close()
}
|
||||
|
||||
// NewIntegrationEnv creates a new environment based on the command line args
|
||||
func NewIntegrationEnv() (IntegrationEnv, error) {
|
||||
c := integrationConfig
|
||||
|
||||
if legacyUseProd != "" {
|
||||
fmt.Println("WARNING: using legacy commandline arg -use_prod, please switch to -it.*")
|
||||
parts := strings.SplitN(legacyUseProd, ",", 3)
|
||||
c.UseProd = true
|
||||
c.Project = parts[0]
|
||||
c.Instance = parts[1]
|
||||
c.Table = parts[2]
|
||||
}
|
||||
|
||||
if integrationConfig.UseProd {
|
||||
return NewProdEnv(c)
|
||||
} else {
|
||||
return NewEmulatedEnv(c)
|
||||
}
|
||||
}
|
||||
|
||||
// EmulatedEnv encapsulates the state of an emulator
type EmulatedEnv struct {
	config IntegrationTestConfig // config with both endpoints pointed at the emulator
	server *bttest.Server        // in-process Bigtable emulator
}
|
||||
|
||||
// NewEmulatedEnv builds and starts the emulator based environment
|
||||
func NewEmulatedEnv(config IntegrationTestConfig) (*EmulatedEnv, error) {
|
||||
srv, err := bttest.NewServer("127.0.0.1:0")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if config.Project == "" {
|
||||
config.Project = "project"
|
||||
}
|
||||
if config.Instance == "" {
|
||||
config.Instance = "instance"
|
||||
}
|
||||
if config.Table == "" {
|
||||
config.Table = "mytable"
|
||||
}
|
||||
config.AdminEndpoint = srv.Addr
|
||||
config.DataEndpoint = srv.Addr
|
||||
|
||||
env := &EmulatedEnv{
|
||||
config: config,
|
||||
server: srv,
|
||||
}
|
||||
return env, nil
|
||||
}
|
||||
|
||||
// Close stops & cleans up the emulator
|
||||
func (e *EmulatedEnv) Close() {
|
||||
e.server.Close()
|
||||
}
|
||||
|
||||
// Config gets the config used to build this environment
|
||||
func (e *EmulatedEnv) Config() IntegrationTestConfig {
|
||||
return e.config
|
||||
}
|
||||
|
||||
// NewAdminClient builds a new connected admin client for this environment
func (e *EmulatedEnv) NewAdminClient() (*AdminClient, error) {
	timeout := 20 * time.Second
	// NOTE(review): the cancel func returned by WithTimeout is discarded
	// (go vet "lostcancel"), so the timeout's resources are only released
	// when the deadline fires. Before changing this to defer cancel(),
	// confirm NewAdminClient does not retain ctx beyond construction.
	ctx, _ := context.WithTimeout(context.Background(), timeout)
	// Plaintext connection: the emulator does not use TLS.
	conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure())
	if err != nil {
		return nil, err
	}
	return NewAdminClient(ctx, e.config.Project, e.config.Instance, option.WithGRPCConn(conn))
}
|
||||
|
||||
// NewInstanceAdminClient returns nil for the emulated environment since the API is not implemented.
// Note both return values are nil; callers must nil-check the client
// (see the IntegrationEnv interface contract).
func (e *EmulatedEnv) NewInstanceAdminClient() (*InstanceAdminClient, error) {
	return nil, nil
}
|
||||
|
||||
// NewClient builds a new connected data client for this environment
func (e *EmulatedEnv) NewClient() (*Client, error) {
	timeout := 20 * time.Second
	// NOTE(review): cancel func discarded (go vet "lostcancel") — see the
	// matching note on NewAdminClient before fixing.
	ctx, _ := context.WithTimeout(context.Background(), timeout)
	// Plaintext connection: the emulator does not use TLS.
	conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure())
	if err != nil {
		return nil, err
	}
	return NewClient(ctx, e.config.Project, e.config.Instance, option.WithGRPCConn(conn))
}
|
||||
|
||||
// ProdEnv encapsulates the state necessary to connect to the external Bigtable service
type ProdEnv struct {
	config IntegrationTestConfig // validated by NewProdEnv
}
|
||||
|
||||
// NewProdEnv builds the environment representation
|
||||
func NewProdEnv(config IntegrationTestConfig) (*ProdEnv, error) {
|
||||
if config.Project == "" {
|
||||
return nil, errors.New("Project not set")
|
||||
}
|
||||
if config.Instance == "" {
|
||||
return nil, errors.New("Instance not set")
|
||||
}
|
||||
if config.Table == "" {
|
||||
return nil, errors.New("Table not set")
|
||||
}
|
||||
|
||||
return &ProdEnv{config}, nil
|
||||
}
|
||||
|
||||
// Close is a no-op for production environments
func (e *ProdEnv) Close() {}

// Config gets the config used to build this environment.
// It returns a copy (struct value), so mutations by the caller do not
// affect the environment.
func (e *ProdEnv) Config() IntegrationTestConfig {
	return e.config
}
|
||||
|
||||
// NewAdminClient builds a new connected admin client for this environment
func (e *ProdEnv) NewAdminClient() (*AdminClient, error) {
	timeout := 20 * time.Second
	// NOTE(review): the cancel func from WithTimeout is discarded (go vet
	// "lostcancel"). Do not blindly defer cancel() here: ctx is handed to
	// NewAdminClient and may be retained by the client (e.g. for credential
	// refresh) — confirm before changing.
	ctx, _ := context.WithTimeout(context.Background(), timeout)
	var clientOpts []option.ClientOption
	// An empty AdminEndpoint means "use the library's default endpoint".
	if endpoint := e.config.AdminEndpoint; endpoint != "" {
		clientOpts = append(clientOpts, option.WithEndpoint(endpoint))
	}
	return NewAdminClient(ctx, e.config.Project, e.config.Instance, clientOpts...)
}
|
||||
|
||||
// NewInstanceAdminClient returns a new connected instance admin client for this environment
func (e *ProdEnv) NewInstanceAdminClient() (*InstanceAdminClient, error) {
	timeout := 20 * time.Second
	// NOTE(review): cancel func discarded (go vet "lostcancel") — see the
	// matching note on ProdEnv.NewAdminClient before fixing.
	ctx, _ := context.WithTimeout(context.Background(), timeout)
	var clientOpts []option.ClientOption
	// Instance administration reuses the admin endpoint override.
	if endpoint := e.config.AdminEndpoint; endpoint != "" {
		clientOpts = append(clientOpts, option.WithEndpoint(endpoint))
	}
	return NewInstanceAdminClient(ctx, e.config.Project, clientOpts...)
}
|
||||
|
||||
// NewClient builds a connected data client for this environment
func (e *ProdEnv) NewClient() (*Client, error) {
	timeout := 20 * time.Second
	// NOTE(review): cancel func discarded (go vet "lostcancel") — see the
	// matching note on ProdEnv.NewAdminClient before fixing.
	ctx, _ := context.WithTimeout(context.Background(), timeout)
	var clientOpts []option.ClientOption
	// An empty DataEndpoint means "use the library's default endpoint".
	if endpoint := e.config.DataEndpoint; endpoint != "" {
		clientOpts = append(clientOpts, option.WithEndpoint(endpoint))
	}
	return NewClient(ctx, e.config.Project, e.config.Instance, clientOpts...)
}
|
||||
318
vendor/cloud.google.com/go/bigtable/filter.go
generated
vendored
Normal file
318
vendor/cloud.google.com/go/bigtable/filter.go
generated
vendored
Normal file
@@ -0,0 +1,318 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package bigtable
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
|
||||
)
|
||||
|
||||
// A Filter represents a row filter.
type Filter interface {
	// String returns a human-readable description of the filter.
	String() string
	// proto converts the filter to its RowFilter wire representation.
	proto() *btpb.RowFilter
}
|
||||
|
||||
// ChainFilters returns a filter that applies a sequence of filters.
func ChainFilters(sub ...Filter) Filter { return chainFilter{sub} }

// chainFilter holds the ordered sub-filters of a chain.
type chainFilter struct {
	sub []Filter
}

// String renders the chain as "(f1 | f2 | ...)".
func (cf chainFilter) String() string {
	var ss []string
	for _, sf := range cf.sub {
		ss = append(ss, sf.String())
	}
	return "(" + strings.Join(ss, " | ") + ")"
}

// proto builds a RowFilter_Chain containing each sub-filter's proto, in order.
func (cf chainFilter) proto() *btpb.RowFilter {
	chain := &btpb.RowFilter_Chain{}
	for _, sf := range cf.sub {
		chain.Filters = append(chain.Filters, sf.proto())
	}
	return &btpb.RowFilter{
		Filter: &btpb.RowFilter_Chain_{chain},
	}
}
|
||||
|
||||
// InterleaveFilters returns a filter that applies a set of filters in parallel
// and interleaves the results.
func InterleaveFilters(sub ...Filter) Filter { return interleaveFilter{sub} }

// interleaveFilter holds the sub-filters applied in parallel.
type interleaveFilter struct {
	sub []Filter
}

// String renders the interleave as "(f1 + f2 + ...)".
func (ilf interleaveFilter) String() string {
	var ss []string
	for _, sf := range ilf.sub {
		ss = append(ss, sf.String())
	}
	return "(" + strings.Join(ss, " + ") + ")"
}

// proto builds a RowFilter_Interleave containing each sub-filter's proto.
func (ilf interleaveFilter) proto() *btpb.RowFilter {
	inter := &btpb.RowFilter_Interleave{}
	for _, sf := range ilf.sub {
		inter.Filters = append(inter.Filters, sf.proto())
	}
	return &btpb.RowFilter{
		Filter: &btpb.RowFilter_Interleave_{inter},
	}
}
|
||||
|
||||
// RowKeyFilter returns a filter that matches cells from rows whose
// key matches the provided RE2 pattern.
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
func RowKeyFilter(pattern string) Filter { return rowKeyFilter(pattern) }

// rowKeyFilter holds the RE2 pattern applied to row keys.
type rowKeyFilter string

// String renders the filter as "row(<pattern>)".
func (rkf rowKeyFilter) String() string { return fmt.Sprintf("row(%s)", string(rkf)) }

func (rkf rowKeyFilter) proto() *btpb.RowFilter {
	// The pattern travels as bytes in the RowKeyRegexFilter field.
	return &btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{[]byte(rkf)}}
}
|
||||
|
||||
// FamilyFilter returns a filter that matches cells whose family name
// matches the provided RE2 pattern.
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
func FamilyFilter(pattern string) Filter { return familyFilter(pattern) }

// familyFilter holds the RE2 pattern applied to column-family names.
type familyFilter string

// String renders the filter as "col(<pattern>:)".
func (ff familyFilter) String() string { return fmt.Sprintf("col(%s:)", string(ff)) }

func (ff familyFilter) proto() *btpb.RowFilter {
	return &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{string(ff)}}
}
|
||||
|
||||
// ColumnFilter returns a filter that matches cells whose column name
// matches the provided RE2 pattern.
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
func ColumnFilter(pattern string) Filter { return columnFilter(pattern) }

// columnFilter holds the RE2 pattern applied to column qualifiers.
type columnFilter string

// String renders the filter as "col(.*:<pattern>)".
func (cf columnFilter) String() string { return fmt.Sprintf("col(.*:%s)", string(cf)) }

func (cf columnFilter) proto() *btpb.RowFilter {
	// The pattern travels as bytes in the ColumnQualifierRegexFilter field.
	return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte(cf)}}
}
|
||||
|
||||
// ValueFilter returns a filter that matches cells whose value
// matches the provided RE2 pattern.
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
func ValueFilter(pattern string) Filter { return valueFilter(pattern) }

// valueFilter holds the RE2 pattern applied to cell values.
type valueFilter string

// String renders the filter as "value_match(<pattern>)".
func (vf valueFilter) String() string { return fmt.Sprintf("value_match(%s)", string(vf)) }

func (vf valueFilter) proto() *btpb.RowFilter {
	return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{[]byte(vf)}}
}
|
||||
|
||||
// LatestNFilter returns a filter that matches the most recent N cells in each column.
func LatestNFilter(n int) Filter { return latestNFilter(n) }

// latestNFilter is the per-column cell limit; note the narrowing int -> int32
// in the constructor above.
type latestNFilter int32

// String renders the filter as "col(*,<n>)".
func (lnf latestNFilter) String() string { return fmt.Sprintf("col(*,%d)", lnf) }

func (lnf latestNFilter) proto() *btpb.RowFilter {
	return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerColumnLimitFilter{int32(lnf)}}
}
|
||||
|
||||
// StripValueFilter returns a filter that replaces each value with the empty string.
func StripValueFilter() Filter { return stripValueFilter{} }

// stripValueFilter is stateless; the transform is fully described by its proto.
type stripValueFilter struct{}

// String renders the filter as "strip_value()".
func (stripValueFilter) String() string { return "strip_value()" }
func (stripValueFilter) proto() *btpb.RowFilter {
	return &btpb.RowFilter{Filter: &btpb.RowFilter_StripValueTransformer{true}}
}
|
||||
|
||||
// TimestampRangeFilter returns a filter that matches any cells whose timestamp is within the given time bounds. A zero
|
||||
// time means no bound.
|
||||
// The timestamp will be truncated to millisecond granularity.
|
||||
func TimestampRangeFilter(startTime time.Time, endTime time.Time) Filter {
|
||||
trf := timestampRangeFilter{}
|
||||
if !startTime.IsZero() {
|
||||
trf.startTime = Time(startTime)
|
||||
}
|
||||
if !endTime.IsZero() {
|
||||
trf.endTime = Time(endTime)
|
||||
}
|
||||
return trf
|
||||
}
|
||||
|
||||
// TimestampRangeFilterMicros returns a filter that matches any cells whose timestamp is within the given time bounds,
// specified in units of microseconds since 1 January 1970. A zero value for the end time is interpreted as no bound.
// The timestamp will be truncated to millisecond granularity.
func TimestampRangeFilterMicros(startTime Timestamp, endTime Timestamp) Filter {
	return timestampRangeFilter{startTime, endTime}
}

// timestampRangeFilter bounds cell timestamps; a zero value means no bound
// on that side.
type timestampRangeFilter struct {
	startTime Timestamp
	endTime   Timestamp
}
|
||||
|
||||
// String renders the filter as "timestamp_range(<start>,<end>)".
func (trf timestampRangeFilter) String() string {
	return fmt.Sprintf("timestamp_range(%s,%s)", trf.startTime, trf.endTime)
}

func (trf timestampRangeFilter) proto() *btpb.RowFilter {
	// Both bounds are truncated to millisecond granularity before being
	// placed (positionally) into the TimestampRange message.
	return &btpb.RowFilter{
		Filter: &btpb.RowFilter_TimestampRangeFilter{
			&btpb.TimestampRange{
				int64(trf.startTime.TruncateToMilliseconds()),
				int64(trf.endTime.TruncateToMilliseconds()),
			},
		}}
}
|
||||
|
||||
// ColumnRangeFilter returns a filter that matches a contiguous range of columns within a single
// family, as specified by an inclusive start qualifier and exclusive end qualifier.
func ColumnRangeFilter(family, start, end string) Filter {
	return columnRangeFilter{family, start, end}
}

// columnRangeFilter selects qualifiers in [start, end) within one family.
// An empty start or end leaves that side of the range unbounded.
type columnRangeFilter struct {
	family string
	start  string
	end    string
}

// String renders the filter as "columnRangeFilter(<family>,<start>,<end>)".
func (crf columnRangeFilter) String() string {
	return fmt.Sprintf("columnRangeFilter(%s,%s,%s)", crf.family, crf.start, crf.end)
}

func (crf columnRangeFilter) proto() *btpb.RowFilter {
	r := &btpb.ColumnRange{FamilyName: crf.family}
	if crf.start != "" {
		// Closed (inclusive) lower bound.
		r.StartQualifier = &btpb.ColumnRange_StartQualifierClosed{[]byte(crf.start)}
	}
	if crf.end != "" {
		// Open (exclusive) upper bound.
		r.EndQualifier = &btpb.ColumnRange_EndQualifierOpen{[]byte(crf.end)}
	}
	return &btpb.RowFilter{&btpb.RowFilter_ColumnRangeFilter{r}}
}
|
||||
|
||||
// ValueRangeFilter returns a filter that matches cells with values that fall within
// the given range, as specified by an inclusive start value and exclusive end value.
func ValueRangeFilter(start, end []byte) Filter {
	return valueRangeFilter{start, end}
}

// valueRangeFilter selects values in [start, end).
// A nil start or end leaves that side of the range unbounded.
type valueRangeFilter struct {
	start []byte
	end   []byte
}

// String renders the filter as "valueRangeFilter(<start>,<end>)".
func (vrf valueRangeFilter) String() string {
	return fmt.Sprintf("valueRangeFilter(%s,%s)", vrf.start, vrf.end)
}

func (vrf valueRangeFilter) proto() *btpb.RowFilter {
	r := &btpb.ValueRange{}
	if vrf.start != nil {
		// Closed (inclusive) lower bound.
		r.StartValue = &btpb.ValueRange_StartValueClosed{vrf.start}
	}
	if vrf.end != nil {
		// Open (exclusive) upper bound.
		r.EndValue = &btpb.ValueRange_EndValueOpen{vrf.end}
	}
	return &btpb.RowFilter{&btpb.RowFilter_ValueRangeFilter{r}}
}
|
||||
|
||||
// ConditionFilter returns a filter that evaluates to one of two possible filters depending
// on whether or not the given predicate filter matches at least one cell.
// If the matched filter is nil then no results will be returned.
// IMPORTANT NOTE: The predicate filter does not execute atomically with the
// true and false filters, which may lead to inconsistent or unexpected
// results. Additionally, condition filters have poor performance, especially
// when filters are set for the false condition.
func ConditionFilter(predicateFilter, trueFilter, falseFilter Filter) Filter {
	return conditionFilter{predicateFilter, trueFilter, falseFilter}
}

// conditionFilter selects trueFilter or falseFilter based on predicateFilter.
// trueFilter and falseFilter may be nil; predicateFilter must not be nil,
// since proto dereferences it unconditionally.
type conditionFilter struct {
	predicateFilter Filter
	trueFilter      Filter
	falseFilter     Filter
}

// String renders the filter as "conditionFilter(<pred>,<true>,<false>)".
func (cf conditionFilter) String() string {
	return fmt.Sprintf("conditionFilter(%s,%s,%s)", cf.predicateFilter, cf.trueFilter, cf.falseFilter)
}

func (cf conditionFilter) proto() *btpb.RowFilter {
	// nil true/false filters are passed through as nil sub-messages.
	var tf *btpb.RowFilter
	var ff *btpb.RowFilter
	if cf.trueFilter != nil {
		tf = cf.trueFilter.proto()
	}
	if cf.falseFilter != nil {
		ff = cf.falseFilter.proto()
	}
	return &btpb.RowFilter{
		&btpb.RowFilter_Condition_{&btpb.RowFilter_Condition{
			cf.predicateFilter.proto(),
			tf,
			ff,
		}}}
}
|
||||
|
||||
// CellsPerRowOffsetFilter returns a filter that skips the first N cells of each row, matching all subsequent cells.
func CellsPerRowOffsetFilter(n int) Filter {
	return cellsPerRowOffsetFilter(n)
}

// cellsPerRowOffsetFilter is the per-row skip count; note the narrowing
// int -> int32 in the constructor above.
type cellsPerRowOffsetFilter int32

// String renders the filter as "cells_per_row_offset(<n>)".
func (cof cellsPerRowOffsetFilter) String() string {
	return fmt.Sprintf("cells_per_row_offset(%d)", cof)
}

func (cof cellsPerRowOffsetFilter) proto() *btpb.RowFilter {
	return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowOffsetFilter{int32(cof)}}
}
|
||||
|
||||
// CellsPerRowLimitFilter returns a filter that matches only the first N cells of each row.
func CellsPerRowLimitFilter(n int) Filter {
	return cellsPerRowLimitFilter(n)
}

// cellsPerRowLimitFilter is the per-row cell limit; note the narrowing
// int -> int32 in the constructor above.
type cellsPerRowLimitFilter int32

// String renders the filter as "cells_per_row_limit(<n>)".
func (clf cellsPerRowLimitFilter) String() string {
	return fmt.Sprintf("cells_per_row_limit(%d)", clf)
}

func (clf cellsPerRowLimitFilter) proto() *btpb.RowFilter {
	return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowLimitFilter{int32(clf)}}
}
|
||||
|
||||
// TODO(dsymonds): More filters: sampling
|
||||
158
vendor/cloud.google.com/go/bigtable/gc.go
generated
vendored
Normal file
158
vendor/cloud.google.com/go/bigtable/gc.go
generated
vendored
Normal file
@@ -0,0 +1,158 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package bigtable
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
durpb "github.com/golang/protobuf/ptypes/duration"
|
||||
bttdpb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
|
||||
)
|
||||
|
||||
// A GCPolicy represents a rule that determines which cells are eligible for garbage collection.
type GCPolicy interface {
	// String returns a human-readable description of the policy.
	String() string
	// proto converts the policy to its admin-API wire representation.
	proto() *bttdpb.GcRule
}
|
||||
|
||||
// IntersectionPolicy returns a GC policy that only applies when all its sub-policies apply.
func IntersectionPolicy(sub ...GCPolicy) GCPolicy { return intersectionPolicy{sub} }

// intersectionPolicy is the conjunction of its sub-policies.
type intersectionPolicy struct {
	sub []GCPolicy
}

// String renders the policy as "(p1 && p2 && ...)".
func (ip intersectionPolicy) String() string {
	var ss []string
	for _, sp := range ip.sub {
		ss = append(ss, sp.String())
	}
	return "(" + strings.Join(ss, " && ") + ")"
}

// proto builds a GcRule_Intersection containing each sub-policy's proto.
func (ip intersectionPolicy) proto() *bttdpb.GcRule {
	inter := &bttdpb.GcRule_Intersection{}
	for _, sp := range ip.sub {
		inter.Rules = append(inter.Rules, sp.proto())
	}
	return &bttdpb.GcRule{
		Rule: &bttdpb.GcRule_Intersection_{inter},
	}
}
|
||||
|
||||
// UnionPolicy returns a GC policy that applies when any of its sub-policies apply.
func UnionPolicy(sub ...GCPolicy) GCPolicy { return unionPolicy{sub} }

// unionPolicy is the disjunction of its sub-policies.
type unionPolicy struct {
	sub []GCPolicy
}

// String renders the policy as "(p1 || p2 || ...)".
func (up unionPolicy) String() string {
	var ss []string
	for _, sp := range up.sub {
		ss = append(ss, sp.String())
	}
	return "(" + strings.Join(ss, " || ") + ")"
}

// proto builds a GcRule_Union containing each sub-policy's proto.
func (up unionPolicy) proto() *bttdpb.GcRule {
	union := &bttdpb.GcRule_Union{}
	for _, sp := range up.sub {
		union.Rules = append(union.Rules, sp.proto())
	}
	return &bttdpb.GcRule{
		Rule: &bttdpb.GcRule_Union_{union},
	}
}
|
||||
|
||||
// MaxVersionsPolicy returns a GC policy that applies to all versions of a cell
// except for the most recent n.
func MaxVersionsPolicy(n int) GCPolicy { return maxVersionsPolicy(n) }

// maxVersionsPolicy keeps the most recent n versions; note the narrowing
// int -> int32 when building the proto.
type maxVersionsPolicy int

// String renders the policy as "versions() > <n>".
func (mvp maxVersionsPolicy) String() string { return fmt.Sprintf("versions() > %d", int(mvp)) }

func (mvp maxVersionsPolicy) proto() *bttdpb.GcRule {
	return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{int32(mvp)}}
}
|
||||
|
||||
// MaxAgePolicy returns a GC policy that applies to all cells
// older than the given age.
// The duration is stored as a maxAgePolicy (a time.Duration).
func MaxAgePolicy(d time.Duration) GCPolicy { return maxAgePolicy(d) }
|
||||
// maxAgePolicy is the age threshold beyond which cells are collected.
type maxAgePolicy time.Duration

// units lists the display units for String, largest first; the first unit
// that divides the duration evenly is used.
var units = []struct {
	d      time.Duration
	suffix string
}{
	{24 * time.Hour, "d"},
	{time.Hour, "h"},
	{time.Minute, "m"},
}

// String renders the policy as "age() > <n><unit>", using the largest unit
// that divides the duration exactly. Durations that fit no unit fall back
// to a bare microsecond count with no suffix.
func (ma maxAgePolicy) String() string {
	dur := time.Duration(ma)
	for _, unit := range units {
		if dur%unit.d == 0 {
			return fmt.Sprintf("age() > %d%s", dur/unit.d, unit.suffix)
		}
	}
	return fmt.Sprintf("age() > %d", dur/time.Microsecond)
}
|
||||
|
||||
// proto converts the age threshold to a duration proto, splitting the
// nanosecond count into whole seconds plus fractional nanos.
func (ma maxAgePolicy) proto() *bttdpb.GcRule {
	// This doesn't handle overflows, etc.
	// Fix this if people care about GC policies over 290 years.
	ns := time.Duration(ma).Nanoseconds()
	return &bttdpb.GcRule{
		Rule: &bttdpb.GcRule_MaxAge{&durpb.Duration{
			Seconds: ns / 1e9,
			Nanos:   int32(ns % 1e9),
		}},
	}
}
|
||||
|
||||
// GCRuleToString converts the given GcRule proto to a user-visible string.
|
||||
func GCRuleToString(rule *bttdpb.GcRule) string {
|
||||
if rule == nil {
|
||||
return "<default>"
|
||||
}
|
||||
var ruleStr string
|
||||
if r, ok := rule.Rule.(*bttdpb.GcRule_MaxNumVersions); ok {
|
||||
ruleStr += MaxVersionsPolicy(int(r.MaxNumVersions)).String()
|
||||
} else if r, ok := rule.Rule.(*bttdpb.GcRule_MaxAge); ok {
|
||||
ruleStr += MaxAgePolicy(time.Duration(r.MaxAge.Seconds) * time.Second).String()
|
||||
} else if r, ok := rule.Rule.(*bttdpb.GcRule_Intersection_); ok {
|
||||
var chunks []string
|
||||
for _, intRule := range r.Intersection.Rules {
|
||||
chunks = append(chunks, GCRuleToString(intRule))
|
||||
}
|
||||
ruleStr += "(" + strings.Join(chunks, " && ") + ")"
|
||||
} else if r, ok := rule.Rule.(*bttdpb.GcRule_Union_); ok {
|
||||
var chunks []string
|
||||
for _, unionRule := range r.Union.Rules {
|
||||
chunks = append(chunks, GCRuleToString(unionRule))
|
||||
}
|
||||
ruleStr += "(" + strings.Join(chunks, " || ") + ")"
|
||||
}
|
||||
|
||||
return ruleStr
|
||||
}
|
||||
46
vendor/cloud.google.com/go/bigtable/gc_test.go
generated
vendored
Normal file
46
vendor/cloud.google.com/go/bigtable/gc_test.go
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
/*
|
||||
Copyright 2017 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package bigtable
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
bttdpb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
|
||||
)
|
||||
|
||||
// TestGcRuleToString round-trips policies through their proto form and
// checks the rendered strings, including nested intersection/union rules.
func TestGcRuleToString(t *testing.T) {

	// An intersection nested inside a union exercises the recursive cases.
	intersection := IntersectionPolicy(MaxVersionsPolicy(5), MaxVersionsPolicy(10), MaxAgePolicy(16*time.Hour))

	var tests = []struct {
		proto *bttdpb.GcRule
		want  string
	}{
		{MaxAgePolicy(72 * time.Hour).proto(), "age() > 3d"},
		{MaxVersionsPolicy(5).proto(), "versions() > 5"},
		{intersection.proto(), "(versions() > 5 && versions() > 10 && age() > 16h)"},
		{UnionPolicy(intersection, MaxAgePolicy(72*time.Hour)).proto(),
			"((versions() > 5 && versions() > 10 && age() > 16h) || age() > 3d)"},
	}

	for _, test := range tests {
		got := GCRuleToString(test.proto)
		if got != test.want {
			t.Errorf("got gc rule string: %v, wanted: %v", got, test.want)
		}
	}
}
|
||||
246
vendor/cloud.google.com/go/bigtable/internal/cbtconfig/cbtconfig.go
generated
vendored
Normal file
246
vendor/cloud.google.com/go/bigtable/internal/cbtconfig/cbtconfig.go
generated
vendored
Normal file
@@ -0,0 +1,246 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package cbtconfig encapsulates common code for reading configuration from .cbtrc and gcloud.
|
||||
package cbtconfig
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
// Config represents a configuration.
// The required/optional fields are set from flags and/or the .cbtrc file;
// the derived fields are computed by CheckFlags and SetFromGcloud.
type Config struct {
	Project, Instance string                           // required
	Creds             string                           // optional
	AdminEndpoint     string                           // optional
	DataEndpoint      string                           // optional
	CertFile          string                           // optional
	TokenSource       oauth2.TokenSource               // derived
	TLSCreds          credentials.TransportCredentials // derived
}
|
||||
|
||||
// RequiredFlags is a bitmask describing which Config fields CheckFlags
// must verify are set.
type RequiredFlags uint

// NoneRequired means CheckFlags performs no required-field validation.
const NoneRequired RequiredFlags = 0
const (
	ProjectRequired RequiredFlags = 1 << iota
	InstanceRequired
)

// ProjectAndInstanceRequired requires both -project and -instance.
const ProjectAndInstanceRequired RequiredFlags = ProjectRequired | InstanceRequired
|
||||
|
||||
// RegisterFlags registers a set of standard flags for this config.
// It should be called before flag.Parse.
// Each flag's default is the value already in c (e.g. loaded from .cbtrc
// via Load), so command-line flags override the config file.
func (c *Config) RegisterFlags() {
	flag.StringVar(&c.Project, "project", c.Project, "project ID, if unset uses gcloud configured project")
	flag.StringVar(&c.Instance, "instance", c.Instance, "Cloud Bigtable instance")
	flag.StringVar(&c.Creds, "creds", c.Creds, "if set, use application credentials in this file")
	flag.StringVar(&c.AdminEndpoint, "admin-endpoint", c.AdminEndpoint, "Override the admin api endpoint")
	flag.StringVar(&c.DataEndpoint, "data-endpoint", c.DataEndpoint, "Override the data api endpoint")
	flag.StringVar(&c.CertFile, "cert-file", c.CertFile, "Override the TLS certificates file")
}
|
||||
|
||||
// CheckFlags checks that the required config values are set.
|
||||
func (c *Config) CheckFlags(required RequiredFlags) error {
|
||||
var missing []string
|
||||
if c.CertFile != "" {
|
||||
b, err := ioutil.ReadFile(c.CertFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to load certificates from %s: %v", c.CertFile, err)
|
||||
}
|
||||
|
||||
cp := x509.NewCertPool()
|
||||
if !cp.AppendCertsFromPEM(b) {
|
||||
return fmt.Errorf("Failed to append certificates from %s", c.CertFile)
|
||||
}
|
||||
|
||||
c.TLSCreds = credentials.NewTLS(&tls.Config{RootCAs: cp})
|
||||
}
|
||||
if required != NoneRequired {
|
||||
c.SetFromGcloud()
|
||||
}
|
||||
if required&ProjectRequired != 0 && c.Project == "" {
|
||||
missing = append(missing, "-project")
|
||||
}
|
||||
if required&InstanceRequired != 0 && c.Instance == "" {
|
||||
missing = append(missing, "-instance")
|
||||
}
|
||||
if len(missing) > 0 {
|
||||
return fmt.Errorf("Missing %s", strings.Join(missing, " and "))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Filename returns the filename consulted for standard configuration:
// ".cbtrc" under the HOME directory.
func Filename() string {
	// TODO(dsymonds): Might need tweaking for Windows.
	home := os.Getenv("HOME")
	return filepath.Join(home, ".cbtrc")
}
|
||||
|
||||
// Load loads a .cbtrc file.
// If the file is not present, an empty config is returned.
// Any other read error, a line without "=", or an unknown key is an error.
func Load() (*Config, error) {
	filename := Filename()
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		// silent fail if the file isn't there
		if os.IsNotExist(err) {
			return &Config{}, nil
		}
		return nil, fmt.Errorf("Reading %s: %v", filename, err)
	}
	c := new(Config)
	s := bufio.NewScanner(bytes.NewReader(data))
	for s.Scan() {
		line := s.Text()
		// Each line must look like "key = value". Note blank lines are
		// rejected too, since they contain no "=".
		i := strings.Index(line, "=")
		if i < 0 {
			return nil, fmt.Errorf("Bad line in %s: %q", filename, line)
		}
		key, val := strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+1:])
		switch key {
		default:
			return nil, fmt.Errorf("Unknown key in %s: %q", filename, key)
		case "project":
			c.Project = val
		case "instance":
			c.Instance = val
		case "creds":
			c.Creds = val
		case "admin-endpoint":
			c.AdminEndpoint = val
		case "data-endpoint":
			c.DataEndpoint = val
		}

	}
	// s.Err() reports any scanner error encountered mid-file.
	return c, s.Err()
}
|
||||
|
||||
// GcloudCredential mirrors the "credential" object emitted by the
// `gcloud config config-helper --format=json` command (see SetFromGcloud).
type GcloudCredential struct {
	AccessToken string    `json:"access_token"`
	Expiry      time.Time `json:"token_expiry"`
}

// Token converts the gcloud credential into an oauth2 Bearer token.
func (cred *GcloudCredential) Token() *oauth2.Token {
	return &oauth2.Token{AccessToken: cred.AccessToken, TokenType: "Bearer", Expiry: cred.Expiry}
}
|
||||
|
||||
// GcloudConfig mirrors the JSON document produced by
// `gcloud config config-helper
// --format=json(configuration.properties.core.project,credential)`.
type GcloudConfig struct {
	Configuration struct {
		Properties struct {
			Core struct {
				Project string `json:"project"`
			} `json:"core"`
		} `json:"properties"`
	} `json:"configuration"`
	Credential GcloudCredential `json:"credential"`
}
|
||||
|
||||
// GcloudCmdTokenSource obtains fresh tokens by re-running the gcloud
// config-helper command each time Token is called.
type GcloudCmdTokenSource struct {
	Command string   // the gcloud executable to invoke
	Args    []string // its arguments, e.g. {"config", "config-helper", ...}
}

// Token implements the oauth2.TokenSource interface
func (g *GcloudCmdTokenSource) Token() (*oauth2.Token, error) {
	gcloudConfig, err := LoadGcloudConfig(g.Command, g.Args)
	if err != nil {
		return nil, err
	}
	return gcloudConfig.Credential.Token(), nil
}
|
||||
|
||||
// LoadGcloudConfig retrieves the gcloud configuration values we need use via the
|
||||
// 'config-helper' command
|
||||
func LoadGcloudConfig(gcloudCmd string, gcloudCmdArgs []string) (*GcloudConfig, error) {
|
||||
out, err := exec.Command(gcloudCmd, gcloudCmdArgs...).Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Could not retrieve gcloud configuration")
|
||||
}
|
||||
|
||||
var gcloudConfig GcloudConfig
|
||||
if err := json.Unmarshal(out, &gcloudConfig); err != nil {
|
||||
return nil, fmt.Errorf("Could not parse gcloud configuration")
|
||||
}
|
||||
|
||||
return &gcloudConfig, nil
|
||||
}
|
||||
|
||||
// SetFromGcloud retrieves and sets any missing config values from the gcloud
// configuration if possible.
func (c *Config) SetFromGcloud() error {

	if c.Creds == "" {
		// Fall back to the standard ADC environment variable before resorting
		// to gcloud's own credential.
		c.Creds = os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
		if c.Creds == "" {
			log.Printf("-creds flag unset, will use gcloud credential")
		}
	} else {
		// Propagate the explicit credentials path so any library reading ADC
		// picks it up too.
		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", c.Creds)
	}

	if c.Project == "" {
		log.Printf("-project flag unset, will use gcloud active project")
	}

	// Everything already set: avoid shelling out to gcloud at all.
	if c.Creds != "" && c.Project != "" {
		return nil
	}

	gcloudCmd := "gcloud"
	if runtime.GOOS == "windows" {
		// Windows installs gcloud as a .cmd batch wrapper.
		gcloudCmd = gcloudCmd + ".cmd"
	}

	// Ask config-helper for exactly the fields we consume (project + credential).
	gcloudCmdArgs := []string{"config", "config-helper",
		"--format=json(configuration.properties.core.project,credential)"}

	gcloudConfig, err := LoadGcloudConfig(gcloudCmd, gcloudCmdArgs)
	if err != nil {
		return err
	}

	if c.Project == "" && gcloudConfig.Configuration.Properties.Core.Project != "" {
		log.Printf("gcloud active project is \"%s\"",
			gcloudConfig.Configuration.Properties.Core.Project)
		c.Project = gcloudConfig.Configuration.Properties.Core.Project
	}

	if c.Creds == "" {
		// Seed with the token gcloud just returned; refresh by re-running the
		// config-helper command when it expires.
		c.TokenSource = oauth2.ReuseTokenSource(
			gcloudConfig.Credential.Token(),
			&GcloudCmdTokenSource{Command: gcloudCmd, Args: gcloudCmdArgs})
	}

	return nil
}
|
||||
106
vendor/cloud.google.com/go/bigtable/internal/gax/call_option.go
generated
vendored
Normal file
106
vendor/cloud.google.com/go/bigtable/internal/gax/call_option.go
generated
vendored
Normal file
@@ -0,0 +1,106 @@
|
||||
/*
|
||||
Copyright 2016 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This is a snapshot from github.com/googleapis/gax-go with minor modifications.
|
||||
package gax
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
// A CallOption modifies the CallSettings for a single API call.
type CallOption interface {
	Resolve(*CallSettings)
}
|
||||
|
||||
type callOptions []CallOption
|
||||
|
||||
func (opts callOptions) Resolve(s *CallSettings) *CallSettings {
|
||||
for _, opt := range opts {
|
||||
opt.Resolve(s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// CallSettings encapsulates the call settings for a particular API call.
type CallSettings struct {
	Timeout       time.Duration
	RetrySettings RetrySettings
}

// RetrySettings holds the per-call configurable settings for retrying upon
// transient failure.
type RetrySettings struct {
	RetryCodes      map[codes.Code]bool
	BackoffSettings BackoffSettings
}

// BackoffSettings holds the parameters to the exponential backoff algorithm
// for retrying.
type BackoffSettings struct {
	DelayTimeoutSettings MultipliableDuration
	RPCTimeoutSettings   MultipliableDuration
}

// MultipliableDuration describes a duration that grows geometrically from
// Initial by Multiplier, capped at Max.
type MultipliableDuration struct {
	Initial    time.Duration
	Max        time.Duration
	Multiplier float64
}
|
||||
|
||||
func (w CallSettings) Resolve(s *CallSettings) {
|
||||
s.Timeout = w.Timeout
|
||||
s.RetrySettings = w.RetrySettings
|
||||
|
||||
s.RetrySettings.RetryCodes = make(map[codes.Code]bool, len(w.RetrySettings.RetryCodes))
|
||||
for key, value := range w.RetrySettings.RetryCodes {
|
||||
s.RetrySettings.RetryCodes[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
type withRetryCodes []codes.Code
|
||||
|
||||
func (w withRetryCodes) Resolve(s *CallSettings) {
|
||||
s.RetrySettings.RetryCodes = make(map[codes.Code]bool)
|
||||
for _, code := range w {
|
||||
s.RetrySettings.RetryCodes[code] = true
|
||||
}
|
||||
}
|
||||
|
||||
// WithRetryCodes sets a list of Google API canonical error codes upon which a
|
||||
// retry should be attempted.
|
||||
func WithRetryCodes(retryCodes []codes.Code) CallOption {
|
||||
return withRetryCodes(retryCodes)
|
||||
}
|
||||
|
||||
type withDelayTimeoutSettings MultipliableDuration
|
||||
|
||||
func (w withDelayTimeoutSettings) Resolve(s *CallSettings) {
|
||||
s.RetrySettings.BackoffSettings.DelayTimeoutSettings = MultipliableDuration(w)
|
||||
}
|
||||
|
||||
// WithDelayTimeoutSettings specifies:
|
||||
// - The initial delay time, in milliseconds, between the completion of
|
||||
// the first failed request and the initiation of the first retrying
|
||||
// request.
|
||||
// - The multiplier by which to increase the delay time between the
|
||||
// completion of failed requests, and the initiation of the subsequent
|
||||
// retrying request.
|
||||
// - The maximum delay time, in milliseconds, between requests. When this
|
||||
// value is reached, `RetryDelayMultiplier` will no longer be used to
|
||||
// increase delay time.
|
||||
func WithDelayTimeoutSettings(initial time.Duration, max time.Duration, multiplier float64) CallOption {
|
||||
return withDelayTimeoutSettings(MultipliableDuration{initial, max, multiplier})
|
||||
}
|
||||
84
vendor/cloud.google.com/go/bigtable/internal/gax/invoke.go
generated
vendored
Normal file
84
vendor/cloud.google.com/go/bigtable/internal/gax/invoke.go
generated
vendored
Normal file
@@ -0,0 +1,84 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This is a snapshot from github.com/googleapis/gax-go with minor modifications.
|
||||
package gax
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
// logger writes retry diagnostics to stderr with standard timestamp flags.
var logger *log.Logger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
|
||||
// APICall is a user-defined call stub that Invoke executes (and possibly
// retries) with the supplied context.
type APICall func(context.Context) error
|
||||
|
||||
// scaleDuration returns the product of a and mult.
|
||||
func scaleDuration(a time.Duration, mult float64) time.Duration {
|
||||
ns := float64(a) * mult
|
||||
return time.Duration(ns)
|
||||
}
|
||||
|
||||
// invokeWithRetry calls stub using an exponential backoff retry mechanism
|
||||
// based on the values provided in callSettings.
|
||||
func invokeWithRetry(ctx context.Context, stub APICall, callSettings CallSettings) error {
|
||||
retrySettings := callSettings.RetrySettings
|
||||
backoffSettings := callSettings.RetrySettings.BackoffSettings
|
||||
delay := backoffSettings.DelayTimeoutSettings.Initial
|
||||
for {
|
||||
// If the deadline is exceeded...
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
err := stub(ctx)
|
||||
code := grpc.Code(err)
|
||||
if code == codes.OK {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !retrySettings.RetryCodes[code] {
|
||||
return err
|
||||
}
|
||||
|
||||
// Sleep a random amount up to the current delay
|
||||
d := time.Duration(rand.Int63n(int64(delay)))
|
||||
delayCtx, _ := context.WithTimeout(ctx, delay)
|
||||
logger.Printf("Retryable error: %v, retrying in %v", err, d)
|
||||
<-delayCtx.Done()
|
||||
|
||||
delay = scaleDuration(delay, backoffSettings.DelayTimeoutSettings.Multiplier)
|
||||
if delay > backoffSettings.DelayTimeoutSettings.Max {
|
||||
delay = backoffSettings.DelayTimeoutSettings.Max
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Invoke calls stub with a child of context modified by the specified options.
|
||||
func Invoke(ctx context.Context, stub APICall, opts ...CallOption) error {
|
||||
settings := &CallSettings{}
|
||||
callOptions(opts).Resolve(settings)
|
||||
if len(settings.RetrySettings.RetryCodes) > 0 {
|
||||
return invokeWithRetry(ctx, stub, *settings)
|
||||
}
|
||||
return stub(ctx)
|
||||
}
|
||||
49
vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go
generated
vendored
Normal file
49
vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package gax
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
// TestRandomizedDelays drives Invoke with an always-failing stub and checks
// that the randomized backoff never sleeps much longer than the configured max.
func TestRandomizedDelays(t *testing.T) {
	max := 200 * time.Millisecond
	settings := []CallOption{
		WithRetryCodes([]codes.Code{codes.Unavailable, codes.DeadlineExceeded}),
		WithDelayTimeoutSettings(10*time.Millisecond, max, 1.5),
	}

	deadline := time.Now().Add(1 * time.Second)
	ctx, _ := context.WithDeadline(context.Background(), deadline)
	var invokeTime time.Time
	Invoke(ctx, func(childCtx context.Context) error {
		// Keep failing, make sure we never slept more than max (plus a fudge factor)
		if !invokeTime.IsZero() {
			// NOTE(review): t.Logf only records; it does not fail the test.
			// Presumably deliberate to avoid timing flakiness — confirm.
			if got, want := time.Since(invokeTime), max; got > (want + 20*time.Millisecond) {
				t.Logf("Slept too long. Got: %v, want: %v", got, max)
			}
		}
		invokeTime = time.Now()
		// Workaround for `go vet`: https://github.com/grpc/grpc-go/issues/90
		errf := grpc.Errorf
		return errf(codes.Unavailable, "")
	}, settings...)
}
|
||||
48
vendor/cloud.google.com/go/bigtable/internal/option/option.go
generated
vendored
Normal file
48
vendor/cloud.google.com/go/bigtable/internal/option/option.go
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package option contains common code for dealing with client options.
|
||||
package option
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// DefaultClientOptions returns the default client options to use for the
|
||||
// client's gRPC connection.
|
||||
func DefaultClientOptions(endpoint, scope, userAgent string) ([]option.ClientOption, error) {
|
||||
var o []option.ClientOption
|
||||
// Check the environment variables for the bigtable emulator.
|
||||
// Dial it directly and don't pass any credentials.
|
||||
if addr := os.Getenv("BIGTABLE_EMULATOR_HOST"); addr != "" {
|
||||
conn, err := grpc.Dial(addr, grpc.WithInsecure())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("emulator grpc.Dial: %v", err)
|
||||
}
|
||||
o = []option.ClientOption{option.WithGRPCConn(conn)}
|
||||
} else {
|
||||
o = []option.ClientOption{
|
||||
option.WithEndpoint(endpoint),
|
||||
option.WithScopes(scope),
|
||||
option.WithUserAgent(userAgent),
|
||||
}
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
144
vendor/cloud.google.com/go/bigtable/internal/stat/stats.go
generated
vendored
Normal file
144
vendor/cloud.google.com/go/bigtable/internal/stat/stats.go
generated
vendored
Normal file
@@ -0,0 +1,144 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package stat
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/csv"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
"strconv"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
)
|
||||
|
||||
type byDuration []time.Duration
|
||||
|
||||
func (data byDuration) Len() int { return len(data) }
|
||||
func (data byDuration) Swap(i, j int) { data[i], data[j] = data[j], data[i] }
|
||||
func (data byDuration) Less(i, j int) bool { return data[i] < data[j] }
|
||||
|
||||
// quantile returns a value representing the kth of q quantiles.
|
||||
// May alter the order of data.
|
||||
func quantile(data []time.Duration, k, q int) (quantile time.Duration, ok bool) {
|
||||
if len(data) < 1 {
|
||||
return 0, false
|
||||
}
|
||||
if k > q {
|
||||
return 0, false
|
||||
}
|
||||
if k < 0 || q < 1 {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
sort.Sort(byDuration(data))
|
||||
|
||||
if k == 0 {
|
||||
return data[0], true
|
||||
}
|
||||
if k == q {
|
||||
return data[len(data)-1], true
|
||||
}
|
||||
|
||||
bucketSize := float64(len(data)-1) / float64(q)
|
||||
i := float64(k) * bucketSize
|
||||
|
||||
lower := int(math.Trunc(i))
|
||||
var upper int
|
||||
if i > float64(lower) && lower+1 < len(data) {
|
||||
// If the quantile lies between two elements
|
||||
upper = lower + 1
|
||||
} else {
|
||||
upper = lower
|
||||
}
|
||||
weightUpper := i - float64(lower)
|
||||
weightLower := 1 - weightUpper
|
||||
return time.Duration(weightLower*float64(data[lower]) + weightUpper*float64(data[upper])), true
|
||||
}
|
||||
|
||||
// Aggregate summarizes the latency distribution of a set of calls.
type Aggregate struct {
	Name               string
	Count, Errors      int
	Min, Median, Max   time.Duration
	P75, P90, P95, P99 time.Duration // percentiles
}
|
||||
|
||||
// NewAggregate constructs an aggregate from latencies. Returns nil if latencies does not contain aggregateable data.
|
||||
func NewAggregate(name string, latencies []time.Duration, errorCount int) *Aggregate {
|
||||
agg := Aggregate{Name: name, Count: len(latencies), Errors: errorCount}
|
||||
|
||||
if len(latencies) == 0 {
|
||||
return nil
|
||||
}
|
||||
var ok bool
|
||||
if agg.Min, ok = quantile(latencies, 0, 2); !ok {
|
||||
return nil
|
||||
}
|
||||
if agg.Median, ok = quantile(latencies, 1, 2); !ok {
|
||||
return nil
|
||||
}
|
||||
if agg.Max, ok = quantile(latencies, 2, 2); !ok {
|
||||
return nil
|
||||
}
|
||||
if agg.P75, ok = quantile(latencies, 75, 100); !ok {
|
||||
return nil
|
||||
}
|
||||
if agg.P90, ok = quantile(latencies, 90, 100); !ok {
|
||||
return nil
|
||||
}
|
||||
if agg.P95, ok = quantile(latencies, 95, 100); !ok {
|
||||
return nil
|
||||
}
|
||||
if agg.P99, ok = quantile(latencies, 99, 100); !ok {
|
||||
return nil
|
||||
}
|
||||
return &agg
|
||||
}
|
||||
|
||||
// String renders the aggregate as an aligned, human-readable table.
// A nil receiver prints "no data" (NewAggregate returns nil for empty input).
func (agg *Aggregate) String() string {
	if agg == nil {
		return "no data"
	}
	var buf bytes.Buffer
	tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding
	fmt.Fprintf(tw, "min:\t%v\nmedian:\t%v\nmax:\t%v\n95th percentile:\t%v\n99th percentile:\t%v\n",
		agg.Min, agg.Median, agg.Max, agg.P95, agg.P99)
	tw.Flush()
	return buf.String()
}
|
||||
|
||||
// WriteCSV writes a csv file to the given Writer,
|
||||
// with a header row and one row per aggregate.
|
||||
func WriteCSV(aggs []*Aggregate, iow io.Writer) error {
|
||||
w := csv.NewWriter(iow)
|
||||
defer w.Flush()
|
||||
err := w.Write([]string{"name", "count", "errors", "min", "median", "max", "p75", "p90", "p95", "p99"})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, agg := range aggs {
|
||||
err = w.Write([]string{
|
||||
agg.Name, strconv.Itoa(agg.Count), strconv.Itoa(agg.Errors),
|
||||
agg.Min.String(), agg.Median.String(), agg.Max.String(),
|
||||
agg.P75.String(), agg.P90.String(), agg.P95.String(), agg.P99.String(),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
250
vendor/cloud.google.com/go/bigtable/reader.go
generated
vendored
Normal file
250
vendor/cloud.google.com/go/bigtable/reader.go
generated
vendored
Normal file
@@ -0,0 +1,250 @@
|
||||
/*
|
||||
Copyright 2016 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package bigtable
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
|
||||
)
|
||||
|
||||
// A Row is returned by ReadRows. The map is keyed by column family (the prefix
|
||||
// of the column name before the colon). The values are the returned ReadItems
|
||||
// for that column family in the order returned by Read.
|
||||
type Row map[string][]ReadItem
|
||||
|
||||
// Key returns the row's key, or "" if the row is empty.
|
||||
func (r Row) Key() string {
|
||||
for _, items := range r {
|
||||
if len(items) > 0 {
|
||||
return items[0].Row
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// A ReadItem is returned by Read. A ReadItem contains data from a specific row and column.
type ReadItem struct {
	Row, Column string
	// Timestamp is a type declared elsewhere in this package; Process fills it
	// from the chunk's TimestampMicros, so presumably microseconds — confirm.
	Timestamp Timestamp
	Value     []byte
}
|
||||
|
||||
// The current state of the read rows state machine.
type rrState int64

const (
	newRow         rrState = iota // between rows; next chunk must carry a full cell key
	rowInProgress                 // inside a row; expecting more cells, a commit, or a reset
	cellInProgress                // inside a split cell; expecting more value chunks
)
|
||||
|
||||
// chunkReader handles cell chunks from the read rows response and combines
// them into full Rows.
type chunkReader struct {
	state   rrState // current position in the chunk state machine
	curKey  []byte  // row key of the row being assembled
	curFam  string  // column family of the current cell
	curQual []byte  // qualifier of the current cell
	curTS   int64   // timestamp (micros) of the current cell
	curVal  []byte  // accumulated cell value, possibly across split chunks
	curRow  Row     // row under construction
	lastKey string  // key of the last committed row, for ordering validation
}

// newChunkReader returns a new chunkReader for handling read rows responses.
func newChunkReader() *chunkReader {
	return &chunkReader{state: newRow}
}
|
||||
|
||||
// Process takes a cell chunk and returns a new Row if the given chunk
// completes a Row, or nil otherwise.
func (cr *chunkReader) Process(cc *btpb.ReadRowsResponse_CellChunk) (Row, error) {
	var row Row
	switch cr.state {
	case newRow:
		if err := cr.validateNewRow(cc); err != nil {
			return nil, err
		}

		// The first chunk of a row carries the complete cell key; cache
		// every component for the cells that follow.
		cr.curRow = make(Row)
		cr.curKey = cc.RowKey
		cr.curFam = cc.FamilyName.Value
		cr.curQual = cc.Qualifier.Value
		cr.curTS = cc.TimestampMicros
		row = cr.handleCellValue(cc)

	case rowInProgress:
		if err := cr.validateRowInProgress(cc); err != nil {
			return nil, err
		}

		// A reset chunk discards the partially-built row.
		if cc.GetResetRow() {
			cr.resetToNewRow()
			return nil, nil
		}

		// Family and qualifier are only present when they change.
		if cc.FamilyName != nil {
			cr.curFam = cc.FamilyName.Value
		}
		if cc.Qualifier != nil {
			cr.curQual = cc.Qualifier.Value
		}
		cr.curTS = cc.TimestampMicros
		row = cr.handleCellValue(cc)

	case cellInProgress:
		if err := cr.validateCellInProgress(cc); err != nil {
			return nil, err
		}
		if cc.GetResetRow() {
			cr.resetToNewRow()
			return nil, nil
		}
		row = cr.handleCellValue(cc)
	}

	return row, nil
}
|
||||
|
||||
// Close must be called after all cell chunks from the response
|
||||
// have been processed. An error will be returned if the reader is
|
||||
// in an invalid state, in which case the error should be propagated to the caller.
|
||||
func (cr *chunkReader) Close() error {
|
||||
if cr.state != newRow {
|
||||
return fmt.Errorf("invalid state for end of stream %q", cr.state)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleCellValue returns a Row if the cell value includes a commit, otherwise nil.
|
||||
func (cr *chunkReader) handleCellValue(cc *btpb.ReadRowsResponse_CellChunk) Row {
|
||||
if cc.ValueSize > 0 {
|
||||
// ValueSize is specified so expect a split value of ValueSize bytes
|
||||
if cr.curVal == nil {
|
||||
cr.curVal = make([]byte, 0, cc.ValueSize)
|
||||
}
|
||||
cr.curVal = append(cr.curVal, cc.Value...)
|
||||
cr.state = cellInProgress
|
||||
} else {
|
||||
// This cell is either the complete value or the last chunk of a split
|
||||
if cr.curVal == nil {
|
||||
cr.curVal = cc.Value
|
||||
} else {
|
||||
cr.curVal = append(cr.curVal, cc.Value...)
|
||||
}
|
||||
cr.finishCell()
|
||||
|
||||
if cc.GetCommitRow() {
|
||||
return cr.commitRow()
|
||||
} else {
|
||||
cr.state = rowInProgress
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// finishCell appends the buffered cell to the current row under its column
// family and clears the value buffer for the next cell.
func (cr *chunkReader) finishCell() {
	ri := ReadItem{
		Row:       string(cr.curKey),
		Column:    fmt.Sprintf("%s:%s", cr.curFam, cr.curQual),
		Timestamp: Timestamp(cr.curTS),
		Value:     cr.curVal,
	}
	cr.curRow[cr.curFam] = append(cr.curRow[cr.curFam], ri)
	cr.curVal = nil
}
|
||||
|
||||
func (cr *chunkReader) commitRow() Row {
|
||||
row := cr.curRow
|
||||
cr.lastKey = cr.curRow.Key()
|
||||
cr.resetToNewRow()
|
||||
return row
|
||||
}
|
||||
|
||||
func (cr *chunkReader) resetToNewRow() {
|
||||
cr.curKey = nil
|
||||
cr.curFam = ""
|
||||
cr.curQual = nil
|
||||
cr.curVal = nil
|
||||
cr.curRow = nil
|
||||
cr.curTS = 0
|
||||
cr.state = newRow
|
||||
}
|
||||
|
||||
// validateNewRow checks that a chunk arriving in the newRow state carries a
// complete cell key, is not a reset, and keeps row keys strictly increasing.
func (cr *chunkReader) validateNewRow(cc *btpb.ReadRowsResponse_CellChunk) error {
	if cc.GetResetRow() {
		return fmt.Errorf("reset_row not allowed between rows")
	}
	if cc.RowKey == nil || cc.FamilyName == nil || cc.Qualifier == nil {
		return fmt.Errorf("missing key field for new row %v", cc)
	}
	// Rows must arrive in ascending key order.
	if cr.lastKey != "" && cr.lastKey >= string(cc.RowKey) {
		return fmt.Errorf("out of order row key: %q, %q", cr.lastKey, string(cc.RowKey))
	}
	return nil
}
|
||||
|
||||
// validateRowInProgress checks chunk consistency while a row is being built:
// a valid commit/reset status, no key change mid-row, and a family name only
// when accompanied by a qualifier.
func (cr *chunkReader) validateRowInProgress(cc *btpb.ReadRowsResponse_CellChunk) error {
	if err := cr.validateRowStatus(cc); err != nil {
		return err
	}
	if cc.RowKey != nil && !bytes.Equal(cc.RowKey, cr.curKey) {
		return fmt.Errorf("received new row key %q during existing row %q", cc.RowKey, cr.curKey)
	}
	if cc.FamilyName != nil && cc.Qualifier == nil {
		return fmt.Errorf("family name %q specified without a qualifier", cc.FamilyName)
	}
	return nil
}
|
||||
|
||||
func (cr *chunkReader) validateCellInProgress(cc *btpb.ReadRowsResponse_CellChunk) error {
|
||||
if err := cr.validateRowStatus(cc); err != nil {
|
||||
return err
|
||||
}
|
||||
if cr.curVal == nil {
|
||||
return fmt.Errorf("no cached cell while CELL_IN_PROGRESS %v", cc)
|
||||
}
|
||||
if cc.GetResetRow() == false && cr.isAnyKeyPresent(cc) {
|
||||
return fmt.Errorf("cell key components found while CELL_IN_PROGRESS %v", cc)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cr *chunkReader) isAnyKeyPresent(cc *btpb.ReadRowsResponse_CellChunk) bool {
|
||||
return cc.RowKey != nil ||
|
||||
cc.FamilyName != nil ||
|
||||
cc.Qualifier != nil ||
|
||||
cc.TimestampMicros != 0
|
||||
}
|
||||
|
||||
// Validate a RowStatus, commit or reset, if present.
func (cr *chunkReader) validateRowStatus(cc *btpb.ReadRowsResponse_CellChunk) error {
	// Resets can't be specified with any other part of a cell
	if cc.GetResetRow() && (cr.isAnyKeyPresent(cc) ||
		cc.Value != nil ||
		cc.ValueSize != 0 ||
		cc.Labels != nil) {
		return fmt.Errorf("reset must not be specified with other fields %v", cc)
	}
	// ValueSize > 0 marks a split cell; committing mid-cell is invalid.
	if cc.GetCommitRow() && cc.ValueSize > 0 {
		return fmt.Errorf("commit row found in between chunks in a cell")
	}
	return nil
}
|
||||
343
vendor/cloud.google.com/go/bigtable/reader_test.go
generated
vendored
Normal file
343
vendor/cloud.google.com/go/bigtable/reader_test.go
generated
vendored
Normal file
@@ -0,0 +1,343 @@
|
||||
/*
|
||||
Copyright 2016 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package bigtable
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes/wrappers"
|
||||
btspb "google.golang.org/genproto/googleapis/bigtable/v2"
|
||||
)
|
||||
|
||||
// nilStr indicates that a field in the proto should be omitted, rather than
// included as a wrapped empty string.
const nilStr = "<>"
|
||||
|
||||
// TestSingleCell verifies that a single committed chunk yields a complete
// one-cell row. (cc and ri are test helpers defined elsewhere in this file.)
func TestSingleCell(t *testing.T) {
	cr := newChunkReader()

	// All in one cell
	row, err := cr.Process(cc("rk", "fm", "col", 1, "value", 0, true))
	if err != nil {
		t.Fatalf("Processing chunk: %v", err)
	}
	if row == nil {
		t.Fatalf("Missing row")
	}
	if len(row["fm"]) != 1 {
		t.Fatalf("Family name length mismatch %d, %d", 1, len(row["fm"]))
	}
	want := []ReadItem{ri("rk", "fm", "col", 1, "value")}
	if !reflect.DeepEqual(row["fm"], want) {
		t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm"], want)
	}
	if err := cr.Close(); err != nil {
		t.Fatalf("Close: %v", err)
	}
}
|
||||
|
||||
// TestMultipleCells verifies that several cells across two families are
// grouped into one row, preserving per-family order.
func TestMultipleCells(t *testing.T) {
	cr := newChunkReader()

	cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false))
	cr.Process(cc("rs", "fm1", "col1", 1, "val2", 0, false))
	cr.Process(cc("rs", "fm1", "col2", 0, "val3", 0, false))
	cr.Process(cc("rs", "fm2", "col1", 0, "val4", 0, false))
	row, err := cr.Process(cc("rs", "fm2", "col2", 1, "extralongval5", 0, true))
	if err != nil {
		t.Fatalf("Processing chunk: %v", err)
	}
	if row == nil {
		t.Fatalf("Missing row")
	}

	want := []ReadItem{
		ri("rs", "fm1", "col1", 0, "val1"),
		ri("rs", "fm1", "col1", 1, "val2"),
		ri("rs", "fm1", "col2", 0, "val3"),
	}
	if !reflect.DeepEqual(row["fm1"], want) {
		t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
	}
	want = []ReadItem{
		ri("rs", "fm2", "col1", 0, "val4"),
		ri("rs", "fm2", "col2", 1, "extralongval5"),
	}
	if !reflect.DeepEqual(row["fm2"], want) {
		t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want)
	}
	if err := cr.Close(); err != nil {
		t.Fatalf("Close: %v", err)
	}
}
|
||||
|
||||
// TestSplitCells verifies that a value split across chunks (non-zero
// ValueSize followed by a data-only chunk) is reassembled into one cell.
func TestSplitCells(t *testing.T) {
	cr := newChunkReader()

	cr.Process(cc("rs", "fm1", "col1", 0, "hello ", 11, false))
	cr.Process(ccData("world", 0, false))
	row, err := cr.Process(cc("rs", "fm1", "col2", 0, "val2", 0, true))
	if err != nil {
		t.Fatalf("Processing chunk: %v", err)
	}
	if row == nil {
		t.Fatalf("Missing row")
	}

	want := []ReadItem{
		ri("rs", "fm1", "col1", 0, "hello world"),
		ri("rs", "fm1", "col2", 0, "val2"),
	}
	if !reflect.DeepEqual(row["fm1"], want) {
		t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
	}
	if err := cr.Close(); err != nil {
		t.Fatalf("Close: %v", err)
	}
}
|
||||
|
||||
// TestMultipleRows verifies that two committed chunks with increasing row
// keys produce two independent rows.
func TestMultipleRows(t *testing.T) {
	cr := newChunkReader()

	row, err := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true))
	if err != nil {
		t.Fatalf("Processing chunk: %v", err)
	}
	want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")}
	if !reflect.DeepEqual(row["fm1"], want) {
		t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
	}

	row, err = cr.Process(cc("rs2", "fm2", "col2", 2, "val2", 0, true))
	if err != nil {
		t.Fatalf("Processing chunk: %v", err)
	}
	want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")}
	if !reflect.DeepEqual(row["fm2"], want) {
		t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want)
	}

	if err := cr.Close(); err != nil {
		t.Fatalf("Close: %v", err)
	}
}
|
||||
|
||||
func TestBlankQualifier(t *testing.T) {
|
||||
cr := newChunkReader()
|
||||
|
||||
row, err := cr.Process(cc("rs1", "fm1", "", 1, "val1", 0, true))
|
||||
if err != nil {
|
||||
t.Fatalf("Processing chunk: %v", err)
|
||||
}
|
||||
want := []ReadItem{ri("rs1", "fm1", "", 1, "val1")}
|
||||
if !reflect.DeepEqual(row["fm1"], want) {
|
||||
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
|
||||
}
|
||||
|
||||
row, err = cr.Process(cc("rs2", "fm2", "col2", 2, "val2", 0, true))
|
||||
if err != nil {
|
||||
t.Fatalf("Processing chunk: %v", err)
|
||||
}
|
||||
want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")}
|
||||
if !reflect.DeepEqual(row["fm2"], want) {
|
||||
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want)
|
||||
}
|
||||
|
||||
if err := cr.Close(); err != nil {
|
||||
t.Fatalf("Close: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReset(t *testing.T) {
|
||||
cr := newChunkReader()
|
||||
|
||||
cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false))
|
||||
cr.Process(cc("rs", "fm1", "col1", 1, "val2", 0, false))
|
||||
cr.Process(cc("rs", "fm1", "col2", 0, "val3", 0, false))
|
||||
cr.Process(ccReset())
|
||||
row, _ := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true))
|
||||
want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")}
|
||||
if !reflect.DeepEqual(row["fm1"], want) {
|
||||
t.Fatalf("Reset: got: %v\nwant: %v\n", row["fm1"], want)
|
||||
}
|
||||
if err := cr.Close(); err != nil {
|
||||
t.Fatalf("Close: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewFamEmptyQualifier(t *testing.T) {
|
||||
cr := newChunkReader()
|
||||
|
||||
cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false))
|
||||
_, err := cr.Process(cc(nilStr, "fm2", nilStr, 0, "val2", 0, true))
|
||||
if err == nil {
|
||||
t.Fatalf("Expected error on second chunk with no qualifier set")
|
||||
}
|
||||
}
|
||||
|
||||
// The read rows acceptance test reads a json file specifying a number of tests,
// each consisting of one or more cell chunk text protos and one or more resulting
// cells or errors.
type AcceptanceTest struct {
	// Tests holds the individual acceptance test cases loaded from JSON.
	Tests []TestCase `json:"tests"`
}
|
||||
|
||||
// TestCase is one acceptance test: a named sequence of cell-chunk text
// protos and the cells (or errors) they are expected to produce.
type TestCase struct {
	Name string `json:"name"`
	// Chunks are ReadRowsResponse_CellChunk messages in text-proto form.
	Chunks []string `json:"chunks"`
	Results []TestResult `json:"results"`
}
|
||||
|
||||
// TestResult describes one expected cell — row key, family, qualifier,
// timestamp, and value — or an expected processing error.
type TestResult struct {
	RK string `json:"rk"`
	FM string `json:"fm"`
	Qual string `json:"qual"`
	TS int64 `json:"ts"`
	Value string `json:"value"`
	Error bool `json:"error"` // If true, expect an error. Ignore any other field.
}
|
||||
|
||||
func TestAcceptance(t *testing.T) {
|
||||
testJson, err := ioutil.ReadFile("./testdata/read-rows-acceptance-test.json")
|
||||
if err != nil {
|
||||
t.Fatalf("could not open acceptance test file %v", err)
|
||||
}
|
||||
|
||||
var accTest AcceptanceTest
|
||||
err = json.Unmarshal(testJson, &accTest)
|
||||
if err != nil {
|
||||
t.Fatalf("could not parse acceptance test file: %v", err)
|
||||
}
|
||||
|
||||
for _, test := range accTest.Tests {
|
||||
runTestCase(t, test)
|
||||
}
|
||||
}
|
||||
|
||||
func runTestCase(t *testing.T, test TestCase) {
|
||||
// Increment an index into the result array as we get results
|
||||
cr := newChunkReader()
|
||||
var results []TestResult
|
||||
var seenErr bool
|
||||
for _, chunkText := range test.Chunks {
|
||||
// Parse and pass each cell chunk to the ChunkReader
|
||||
cc := &btspb.ReadRowsResponse_CellChunk{}
|
||||
err := proto.UnmarshalText(chunkText, cc)
|
||||
if err != nil {
|
||||
t.Errorf("[%s] failed to unmarshal text proto: %s\n%s", test.Name, chunkText, err)
|
||||
return
|
||||
}
|
||||
row, err := cr.Process(cc)
|
||||
if err != nil {
|
||||
results = append(results, TestResult{Error: true})
|
||||
seenErr = true
|
||||
break
|
||||
} else {
|
||||
// Turn the Row into TestResults
|
||||
for fm, ris := range row {
|
||||
for _, ri := range ris {
|
||||
tr := TestResult{
|
||||
RK: ri.Row,
|
||||
FM: fm,
|
||||
Qual: strings.Split(ri.Column, ":")[1],
|
||||
TS: int64(ri.Timestamp),
|
||||
Value: string(ri.Value),
|
||||
}
|
||||
results = append(results, tr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Only Close if we don't have an error yet, otherwise Close: is expected.
|
||||
if !seenErr {
|
||||
err := cr.Close()
|
||||
if err != nil {
|
||||
results = append(results, TestResult{Error: true})
|
||||
}
|
||||
}
|
||||
|
||||
got := toSet(results)
|
||||
want := toSet(test.Results)
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("[%s]: got: %v\nwant: %v\n", test.Name, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func toSet(res []TestResult) map[TestResult]bool {
|
||||
set := make(map[TestResult]bool)
|
||||
for _, tr := range res {
|
||||
set[tr] = true
|
||||
}
|
||||
return set
|
||||
}
|
||||
|
||||
// ri returns a ReadItem for the given components
|
||||
func ri(rk string, fm string, qual string, ts int64, val string) ReadItem {
|
||||
return ReadItem{Row: rk, Column: fmt.Sprintf("%s:%s", fm, qual), Value: []byte(val), Timestamp: Timestamp(ts)}
|
||||
}
|
||||
|
||||
// cc returns a CellChunk proto
|
||||
func cc(rk string, fm string, qual string, ts int64, val string, size int32, commit bool) *btspb.ReadRowsResponse_CellChunk {
|
||||
// The components of the cell key are wrapped and can be null or empty
|
||||
var rkWrapper []byte
|
||||
if rk == nilStr {
|
||||
rkWrapper = nil
|
||||
} else {
|
||||
rkWrapper = []byte(rk)
|
||||
}
|
||||
|
||||
var fmWrapper *wrappers.StringValue
|
||||
if fm != nilStr {
|
||||
fmWrapper = &wrappers.StringValue{Value: fm}
|
||||
} else {
|
||||
fmWrapper = nil
|
||||
}
|
||||
|
||||
var qualWrapper *wrappers.BytesValue
|
||||
if qual != nilStr {
|
||||
qualWrapper = &wrappers.BytesValue{Value: []byte(qual)}
|
||||
} else {
|
||||
qualWrapper = nil
|
||||
}
|
||||
|
||||
return &btspb.ReadRowsResponse_CellChunk{
|
||||
RowKey: rkWrapper,
|
||||
FamilyName: fmWrapper,
|
||||
Qualifier: qualWrapper,
|
||||
TimestampMicros: ts,
|
||||
Value: []byte(val),
|
||||
ValueSize: size,
|
||||
RowStatus: &btspb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: commit}}
|
||||
}
|
||||
|
||||
// ccData returns a CellChunk with only a value and size, i.e. a
// continuation chunk carrying no row key, family, or qualifier.
func ccData(val string, size int32, commit bool) *btspb.ReadRowsResponse_CellChunk {
	return cc(nilStr, nilStr, nilStr, 0, val, size, commit)
}
|
||||
|
||||
// ccReset returns a CellChunk with ResetRow set to true, which tells the
// reader to discard all cells buffered for the current row.
func ccReset() *btspb.ReadRowsResponse_CellChunk {
	return &btspb.ReadRowsResponse_CellChunk{
		RowStatus: &btspb.ReadRowsResponse_CellChunk_ResetRow{ResetRow: true}}
}
|
||||
370
vendor/cloud.google.com/go/bigtable/retry_test.go
generated
vendored
Normal file
370
vendor/cloud.google.com/go/bigtable/retry_test.go
generated
vendored
Normal file
@@ -0,0 +1,370 @@
|
||||
/*
|
||||
Copyright 2016 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package bigtable
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/bigtable/bttest"
|
||||
"github.com/golang/protobuf/ptypes/wrappers"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/option"
|
||||
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
|
||||
rpcpb "google.golang.org/genproto/googleapis/rpc/status"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err error) {
|
||||
srv, err := bttest.NewServer("127.0.0.1:0", opt...)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
client, err := NewClient(context.Background(), "client", "instance", option.WithGRPCConn(conn))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
adminClient, err := NewAdminClient(context.Background(), "client", "instance", option.WithGRPCConn(conn))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if err := adminClient.CreateTable(context.Background(), "table"); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if err := adminClient.CreateColumnFamily(context.Background(), "table", "cf"); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
t := client.Open("table")
|
||||
|
||||
cleanupFunc := func() {
|
||||
adminClient.Close()
|
||||
client.Close()
|
||||
srv.Close()
|
||||
}
|
||||
return t, cleanupFunc, nil
|
||||
}
|
||||
|
||||
// TestRetryApply verifies that Table.Apply retries plain and conditional
// mutations on a retryable gRPC code (Unavailable) and does not retry on a
// non-retryable one (FailedPrecondition).
func TestRetryApply(t *testing.T) {
	ctx := context.Background()

	errCount := 0
	code := codes.Unavailable // Will be retried
	// Intercept requests and return an error or defer to the underlying handler.
	// The first three MutateRow calls fail with `code`; later calls pass through.
	errInjector := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		if strings.HasSuffix(info.FullMethod, "MutateRow") && errCount < 3 {
			errCount++
			return nil, grpc.Errorf(code, "")
		}
		return handler(ctx, req)
	}
	tbl, cleanup, err := setupFakeServer(grpc.UnaryInterceptor(errInjector))
	defer cleanup()
	if err != nil {
		t.Fatalf("fake server setup: %v", err)
	}

	// A simple mutation must survive three injected Unavailable errors.
	mut := NewMutation()
	mut.Set("cf", "col", 1, []byte("val"))
	if err := tbl.Apply(ctx, "row1", mut); err != nil {
		t.Errorf("applying single mutation with retries: %v", err)
	}
	row, err := tbl.ReadRow(ctx, "row1")
	if err != nil {
		t.Errorf("reading single value with retries: %v", err)
	}
	if row == nil {
		t.Errorf("applying single mutation with retries: could not read back row")
	}

	// A non-retryable code must surface immediately as an error.
	code = codes.FailedPrecondition // Won't be retried
	errCount = 0
	if err := tbl.Apply(ctx, "row", mut); err == nil {
		t.Errorf("applying single mutation with no retries: no error")
	}

	// Check and mutate: the filter matches row1, so mutTrue (DeleteRow) runs.
	mutTrue := NewMutation()
	mutTrue.DeleteRow()
	mutFalse := NewMutation()
	mutFalse.Set("cf", "col", 1, []byte("val"))
	condMut := NewCondMutation(ValueFilter("."), mutTrue, mutFalse)

	errCount = 0
	code = codes.Unavailable // Will be retried
	if err := tbl.Apply(ctx, "row1", condMut); err != nil {
		t.Errorf("conditionally mutating row with retries: %v", err)
	}
	row, err = tbl.ReadRow(ctx, "row1") // row1 already in the table
	if err != nil {
		t.Errorf("reading single value after conditional mutation: %v", err)
	}
	if row != nil {
		t.Errorf("reading single value after conditional mutation: row not deleted")
	}

	// Conditional mutations are likewise not retried on FailedPrecondition.
	errCount = 0
	code = codes.FailedPrecondition // Won't be retried
	if err := tbl.Apply(ctx, "row", condMut); err == nil {
		t.Errorf("conditionally mutating row with no retries: no error")
	}
}
|
||||
|
||||
func TestRetryApplyBulk(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
// Intercept requests and delegate to an interceptor defined by the test case
|
||||
errCount := 0
|
||||
var f func(grpc.ServerStream) error
|
||||
errInjector := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
if strings.HasSuffix(info.FullMethod, "MutateRows") {
|
||||
return f(ss)
|
||||
}
|
||||
return handler(ctx, ss)
|
||||
}
|
||||
|
||||
tbl, cleanup, err := setupFakeServer(grpc.StreamInterceptor(errInjector))
|
||||
defer cleanup()
|
||||
if err != nil {
|
||||
t.Fatalf("fake server setup: %v", err)
|
||||
}
|
||||
|
||||
errCount = 0
|
||||
// Test overall request failure and retries
|
||||
f = func(ss grpc.ServerStream) error {
|
||||
if errCount < 3 {
|
||||
errCount++
|
||||
return grpc.Errorf(codes.Aborted, "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
mut := NewMutation()
|
||||
mut.Set("cf", "col", 1, []byte{})
|
||||
errors, err := tbl.ApplyBulk(ctx, []string{"row2"}, []*Mutation{mut})
|
||||
if errors != nil || err != nil {
|
||||
t.Errorf("bulk with request failure: got: %v, %v, want: nil", errors, err)
|
||||
}
|
||||
|
||||
// Test failures and retries in one request
|
||||
errCount = 0
|
||||
m1 := NewMutation()
|
||||
m1.Set("cf", "col", 1, []byte{})
|
||||
m2 := NewMutation()
|
||||
m2.Set("cf", "col2", 1, []byte{})
|
||||
m3 := NewMutation()
|
||||
m3.Set("cf", "col3", 1, []byte{})
|
||||
f = func(ss grpc.ServerStream) error {
|
||||
var err error
|
||||
req := new(btpb.MutateRowsRequest)
|
||||
ss.RecvMsg(req)
|
||||
switch errCount {
|
||||
case 0:
|
||||
// Retryable request failure
|
||||
err = grpc.Errorf(codes.Unavailable, "")
|
||||
case 1:
|
||||
// Two mutations fail
|
||||
writeMutateRowsResponse(ss, codes.Unavailable, codes.OK, codes.Aborted)
|
||||
err = nil
|
||||
case 2:
|
||||
// Two failures were retried. One will succeed.
|
||||
if want, got := 2, len(req.Entries); want != got {
|
||||
t.Errorf("2 bulk retries, got: %d, want %d", got, want)
|
||||
}
|
||||
writeMutateRowsResponse(ss, codes.OK, codes.Aborted)
|
||||
err = nil
|
||||
case 3:
|
||||
// One failure was retried and will succeed.
|
||||
if want, got := 1, len(req.Entries); want != got {
|
||||
t.Errorf("1 bulk retry, got: %d, want %d", got, want)
|
||||
}
|
||||
writeMutateRowsResponse(ss, codes.OK)
|
||||
err = nil
|
||||
}
|
||||
errCount++
|
||||
return err
|
||||
}
|
||||
errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3})
|
||||
if errors != nil || err != nil {
|
||||
t.Errorf("bulk with retries: got: %v, %v, want: nil", errors, err)
|
||||
}
|
||||
|
||||
// Test unretryable errors
|
||||
niMut := NewMutation()
|
||||
niMut.Set("cf", "col", ServerTime, []byte{}) // Non-idempotent
|
||||
errCount = 0
|
||||
f = func(ss grpc.ServerStream) error {
|
||||
var err error
|
||||
req := new(btpb.MutateRowsRequest)
|
||||
ss.RecvMsg(req)
|
||||
switch errCount {
|
||||
case 0:
|
||||
// Give non-idempotent mutation a retryable error code.
|
||||
// Nothing should be retried.
|
||||
writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.Aborted)
|
||||
err = nil
|
||||
case 1:
|
||||
t.Errorf("unretryable errors: got one retry, want no retries")
|
||||
}
|
||||
errCount++
|
||||
return err
|
||||
}
|
||||
errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2"}, []*Mutation{m1, niMut})
|
||||
if err != nil {
|
||||
t.Errorf("unretryable errors: request failed %v")
|
||||
}
|
||||
want := []error{
|
||||
grpc.Errorf(codes.FailedPrecondition, ""),
|
||||
grpc.Errorf(codes.Aborted, ""),
|
||||
}
|
||||
if !reflect.DeepEqual(want, errors) {
|
||||
t.Errorf("unretryable errors: got: %v, want: %v", errors, want)
|
||||
}
|
||||
|
||||
// Test individual errors and a deadline exceeded
|
||||
f = func(ss grpc.ServerStream) error {
|
||||
writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.OK, codes.Aborted)
|
||||
return nil
|
||||
}
|
||||
ctx, _ = context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3})
|
||||
wantErr := context.DeadlineExceeded
|
||||
if wantErr != err {
|
||||
t.Errorf("deadline exceeded error: got: %v, want: %v", err, wantErr)
|
||||
}
|
||||
if errors != nil {
|
||||
t.Errorf("deadline exceeded errors: got: %v, want: nil", err)
|
||||
}
|
||||
}
|
||||
|
||||
func writeMutateRowsResponse(ss grpc.ServerStream, codes ...codes.Code) error {
|
||||
res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(codes))}
|
||||
for i, code := range codes {
|
||||
res.Entries[i] = &btpb.MutateRowsResponse_Entry{
|
||||
Index: int64(i),
|
||||
Status: &rpcpb.Status{Code: int32(code), Message: ""},
|
||||
}
|
||||
}
|
||||
return ss.SendMsg(res)
|
||||
}
|
||||
|
||||
func TestRetainRowsAfter(t *testing.T) {
|
||||
prevRowRange := NewRange("a", "z")
|
||||
prevRowKey := "m"
|
||||
want := NewRange("m\x00", "z")
|
||||
got := prevRowRange.retainRowsAfter(prevRowKey)
|
||||
if !reflect.DeepEqual(want, got) {
|
||||
t.Errorf("range retry: got %v, want %v", got, want)
|
||||
}
|
||||
|
||||
prevRowRangeList := RowRangeList{NewRange("a", "d"), NewRange("e", "g"), NewRange("h", "l")}
|
||||
prevRowKey = "f"
|
||||
wantRowRangeList := RowRangeList{NewRange("f\x00", "g"), NewRange("h", "l")}
|
||||
got = prevRowRangeList.retainRowsAfter(prevRowKey)
|
||||
if !reflect.DeepEqual(wantRowRangeList, got) {
|
||||
t.Errorf("range list retry: got %v, want %v", got, wantRowRangeList)
|
||||
}
|
||||
|
||||
prevRowList := RowList{"a", "b", "c", "d", "e", "f"}
|
||||
prevRowKey = "b"
|
||||
wantList := RowList{"c", "d", "e", "f"}
|
||||
got = prevRowList.retainRowsAfter(prevRowKey)
|
||||
if !reflect.DeepEqual(wantList, got) {
|
||||
t.Errorf("list retry: got %v, want %v", got, wantList)
|
||||
}
|
||||
}
|
||||
|
||||
// TestRetryReadRows verifies that ReadRows resumes a scan after retryable
// stream errors, restarting from the key just after the last row received.
func TestRetryReadRows(t *testing.T) {
	ctx := context.Background()

	// Intercept requests and delegate to an interceptor defined by the test case.
	errCount := 0
	var f func(grpc.ServerStream) error
	errInjector := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
		if strings.HasSuffix(info.FullMethod, "ReadRows") {
			return f(ss)
		}
		return handler(ctx, ss)
	}

	tbl, cleanup, err := setupFakeServer(grpc.StreamInterceptor(errInjector))
	defer cleanup()
	if err != nil {
		t.Fatalf("fake server setup: %v", err)
	}

	errCount = 0
	// Test overall request failure and retries. Each switch case serves one
	// attempt; the request's start key shows where the client resumed.
	f = func(ss grpc.ServerStream) error {
		var err error
		req := new(btpb.ReadRowsRequest)
		ss.RecvMsg(req)
		switch errCount {
		case 0:
			// Retryable request failure
			err = grpc.Errorf(codes.Unavailable, "")
		case 1:
			// Write two rows then error. No data was delivered yet, so the
			// retried request must still start at "a".
			if want, got := "a", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got {
				t.Errorf("first retry, no data received yet: got %q, want %q", got, want)
			}
			writeReadRowsResponse(ss, "a", "b")
			err = grpc.Errorf(codes.Unavailable, "")
		case 2:
			// Retryable request failure. Rows "a" and "b" were delivered, so
			// the resumed request must start just after "b".
			if want, got := "b\x00", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got {
				t.Errorf("2 range retries: got %q, want %q", got, want)
			}
			err = grpc.Errorf(codes.Unavailable, "")
		case 3:
			// Write two more rows
			writeReadRowsResponse(ss, "c", "d")
			err = nil
		}
		errCount++
		return err
	}

	// Collect every row key the scan yields across all retries.
	var got []string
	tbl.ReadRows(ctx, NewRange("a", "z"), func(r Row) bool {
		got = append(got, r.Key())
		return true
	})
	want := []string{"a", "b", "c", "d"}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("retry range integration: got %v, want %v", got, want)
	}
}
|
||||
|
||||
func writeReadRowsResponse(ss grpc.ServerStream, rowKeys ...string) error {
|
||||
var chunks []*btpb.ReadRowsResponse_CellChunk
|
||||
for _, key := range rowKeys {
|
||||
chunks = append(chunks, &btpb.ReadRowsResponse_CellChunk{
|
||||
RowKey: []byte(key),
|
||||
FamilyName: &wrappers.StringValue{Value: "fm"},
|
||||
Qualifier: &wrappers.BytesValue{Value: []byte("col")},
|
||||
RowStatus: &btpb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: true},
|
||||
})
|
||||
}
|
||||
return ss.SendMsg(&btpb.ReadRowsResponse{Chunks: chunks})
|
||||
}
|
||||
1178
vendor/cloud.google.com/go/bigtable/testdata/read-rows-acceptance-test.json
generated
vendored
Normal file
1178
vendor/cloud.google.com/go/bigtable/testdata/read-rows-acceptance-test.json
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user