Vendor dependencies for GCS

Alexander Neumann
2017-08-05 20:17:15 +02:00
parent ba75a3884c
commit 8ca6a9a240
1228 changed files with 1769186 additions and 1 deletion

159
vendor/cloud.google.com/go/pubsub/acker.go generated vendored Normal file

@@ -0,0 +1,159 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"sync"
"time"
"golang.org/x/net/context"
)
// ackBuffer stores the pending ack IDs and notifies the Dirty channel when it becomes non-empty.
type ackBuffer struct {
Dirty chan struct{}
// Close Done when the ackBuffer is no longer needed.
Done chan struct{}
mu sync.Mutex
pending []string
send bool
}
// Add adds ackID to the buffer.
func (buf *ackBuffer) Add(ackID string) {
buf.mu.Lock()
defer buf.mu.Unlock()
buf.pending = append(buf.pending, ackID)
// If we are transitioning into a non-empty notification state.
if buf.send && len(buf.pending) == 1 {
buf.notify()
}
}
// RemoveAll removes all ackIDs from the buffer and returns them.
func (buf *ackBuffer) RemoveAll() []string {
buf.mu.Lock()
defer buf.mu.Unlock()
ret := buf.pending
buf.pending = nil
return ret
}
// SendNotifications enables sending dirty notifications on empty -> non-empty transitions.
// If the buffer is already non-empty, a notification will be sent immediately.
func (buf *ackBuffer) SendNotifications() {
buf.mu.Lock()
defer buf.mu.Unlock()
buf.send = true
// If we are transitioning into a non-empty notification state.
if len(buf.pending) > 0 {
buf.notify()
}
}
func (buf *ackBuffer) notify() {
go func() {
select {
case buf.Dirty <- struct{}{}:
case <-buf.Done:
}
}()
}
// acker acks messages in batches.
type acker struct {
s service
Ctx context.Context // The context to use when acknowledging messages.
Sub string // The full name of the subscription.
AckTick <-chan time.Time // AckTick supplies the frequency with which to make ack requests.
// Notify is called with an ack ID after the message with that ack ID
// has been processed. An ackID is considered to have been processed
// if at least one attempt has been made to acknowledge it.
Notify func(string)
ackBuffer
wg sync.WaitGroup
done chan struct{}
}
// Start initiates processing of ackIDs which are added via Add.
// Notify is called with each ackID once it has been processed.
func (a *acker) Start() {
a.done = make(chan struct{})
a.ackBuffer.Dirty = make(chan struct{})
a.ackBuffer.Done = a.done
a.wg.Add(1)
go func() {
defer a.wg.Done()
for {
select {
case <-a.ackBuffer.Dirty:
a.ack(a.ackBuffer.RemoveAll())
case <-a.AckTick:
a.ack(a.ackBuffer.RemoveAll())
case <-a.done:
return
}
}
}()
}
// Ack adds an ack id to be acked in the next batch.
func (a *acker) Ack(ackID string) {
a.ackBuffer.Add(ackID)
}
// FastMode switches acker into a mode which acks messages as they arrive, rather than waiting
// for a.AckTick.
func (a *acker) FastMode() {
a.ackBuffer.SendNotifications()
}
// Stop drops all pending messages, and releases resources before returning.
func (a *acker) Stop() {
close(a.done)
a.wg.Wait()
}
const maxAckAttempts = 2
// ack acknowledges the supplied ackIDs.
// After the acknowledgement request has completed (regardless of its success
// or failure), ids will be passed to a.Notify.
func (a *acker) ack(ids []string) {
head, tail := a.s.splitAckIDs(ids)
for len(head) > 0 {
for i := 0; i < maxAckAttempts; i++ {
if a.s.acknowledge(a.Ctx, a.Sub, head) == nil {
break
}
}
// NOTE: if retry gives up and returns an error, we simply drop
// those ack IDs. The messages will be redelivered and this is
// a documented behaviour of the API.
head, tail = a.s.splitAckIDs(tail)
}
for _, id := range ids {
a.Notify(id)
}
}
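
The acker above batches ack IDs: they accumulate in ackBuffer and are flushed either when AckTick fires or, once SendNotifications has been called, as soon as the buffer becomes non-empty. A minimal standalone sketch of that select-driven batching pattern (not part of the vendored code; the batcher type and its fields are invented for illustration):

package main

import (
    "fmt"
    "sync"
    "time"
)

// batcher collects IDs and flushes them on a tick or on a dirty notification,
// mirroring the ackBuffer/acker split above in miniature.
type batcher struct {
    mu      sync.Mutex
    pending []string
    dirty   chan struct{} // signalled on empty -> non-empty transitions
    done    chan struct{}
    flush   func([]string)
}

func (b *batcher) add(id string) {
    b.mu.Lock()
    b.pending = append(b.pending, id)
    first := len(b.pending) == 1
    b.mu.Unlock()
    if first {
        select {
        case b.dirty <- struct{}{}:
        case <-b.done:
        }
    }
}

func (b *batcher) removeAll() []string {
    b.mu.Lock()
    defer b.mu.Unlock()
    ids := b.pending
    b.pending = nil
    return ids
}

func (b *batcher) run(tick <-chan time.Time) {
    for {
        select {
        case <-b.dirty:
        case <-tick:
        case <-b.done:
            return
        }
        if ids := b.removeAll(); len(ids) > 0 {
            b.flush(ids)
        }
    }
}

func main() {
    b := &batcher{
        dirty: make(chan struct{}, 1),
        done:  make(chan struct{}),
        flush: func(ids []string) { fmt.Println("acking", ids) },
    }
    ticker := time.NewTicker(50 * time.Millisecond)
    defer ticker.Stop()
    go b.run(ticker.C)

    for _, id := range []string{"a", "b", "c"} {
        b.add(id)
    }
    time.Sleep(100 * time.Millisecond)
    close(b.done)
}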

262
vendor/cloud.google.com/go/pubsub/acker_test.go generated vendored Normal file

@@ -0,0 +1,262 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"errors"
"reflect"
"sort"
"testing"
"time"
"golang.org/x/net/context"
)
func TestAcker(t *testing.T) {
tick := make(chan time.Time)
s := &testService{acknowledgeCalled: make(chan acknowledgeCall)}
processed := make(chan string, 10)
acker := &acker{
s: s,
Ctx: context.Background(),
Sub: "subname",
AckTick: tick,
Notify: func(ackID string) { processed <- ackID },
}
acker.Start()
checkAckProcessed := func(ackIDs []string) {
got := <-s.acknowledgeCalled
sort.Strings(got.ackIDs)
want := acknowledgeCall{
subName: "subname",
ackIDs: ackIDs,
}
if !reflect.DeepEqual(got, want) {
t.Errorf("acknowledge: got:\n%v\nwant:\n%v", got, want)
}
}
acker.Ack("a")
acker.Ack("b")
tick <- time.Time{}
checkAckProcessed([]string{"a", "b"})
acker.Ack("c")
tick <- time.Time{}
checkAckProcessed([]string{"c"})
acker.Stop()
// All IDs should have been sent to processed.
close(processed)
processedIDs := []string{}
for id := range processed {
processedIDs = append(processedIDs, id)
}
sort.Strings(processedIDs)
want := []string{"a", "b", "c"}
if !reflect.DeepEqual(processedIDs, want) {
t.Errorf("acker processed: got:\n%v\nwant:\n%v", processedIDs, want)
}
}
func TestAckerFastMode(t *testing.T) {
tick := make(chan time.Time)
s := &testService{acknowledgeCalled: make(chan acknowledgeCall)}
processed := make(chan string, 10)
acker := &acker{
s: s,
Ctx: context.Background(),
Sub: "subname",
AckTick: tick,
Notify: func(ackID string) { processed <- ackID },
}
acker.Start()
checkAckProcessed := func(ackIDs []string) {
got := <-s.acknowledgeCalled
sort.Strings(got.ackIDs)
want := acknowledgeCall{
subName: "subname",
ackIDs: ackIDs,
}
if !reflect.DeepEqual(got, want) {
t.Errorf("acknowledge: got:\n%v\nwant:\n%v", got, want)
}
}
// No ticks are sent; fast mode doesn't need them.
acker.Ack("a")
acker.Ack("b")
acker.FastMode()
checkAckProcessed([]string{"a", "b"})
acker.Ack("c")
checkAckProcessed([]string{"c"})
acker.Stop()
// All IDs should have been sent to processed.
close(processed)
processedIDs := []string{}
for id := range processed {
processedIDs = append(processedIDs, id)
}
sort.Strings(processedIDs)
want := []string{"a", "b", "c"}
if !reflect.DeepEqual(processedIDs, want) {
t.Errorf("acker processed: got:\n%v\nwant:\n%v", processedIDs, want)
}
}
// TestAckerStop checks that Stop returns immediately.
func TestAckerStop(t *testing.T) {
tick := make(chan time.Time)
s := &testService{acknowledgeCalled: make(chan acknowledgeCall, 10)}
processed := make(chan string)
acker := &acker{
s: s,
Ctx: context.Background(),
Sub: "subname",
AckTick: tick,
Notify: func(ackID string) { processed <- ackID },
}
acker.Start()
stopped := make(chan struct{})
acker.Ack("a")
go func() {
acker.Stop()
stopped <- struct{}{}
}()
// Stopped should have been written to by the time this sleep completes.
time.Sleep(time.Millisecond)
// Receiving from processed should cause Stop to subsequently return,
// so it should never be possible to read from stopped before
// processed.
select {
case <-stopped:
case <-processed:
t.Errorf("acker.Stop processed an ack id before returning")
case <-time.After(time.Millisecond):
t.Errorf("acker.Stop never returned")
}
}
type ackCallResult struct {
ackIDs []string
err error
}
type ackService struct {
service
calls []ackCallResult
t *testing.T // used for error logging.
}
func (as *ackService) acknowledge(ctx context.Context, subName string, ackIDs []string) error {
if len(as.calls) == 0 {
as.t.Fatalf("unexpected call to acknowledge: ackIDs: %v", ackIDs)
}
call := as.calls[0]
as.calls = as.calls[1:]
if got, want := ackIDs, call.ackIDs; !reflect.DeepEqual(got, want) {
as.t.Errorf("unexpected arguments to acknowledge: got: %v ; want: %v", got, want)
}
return call.err
}
// Test implementation returns the first 2 elements as head, and the rest as tail.
func (as *ackService) splitAckIDs(ids []string) ([]string, []string) {
if len(ids) < 2 {
return ids, nil
}
return ids[:2], ids[2:]
}
func TestAckerSplitsBatches(t *testing.T) {
type testCase struct {
calls []ackCallResult
}
for _, tc := range []testCase{
{
calls: []ackCallResult{
{
ackIDs: []string{"a", "b"},
},
{
ackIDs: []string{"c", "d"},
},
{
ackIDs: []string{"e", "f"},
},
},
},
{
calls: []ackCallResult{
{
ackIDs: []string{"a", "b"},
err: errors.New("bang"),
},
// On error we retry once.
{
ackIDs: []string{"a", "b"},
err: errors.New("bang"),
},
// We give up after failing twice, so we move on to the next set, "c" and "d".
{
ackIDs: []string{"c", "d"},
err: errors.New("bang"),
},
// Again, we retry once.
{
ackIDs: []string{"c", "d"},
},
{
ackIDs: []string{"e", "f"},
},
},
},
} {
s := &ackService{
t: t,
calls: tc.calls,
}
acker := &acker{
s: s,
Ctx: context.Background(),
Sub: "subname",
Notify: func(string) {},
}
acker.ack([]string{"a", "b", "c", "d", "e", "f"})
if len(s.calls) != 0 {
t.Errorf("expected ack calls did not occur: %v", s.calls)
}
}
}


@@ -0,0 +1,65 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package pubsub
import (
pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
)
import (
"strconv"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
)
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now
func TestPublisherSmoke(t *testing.T) {
if testing.Short() {
t.Skip("skipping smoke test in short mode")
}
ctx := context.Background()
ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
if ts == nil {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
}
projectId := testutil.ProjID()
_ = projectId
c, err := NewPublisherClient(ctx, option.WithTokenSource(ts))
if err != nil {
t.Fatal(err)
}
var formattedProject string = PublisherProjectPath(projectId)
var request = &pubsubpb.ListTopicsRequest{
Project: formattedProject,
}
iter := c.ListTopics(ctx, request)
if _, err := iter.Next(); err != nil && err != iterator.Done {
t.Error(err)
}
}

9
vendor/cloud.google.com/go/pubsub/apiv1/README.md generated vendored Normal file

@@ -0,0 +1,9 @@
Auto-generated pubsub v1 clients
=================================
This package includes auto-generated clients for the pubsub v1 API.
Use the handwritten client (in the parent directory,
cloud.google.com/go/pubsub) in preference to this.
This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME.

45
vendor/cloud.google.com/go/pubsub/apiv1/doc.go generated vendored Normal file

@@ -0,0 +1,45 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
// Package pubsub is an experimental, auto-generated package for the
// Google Cloud Pub/Sub API.
//
// Provides reliable, many-to-many, asynchronous messaging between
// applications.
//
// Use the client at cloud.google.com/go/pubsub in preference to this.
package pubsub // import "cloud.google.com/go/pubsub/apiv1"
import (
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
)
func insertXGoog(ctx context.Context, val []string) context.Context {
md, _ := metadata.FromOutgoingContext(ctx)
md = md.Copy()
md["x-goog-api-client"] = val
return metadata.NewOutgoingContext(ctx, md)
}
// DefaultAuthScopes reports the authentication scopes required
// by this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/pubsub",
}
}
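
insertXGoog above copies the existing outgoing gRPC metadata, adds the x-goog-api-client key, and re-attaches it to the context. A minimal standalone sketch of that pattern, assuming only the grpc metadata package already imported above (the header value is a placeholder):

package main

import (
    "fmt"

    "golang.org/x/net/context"
    "google.golang.org/grpc/metadata"
)

// withHeader returns a context whose outgoing metadata carries key=vals,
// preserving any metadata that was already attached.
func withHeader(ctx context.Context, key string, vals []string) context.Context {
    md, _ := metadata.FromOutgoingContext(ctx) // ok to ignore the bool; Copy handles nil
    md = md.Copy()
    md[key] = vals
    return metadata.NewOutgoingContext(ctx, md)
}

func main() {
    // Placeholder header value for illustration only.
    ctx := withHeader(context.Background(), "x-goog-api-client", []string{"gl-go/1.8 gapic/0.1.0"})
    md, _ := metadata.FromOutgoingContext(ctx)
    fmt.Println(md["x-goog-api-client"])
}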

1691
vendor/cloud.google.com/go/pubsub/apiv1/mock_test.go generated vendored Normal file

File diff suppressed because it is too large.


@@ -0,0 +1,402 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package pubsub
import (
"math"
"time"
"cloud.google.com/go/iam"
"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/transport"
pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
var (
publisherProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
publisherTopicPathTemplate = gax.MustCompilePathTemplate("projects/{project}/topics/{topic}")
)
// PublisherCallOptions contains the retry settings for each method of PublisherClient.
type PublisherCallOptions struct {
CreateTopic []gax.CallOption
Publish []gax.CallOption
GetTopic []gax.CallOption
ListTopics []gax.CallOption
ListTopicSubscriptions []gax.CallOption
DeleteTopic []gax.CallOption
}
func defaultPublisherClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("pubsub.googleapis.com:443"),
option.WithScopes(DefaultAuthScopes()...),
}
}
func defaultPublisherCallOptions() *PublisherCallOptions {
retry := map[[2]string][]gax.CallOption{
{"default", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
{"messaging", "one_plus_delivery"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Canceled,
codes.Unknown,
codes.DeadlineExceeded,
codes.ResourceExhausted,
codes.Aborted,
codes.Internal,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &PublisherCallOptions{
CreateTopic: retry[[2]string{"default", "idempotent"}],
Publish: retry[[2]string{"messaging", "one_plus_delivery"}],
GetTopic: retry[[2]string{"default", "idempotent"}],
ListTopics: retry[[2]string{"default", "idempotent"}],
ListTopicSubscriptions: retry[[2]string{"default", "idempotent"}],
DeleteTopic: retry[[2]string{"default", "idempotent"}],
}
}
// PublisherClient is a client for interacting with Google Cloud Pub/Sub API.
type PublisherClient struct {
// The connection to the service.
conn *grpc.ClientConn
// The gRPC API client.
publisherClient pubsubpb.PublisherClient
// The call options for this service.
CallOptions *PublisherCallOptions
// The metadata to be sent with each request.
xGoogHeader []string
}
// NewPublisherClient creates a new publisher client.
//
// The service that an application uses to manipulate topics, and to send
// messages to a topic.
func NewPublisherClient(ctx context.Context, opts ...option.ClientOption) (*PublisherClient, error) {
conn, err := transport.DialGRPC(ctx, append(defaultPublisherClientOptions(), opts...)...)
if err != nil {
return nil, err
}
c := &PublisherClient{
conn: conn,
CallOptions: defaultPublisherCallOptions(),
publisherClient: pubsubpb.NewPublisherClient(conn),
}
c.SetGoogleClientInfo()
return c, nil
}
// Connection returns the client's connection to the API service.
func (c *PublisherClient) Connection() *grpc.ClientConn {
return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *PublisherClient) Close() error {
return c.conn.Close()
}
// SetGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *PublisherClient) SetGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
}
// PublisherProjectPath returns the path for the project resource.
func PublisherProjectPath(project string) string {
path, err := publisherProjectPathTemplate.Render(map[string]string{
"project": project,
})
if err != nil {
panic(err)
}
return path
}
// PublisherTopicPath returns the path for the topic resource.
func PublisherTopicPath(project, topic string) string {
path, err := publisherTopicPathTemplate.Render(map[string]string{
"project": project,
"topic": topic,
})
if err != nil {
panic(err)
}
return path
}
func (c *PublisherClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle {
return iam.InternalNewHandle(c.Connection(), subscription.Name)
}
func (c *PublisherClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle {
return iam.InternalNewHandle(c.Connection(), topic.Name)
}
// CreateTopic creates the given topic with the given name.
func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic, opts ...gax.CallOption) (*pubsubpb.Topic, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.CreateTopic[0:len(c.CallOptions.CreateTopic):len(c.CallOptions.CreateTopic)], opts...)
var resp *pubsubpb.Topic
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.publisherClient.CreateTopic(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// Publish adds one or more messages to the topic. Returns `NOT_FOUND` if the topic
// does not exist. The message payload must not be empty; it must contain
// either a non-empty data field, or at least one attribute.
func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest, opts ...gax.CallOption) (*pubsubpb.PublishResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.Publish[0:len(c.CallOptions.Publish):len(c.CallOptions.Publish)], opts...)
var resp *pubsubpb.PublishResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.publisherClient.Publish(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// GetTopic gets the configuration of a topic.
func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.GetTopic[0:len(c.CallOptions.GetTopic):len(c.CallOptions.GetTopic)], opts...)
var resp *pubsubpb.Topic
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.publisherClient.GetTopic(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// ListTopics lists matching topics.
func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest, opts ...gax.CallOption) *TopicIterator {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ListTopics[0:len(c.CallOptions.ListTopics):len(c.CallOptions.ListTopics)], opts...)
it := &TopicIterator{}
it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Topic, string, error) {
var resp *pubsubpb.ListTopicsResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.publisherClient.ListTopics(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.Topics, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
return it
}
// ListTopicSubscriptions lists the name of the subscriptions for this topic.
func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest, opts ...gax.CallOption) *StringIterator {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ListTopicSubscriptions[0:len(c.CallOptions.ListTopicSubscriptions):len(c.CallOptions.ListTopicSubscriptions)], opts...)
it := &StringIterator{}
it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) {
var resp *pubsubpb.ListTopicSubscriptionsResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.publisherClient.ListTopicSubscriptions(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.Subscriptions, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
return it
}
// DeleteTopic deletes the topic with the given name. Returns `NOT_FOUND` if the topic
// does not exist. After a topic is deleted, a new topic may be created with
// the same name; this is an entirely new topic with none of the old
// configuration or subscriptions. Existing subscriptions to this topic are
// not deleted, but their `topic` field is set to `_deleted-topic_`.
func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest, opts ...gax.CallOption) error {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.DeleteTopic[0:len(c.CallOptions.DeleteTopic):len(c.CallOptions.DeleteTopic)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.publisherClient.DeleteTopic(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}
// StringIterator manages a stream of string.
type StringIterator struct {
items []string
pageInfo *iterator.PageInfo
nextFunc func() error
// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *StringIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *StringIterator) Next() (string, error) {
var item string
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}
func (it *StringIterator) bufLen() int {
return len(it.items)
}
func (it *StringIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}
// TopicIterator manages a stream of *pubsubpb.Topic.
type TopicIterator struct {
items []*pubsubpb.Topic
pageInfo *iterator.PageInfo
nextFunc func() error
// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Topic, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TopicIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TopicIterator) Next() (*pubsubpb.Topic, error) {
var item *pubsubpb.Topic
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}
func (it *TopicIterator) bufLen() int {
return len(it.items)
}
func (it *TopicIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}
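
Every method above appends its per-call opts after the client's default CallOptions, so a caller should be able to override the retry policy for a single RPC with gax.WithRetry. A hedged sketch under that assumption; the project and topic names are hypothetical:

package main

import (
    "time"

    pubsub "cloud.google.com/go/pubsub/apiv1"
    gax "github.com/googleapis/gax-go"
    "golang.org/x/net/context"
    pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
    "google.golang.org/grpc/codes"
)

func main() {
    ctx := context.Background()
    c, err := pubsub.NewPublisherClient(ctx)
    if err != nil {
        return // handle error
    }
    defer c.Close()

    // Retry only on UNAVAILABLE, with a slower backoff than the defaults.
    // Because per-call options are resolved after the defaults, this retryer
    // should replace the one configured in defaultPublisherCallOptions.
    retryUnavailable := gax.WithRetry(func() gax.Retryer {
        return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
            Initial:    200 * time.Millisecond,
            Max:        10 * time.Second,
            Multiplier: 2,
        })
    })

    topic, err := c.CreateTopic(ctx, &pubsubpb.Topic{
        Name: pubsub.PublisherTopicPath("my-project", "my-topic"), // hypothetical IDs
    }, retryUnavailable)
    if err != nil {
        return // handle error
    }
    _ = topic
}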


@@ -0,0 +1,186 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package pubsub_test
import (
"cloud.google.com/go/pubsub/apiv1"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
)
func ExampleNewPublisherClient() {
ctx := context.Background()
c, err := pubsub.NewPublisherClient(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use client.
_ = c
}
func ExamplePublisherClient_SubscriptionIAM() {
ctx := context.Background()
c, err := pubsub.NewPublisherClient(ctx)
if err != nil {
// TODO: Handle error.
}
subscription := &pubsubpb.Subscription{}
h := c.SubscriptionIAM(subscription)
policy, err := h.Policy(ctx)
if err != nil {
// TODO: Handle error.
}
//TODO: Use the IAM policy
_ = policy
}
func ExamplePublisherClient_TopicIAM() {
ctx := context.Background()
c, err := pubsub.NewPublisherClient(ctx)
if err != nil {
// TODO: Handle error.
}
topic := &pubsubpb.Topic{}
h := c.TopicIAM(topic)
policy, err := h.Policy(ctx)
if err != nil {
// TODO: Handle error.
}
//TODO: Use the IAM policy
_ = policy
}
func ExamplePublisherClient_CreateTopic() {
ctx := context.Background()
c, err := pubsub.NewPublisherClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.Topic{
// TODO: Fill request struct fields.
}
resp, err := c.CreateTopic(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExamplePublisherClient_Publish() {
ctx := context.Background()
c, err := pubsub.NewPublisherClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.PublishRequest{
// TODO: Fill request struct fields.
}
resp, err := c.Publish(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExamplePublisherClient_GetTopic() {
ctx := context.Background()
c, err := pubsub.NewPublisherClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.GetTopicRequest{
// TODO: Fill request struct fields.
}
resp, err := c.GetTopic(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExamplePublisherClient_ListTopics() {
ctx := context.Background()
c, err := pubsub.NewPublisherClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.ListTopicsRequest{
// TODO: Fill request struct fields.
}
it := c.ListTopics(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}
func ExamplePublisherClient_ListTopicSubscriptions() {
ctx := context.Background()
c, err := pubsub.NewPublisherClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.ListTopicSubscriptionsRequest{
// TODO: Fill request struct fields.
}
it := c.ListTopicSubscriptions(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}
func ExamplePublisherClient_DeleteTopic() {
ctx := context.Background()
c, err := pubsub.NewPublisherClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.DeleteTopicRequest{
// TODO: Fill request struct fields.
}
err = c.DeleteTopic(ctx, req)
if err != nil {
// TODO: Handle error.
}
}
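
The Next loops above consume topics one at a time; because TopicIterator exposes PageInfo, the generic Pager in google.golang.org/api/iterator can also walk the results a fixed-size page at a time. A sketch under that assumption; the project ID is hypothetical:

package main

import (
    "fmt"

    pubsub "cloud.google.com/go/pubsub/apiv1"
    "golang.org/x/net/context"
    "google.golang.org/api/iterator"
    pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
)

func main() {
    ctx := context.Background()
    c, err := pubsub.NewPublisherClient(ctx)
    if err != nil {
        return // handle error
    }
    defer c.Close()

    it := c.ListTopics(ctx, &pubsubpb.ListTopicsRequest{
        Project: pubsub.PublisherProjectPath("my-project"), // hypothetical project ID
    })

    // Fetch up to 50 topics per page; an empty token starts at the first page.
    pager := iterator.NewPager(it, 50, "")
    for {
        var page []*pubsubpb.Topic
        nextToken, err := pager.NextPage(&page)
        if err != nil {
            return // handle error
        }
        fmt.Printf("page with %d topics\n", len(page))
        if nextToken == "" {
            break
        }
    }
}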


@@ -0,0 +1,604 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package pubsub
import (
"math"
"time"
"cloud.google.com/go/iam"
"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/transport"
pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
var (
subscriberProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
subscriberSnapshotPathTemplate = gax.MustCompilePathTemplate("projects/{project}/snapshots/{snapshot}")
subscriberSubscriptionPathTemplate = gax.MustCompilePathTemplate("projects/{project}/subscriptions/{subscription}")
subscriberTopicPathTemplate = gax.MustCompilePathTemplate("projects/{project}/topics/{topic}")
)
// SubscriberCallOptions contains the retry settings for each method of SubscriberClient.
type SubscriberCallOptions struct {
CreateSubscription []gax.CallOption
GetSubscription []gax.CallOption
UpdateSubscription []gax.CallOption
ListSubscriptions []gax.CallOption
DeleteSubscription []gax.CallOption
ModifyAckDeadline []gax.CallOption
Acknowledge []gax.CallOption
Pull []gax.CallOption
StreamingPull []gax.CallOption
ModifyPushConfig []gax.CallOption
ListSnapshots []gax.CallOption
CreateSnapshot []gax.CallOption
DeleteSnapshot []gax.CallOption
Seek []gax.CallOption
}
func defaultSubscriberClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("pubsub.googleapis.com:443"),
option.WithScopes(DefaultAuthScopes()...),
}
}
func defaultSubscriberCallOptions() *SubscriberCallOptions {
retry := map[[2]string][]gax.CallOption{
{"default", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
{"messaging", "pull"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Canceled,
codes.DeadlineExceeded,
codes.ResourceExhausted,
codes.Internal,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &SubscriberCallOptions{
CreateSubscription: retry[[2]string{"default", "idempotent"}],
GetSubscription: retry[[2]string{"default", "idempotent"}],
UpdateSubscription: retry[[2]string{"default", "idempotent"}],
ListSubscriptions: retry[[2]string{"default", "idempotent"}],
DeleteSubscription: retry[[2]string{"default", "idempotent"}],
ModifyAckDeadline: retry[[2]string{"default", "non_idempotent"}],
Acknowledge: retry[[2]string{"messaging", "non_idempotent"}],
Pull: retry[[2]string{"messaging", "pull"}],
StreamingPull: retry[[2]string{"messaging", "pull"}],
ModifyPushConfig: retry[[2]string{"default", "non_idempotent"}],
ListSnapshots: retry[[2]string{"default", "idempotent"}],
CreateSnapshot: retry[[2]string{"default", "idempotent"}],
DeleteSnapshot: retry[[2]string{"default", "idempotent"}],
Seek: retry[[2]string{"default", "non_idempotent"}],
}
}
// SubscriberClient is a client for interacting with Google Cloud Pub/Sub API.
type SubscriberClient struct {
// The connection to the service.
conn *grpc.ClientConn
// The gRPC API client.
subscriberClient pubsubpb.SubscriberClient
// The call options for this service.
CallOptions *SubscriberCallOptions
// The metadata to be sent with each request.
xGoogHeader []string
}
// NewSubscriberClient creates a new subscriber client.
//
// The service that an application uses to manipulate subscriptions and to
// consume messages from a subscription via the `Pull` method.
func NewSubscriberClient(ctx context.Context, opts ...option.ClientOption) (*SubscriberClient, error) {
conn, err := transport.DialGRPC(ctx, append(defaultSubscriberClientOptions(), opts...)...)
if err != nil {
return nil, err
}
c := &SubscriberClient{
conn: conn,
CallOptions: defaultSubscriberCallOptions(),
subscriberClient: pubsubpb.NewSubscriberClient(conn),
}
c.SetGoogleClientInfo()
return c, nil
}
// Connection returns the client's connection to the API service.
func (c *SubscriberClient) Connection() *grpc.ClientConn {
return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *SubscriberClient) Close() error {
return c.conn.Close()
}
// SetGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *SubscriberClient) SetGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
}
// SubscriberProjectPath returns the path for the project resource.
func SubscriberProjectPath(project string) string {
path, err := subscriberProjectPathTemplate.Render(map[string]string{
"project": project,
})
if err != nil {
panic(err)
}
return path
}
// SubscriberSnapshotPath returns the path for the snapshot resource.
func SubscriberSnapshotPath(project, snapshot string) string {
path, err := subscriberSnapshotPathTemplate.Render(map[string]string{
"project": project,
"snapshot": snapshot,
})
if err != nil {
panic(err)
}
return path
}
// SubscriberSubscriptionPath returns the path for the subscription resource.
func SubscriberSubscriptionPath(project, subscription string) string {
path, err := subscriberSubscriptionPathTemplate.Render(map[string]string{
"project": project,
"subscription": subscription,
})
if err != nil {
panic(err)
}
return path
}
// SubscriberTopicPath returns the path for the topic resource.
func SubscriberTopicPath(project, topic string) string {
path, err := subscriberTopicPathTemplate.Render(map[string]string{
"project": project,
"topic": topic,
})
if err != nil {
panic(err)
}
return path
}
func (c *SubscriberClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle {
return iam.InternalNewHandle(c.Connection(), subscription.Name)
}
func (c *SubscriberClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle {
return iam.InternalNewHandle(c.Connection(), topic.Name)
}
// CreateSubscription creates a subscription to a given topic.
// If the subscription already exists, returns `ALREADY_EXISTS`.
// If the corresponding topic doesn't exist, returns `NOT_FOUND`.
//
// If the name is not provided in the request, the server will assign a random
// name for this subscription on the same project as the topic, conforming
// to the
// [resource name format](https://cloud.google.com/pubsub/docs/overview#names).
// The generated name is populated in the returned Subscription object.
// Note that for REST API requests, you must specify a name in the request.
func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription, opts ...gax.CallOption) (*pubsubpb.Subscription, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.CreateSubscription[0:len(c.CallOptions.CreateSubscription):len(c.CallOptions.CreateSubscription)], opts...)
var resp *pubsubpb.Subscription
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.subscriberClient.CreateSubscription(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// GetSubscription gets the configuration details of a subscription.
func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.GetSubscription[0:len(c.CallOptions.GetSubscription):len(c.CallOptions.GetSubscription)], opts...)
var resp *pubsubpb.Subscription
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.subscriberClient.GetSubscription(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// UpdateSubscription updates an existing subscription. Note that certain properties of a
// subscription, such as its topic, are not modifiable.
func (c *SubscriberClient) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.UpdateSubscription[0:len(c.CallOptions.UpdateSubscription):len(c.CallOptions.UpdateSubscription)], opts...)
var resp *pubsubpb.Subscription
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.subscriberClient.UpdateSubscription(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// ListSubscriptions lists matching subscriptions.
func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest, opts ...gax.CallOption) *SubscriptionIterator {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ListSubscriptions[0:len(c.CallOptions.ListSubscriptions):len(c.CallOptions.ListSubscriptions)], opts...)
it := &SubscriptionIterator{}
it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Subscription, string, error) {
var resp *pubsubpb.ListSubscriptionsResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.subscriberClient.ListSubscriptions(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.Subscriptions, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
return it
}
// DeleteSubscription deletes an existing subscription. All messages retained in the subscription
// are immediately dropped. Calls to `Pull` after deletion will return
// `NOT_FOUND`. After a subscription is deleted, a new one may be created with
// the same name, but the new one has no association with the old
// subscription or its topic unless the same topic is specified.
func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest, opts ...gax.CallOption) error {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.DeleteSubscription[0:len(c.CallOptions.DeleteSubscription):len(c.CallOptions.DeleteSubscription)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.subscriberClient.DeleteSubscription(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}
// ModifyAckDeadline modifies the ack deadline for a specific message. This method is useful
// to indicate that more time is needed to process a message by the
// subscriber, or to make the message available for redelivery if the
// processing was interrupted. Note that this does not modify the
// subscription-level `ackDeadlineSeconds` used for subsequent messages.
func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest, opts ...gax.CallOption) error {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ModifyAckDeadline[0:len(c.CallOptions.ModifyAckDeadline):len(c.CallOptions.ModifyAckDeadline)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.subscriberClient.ModifyAckDeadline(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}
// Acknowledge acknowledges the messages associated with the `ack_ids` in the
// `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages
// from the subscription.
//
// Acknowledging a message whose ack deadline has expired may succeed,
// but such a message may be redelivered later. Acknowledging a message more
// than once will not result in an error.
func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest, opts ...gax.CallOption) error {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.Acknowledge[0:len(c.CallOptions.Acknowledge):len(c.CallOptions.Acknowledge)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.subscriberClient.Acknowledge(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}
// Pull pulls messages from the server. Returns an empty list if there are no
// messages available in the backlog. The server may return `UNAVAILABLE` if
// there are too many concurrent pull requests pending for the given
// subscription.
func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest, opts ...gax.CallOption) (*pubsubpb.PullResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.Pull[0:len(c.CallOptions.Pull):len(c.CallOptions.Pull)], opts...)
var resp *pubsubpb.PullResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.subscriberClient.Pull(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// StreamingPull (EXPERIMENTAL) StreamingPull is an experimental feature. This RPC will
// respond with UNIMPLEMENTED errors unless you have been invited to test
// this feature. Contact cloud-pubsub@google.com with any questions.
//
// Establishes a stream with the server, which sends messages down to the
// client. The client streams acknowledgements and ack deadline modifications
// back to the server. The server will close the stream and return the status
// on any error. The server may close the stream with status `OK` to reassign
// server-side resources, in which case, the client should re-establish the
// stream. `UNAVAILABLE` may also be returned in the case of a transient error
// (e.g., a server restart). These should also be retried by the client. Flow
// control can be achieved by configuring the underlying RPC channel.
func (c *SubscriberClient) StreamingPull(ctx context.Context, opts ...gax.CallOption) (pubsubpb.Subscriber_StreamingPullClient, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.StreamingPull[0:len(c.CallOptions.StreamingPull):len(c.CallOptions.StreamingPull)], opts...)
var resp pubsubpb.Subscriber_StreamingPullClient
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.subscriberClient.StreamingPull(ctx, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// ModifyPushConfig modifies the `PushConfig` for a specified subscription.
//
// This may be used to change a push subscription to a pull one (signified by
// an empty `PushConfig`) or vice versa, or change the endpoint URL and other
// attributes of a push subscription. Messages will accumulate for delivery
// continuously through the call regardless of changes to the `PushConfig`.
func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest, opts ...gax.CallOption) error {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ModifyPushConfig[0:len(c.CallOptions.ModifyPushConfig):len(c.CallOptions.ModifyPushConfig)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.subscriberClient.ModifyPushConfig(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}
// ListSnapshots lists the existing snapshots.
func (c *SubscriberClient) ListSnapshots(ctx context.Context, req *pubsubpb.ListSnapshotsRequest, opts ...gax.CallOption) *SnapshotIterator {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ListSnapshots[0:len(c.CallOptions.ListSnapshots):len(c.CallOptions.ListSnapshots)], opts...)
it := &SnapshotIterator{}
it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Snapshot, string, error) {
var resp *pubsubpb.ListSnapshotsResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.subscriberClient.ListSnapshots(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.Snapshots, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
return it
}
// CreateSnapshot creates a snapshot from the requested subscription.
// If the snapshot already exists, returns `ALREADY_EXISTS`.
// If the requested subscription doesn't exist, returns `NOT_FOUND`.
//
// If the name is not provided in the request, the server will assign a random
// name for this snapshot on the same project as the subscription, conforming
// to the
// [resource name format](https://cloud.google.com/pubsub/docs/overview#names).
// The generated name is populated in the returned Snapshot object.
// Note that for REST API requests, you must specify a name in the request.
func (c *SubscriberClient) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.CreateSnapshot[0:len(c.CallOptions.CreateSnapshot):len(c.CallOptions.CreateSnapshot)], opts...)
var resp *pubsubpb.Snapshot
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.subscriberClient.CreateSnapshot(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// DeleteSnapshot removes an existing snapshot. All messages retained in the snapshot
// are immediately dropped. After a snapshot is deleted, a new one may be
// created with the same name, but the new one has no association with the old
// snapshot or its subscription, unless the same subscription is specified.
func (c *SubscriberClient) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest, opts ...gax.CallOption) error {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.DeleteSnapshot[0:len(c.CallOptions.DeleteSnapshot):len(c.CallOptions.DeleteSnapshot)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.subscriberClient.DeleteSnapshot(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}
// Seek seeks an existing subscription to a point in time or to a given snapshot,
// whichever is provided in the request.
func (c *SubscriberClient) Seek(ctx context.Context, req *pubsubpb.SeekRequest, opts ...gax.CallOption) (*pubsubpb.SeekResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.Seek[0:len(c.CallOptions.Seek):len(c.CallOptions.Seek)], opts...)
var resp *pubsubpb.SeekResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.subscriberClient.Seek(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// SnapshotIterator manages a stream of *pubsubpb.Snapshot.
type SnapshotIterator struct {
items []*pubsubpb.Snapshot
pageInfo *iterator.PageInfo
nextFunc func() error
// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Snapshot, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *SnapshotIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *SnapshotIterator) Next() (*pubsubpb.Snapshot, error) {
var item *pubsubpb.Snapshot
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}
func (it *SnapshotIterator) bufLen() int {
return len(it.items)
}
func (it *SnapshotIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}
// SubscriptionIterator manages a stream of *pubsubpb.Subscription.
type SubscriptionIterator struct {
items []*pubsubpb.Subscription
pageInfo *iterator.PageInfo
nextFunc func() error
// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Subscription, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *SubscriptionIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *SubscriptionIterator) Next() (*pubsubpb.Subscription, error) {
var item *pubsubpb.Subscription
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}
func (it *SubscriptionIterator) bufLen() int {
return len(it.items)
}
func (it *SubscriptionIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}
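
For completeness, a hedged sketch of a synchronous Pull followed by Acknowledge using the generated SubscriberClient above; the handwritten cloud.google.com/go/pubsub package is the recommended way to consume messages, and the project and subscription IDs here are hypothetical:

package main

import (
    "fmt"

    pubsub "cloud.google.com/go/pubsub/apiv1"
    "golang.org/x/net/context"
    pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
)

func main() {
    ctx := context.Background()
    c, err := pubsub.NewSubscriberClient(ctx)
    if err != nil {
        return // handle error
    }
    defer c.Close()

    sub := pubsub.SubscriberSubscriptionPath("my-project", "my-sub") // hypothetical IDs

    resp, err := c.Pull(ctx, &pubsubpb.PullRequest{
        Subscription: sub,
        MaxMessages:  10,
    })
    if err != nil {
        return // handle error
    }

    var ackIDs []string
    for _, m := range resp.ReceivedMessages {
        fmt.Printf("received: %s\n", m.Message.Data)
        ackIDs = append(ackIDs, m.AckId)
    }
    if len(ackIDs) == 0 {
        return
    }

    // Acknowledge everything we processed; unacked messages are redelivered
    // after the ack deadline expires.
    if err := c.Acknowledge(ctx, &pubsubpb.AcknowledgeRequest{
        Subscription: sub,
        AckIds:       ackIDs,
    }); err != nil {
        return // handle error
    }
}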


@@ -0,0 +1,340 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package pubsub_test
import (
"io"
"cloud.google.com/go/pubsub/apiv1"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
)
func ExampleNewSubscriberClient() {
ctx := context.Background()
c, err := pubsub.NewSubscriberClient(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use client.
_ = c
}
func ExampleSubscriberClient_SubscriptionIAM() {
ctx := context.Background()
c, err := pubsub.NewSubscriberClient(ctx)
if err != nil {
// TODO: Handle error.
}
subscription := &pubsubpb.Subscription{}
h := c.SubscriptionIAM(subscription)
policy, err := h.Policy(ctx)
if err != nil {
// TODO: Handle error.
}
//TODO: Use the IAM policy
_ = policy
}
func ExampleSubscriberClient_TopicIAM() {
ctx := context.Background()
c, err := pubsub.NewSubscriberClient(ctx)
if err != nil {
// TODO: Handle error.
}
topic := &pubsubpb.Topic{}
h := c.TopicIAM(topic)
policy, err := h.Policy(ctx)
if err != nil {
// TODO: Handle error.
}
//TODO: Use the IAM policy
_ = policy
}
func ExampleSubscriberClient_CreateSubscription() {
ctx := context.Background()
c, err := pubsub.NewSubscriberClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.Subscription{
// TODO: Fill request struct fields.
}
resp, err := c.CreateSubscription(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleSubscriberClient_GetSubscription() {
ctx := context.Background()
c, err := pubsub.NewSubscriberClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.GetSubscriptionRequest{
// TODO: Fill request struct fields.
}
resp, err := c.GetSubscription(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleSubscriberClient_UpdateSubscription() {
ctx := context.Background()
c, err := pubsub.NewSubscriberClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.UpdateSubscriptionRequest{
// TODO: Fill request struct fields.
}
resp, err := c.UpdateSubscription(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleSubscriberClient_ListSubscriptions() {
ctx := context.Background()
c, err := pubsub.NewSubscriberClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.ListSubscriptionsRequest{
// TODO: Fill request struct fields.
}
it := c.ListSubscriptions(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}
func ExampleSubscriberClient_DeleteSubscription() {
ctx := context.Background()
c, err := pubsub.NewSubscriberClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.DeleteSubscriptionRequest{
// TODO: Fill request struct fields.
}
err = c.DeleteSubscription(ctx, req)
if err != nil {
// TODO: Handle error.
}
}
func ExampleSubscriberClient_ModifyAckDeadline() {
ctx := context.Background()
c, err := pubsub.NewSubscriberClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.ModifyAckDeadlineRequest{
// TODO: Fill request struct fields.
}
err = c.ModifyAckDeadline(ctx, req)
if err != nil {
// TODO: Handle error.
}
}
func ExampleSubscriberClient_Acknowledge() {
ctx := context.Background()
c, err := pubsub.NewSubscriberClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.AcknowledgeRequest{
// TODO: Fill request struct fields.
}
err = c.Acknowledge(ctx, req)
if err != nil {
// TODO: Handle error.
}
}
func ExampleSubscriberClient_Pull() {
ctx := context.Background()
c, err := pubsub.NewSubscriberClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.PullRequest{
// TODO: Fill request struct fields.
}
resp, err := c.Pull(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleSubscriberClient_StreamingPull() {
ctx := context.Background()
c, err := pubsub.NewSubscriberClient(ctx)
if err != nil {
// TODO: Handle error.
}
stream, err := c.StreamingPull(ctx)
if err != nil {
// TODO: Handle error.
}
go func() {
reqs := []*pubsubpb.StreamingPullRequest{
// TODO: Create requests.
}
for _, req := range reqs {
if err := stream.Send(req); err != nil {
// TODO: Handle error.
}
}
stream.CloseSend()
}()
for {
resp, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}
func ExampleSubscriberClient_ModifyPushConfig() {
ctx := context.Background()
c, err := pubsub.NewSubscriberClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.ModifyPushConfigRequest{
// TODO: Fill request struct fields.
}
err = c.ModifyPushConfig(ctx, req)
if err != nil {
// TODO: Handle error.
}
}
func ExampleSubscriberClient_ListSnapshots() {
ctx := context.Background()
c, err := pubsub.NewSubscriberClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.ListSnapshotsRequest{
// TODO: Fill request struct fields.
}
it := c.ListSnapshots(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}
func ExampleSubscriberClient_CreateSnapshot() {
ctx := context.Background()
c, err := pubsub.NewSubscriberClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.CreateSnapshotRequest{
// TODO: Fill request struct fields.
}
resp, err := c.CreateSnapshot(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleSubscriberClient_DeleteSnapshot() {
ctx := context.Background()
c, err := pubsub.NewSubscriberClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.DeleteSnapshotRequest{
// TODO: Fill request struct fields.
}
err = c.DeleteSnapshot(ctx, req)
if err != nil {
// TODO: Handle error.
}
}
func ExampleSubscriberClient_Seek() {
ctx := context.Background()
c, err := pubsub.NewSubscriberClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &pubsubpb.SeekRequest{
// TODO: Fill request struct fields.
}
resp, err := c.Seek(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}

120
vendor/cloud.google.com/go/pubsub/doc.go generated vendored Normal file
View File

@@ -0,0 +1,120 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package pubsub provides an easy way to publish and receive Google Cloud Pub/Sub
messages, hiding the details of the underlying server RPCs. Google Cloud
Pub/Sub is a many-to-many, asynchronous messaging system that decouples senders
and receivers.
Note: This package is in beta. Some backwards-incompatible changes may occur.
More information about Google Cloud Pub/Sub is available at
https://cloud.google.com/pubsub/docs
Publishing
Google Cloud Pub/Sub messages are published to topics. Topics may be created
using the pubsub package like so:
topic, err := pubsubClient.CreateTopic(context.Background(), "topic-name")
Messages may then be published to a topic:
res := topic.Publish(ctx, &pubsub.Message{Data: []byte("payload")})
Publish queues the message for publishing and returns immediately. When enough
messages have accumulated, or enough time has elapsed, the batch of messages is
sent to the Pub/Sub service.
Publish returns a PublishResult, which behaves like a future: its Get method
blocks until the message has been sent to the service.
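For example (a sketch building on the res value above; error handling elided),
the message ID assigned by the service can be retrieved with Get:
	id, err := res.Get(ctx) // blocks until res's message has been sent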
The first time you call Publish on a topic, goroutines are started in the
background. To clean up these goroutines, call Stop:
topic.Stop()
Receiving
To receive messages published to a topic, clients create subscriptions
to the topic. There may be more than one subscription per topic; each message
that is published to the topic will be delivered to all of its subscriptions.
Subscriptions may be created like so:
sub, err := pubsubClient.CreateSubscription(context.Background(), "sub-name",
pubsub.SubscriptionConfig{Topic: topic})
Messages are then consumed from a subscription via callback.
err := sub.Receive(context.Background(), func(ctx context.Context, m *Message) {
log.Printf("Got message: %s", m.Data)
m.Ack()
})
if err != nil {
// Handle error.
}
The callback is invoked concurrently by multiple goroutines, maximizing
throughput. To terminate a call to Receive, cancel its context.
Once client code has processed the message, it must call Message.Ack, otherwise
the message will eventually be redelivered. As an optimization, if the client
cannot or doesn't want to process the message, it can call Message.Nack to
speed redelivery. For more information and configuration options, see
"Deadlines" below.
Note: It is possible for Messages to be redelivered, even if Message.Ack has
been called. Client code must be robust to multiple deliveries of messages.
Deadlines
The default pubsub deadlines are suitable for most use cases, but may be
overridden. This section describes the tradeoffs that should be considered
when overriding the defaults.
Behind the scenes, each message returned by the Pub/Sub server has an
associated lease, known as an "ACK deadline".
Unless a message is acknowledged within the ACK deadline, or the client requests that
the ACK deadline be extended, the message will become eligible for redelivery.
As a convenience, the pubsub package will automatically extend deadlines until
either:
* Message.Ack or Message.Nack is called, or
* the "MaxExtension" period elapses from the time the message is fetched from the server.
The initial ACK deadline given to each message defaults to 10 seconds, but may
be overridden during subscription creation. Selecting an ACK deadline is a
tradeoff between message redelivery latency and RPC volume. If the pubsub
package fails to acknowledge or extend a message (e.g. due to unexpected
termination of the process), a shorter ACK deadline will generally result in
faster message redelivery by the Pub/Sub system. However, a short ACK deadline
may also increase the number of deadline extension RPCs that the pubsub package
sends to the server.
The default max extension period is DefaultReceiveSettings.MaxExtension, and can
be overridden by setting Subscription.ReceiveSettings.MaxExtension. Selecting a
max extension period is a tradeoff between the speed at which client code must
process messages, and the redelivery delay if messages fail to be acknowledged
(e.g. because client code neglects to do so). Using a large MaxExtension
increases the available time for client code to process messages. However, if
the client code neglects to call Message.Ack/Nack, a large MaxExtension will
increase the delay before the message is redelivered.
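For example (a sketch with placeholder values), both settings can be adjusted:
	sub, err := pubsubClient.CreateSubscription(context.Background(), "sub-name",
		pubsub.SubscriptionConfig{Topic: topic, AckDeadline: 20 * time.Second})
	// ...
	sub.ReceiveSettings.MaxExtension = 10 * time.Minute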
Authentication
See examples of authorization and authentication at
https://godoc.org/cloud.google.com/go#pkg-examples.
*/
package pubsub // import "cloud.google.com/go/pubsub"

245
vendor/cloud.google.com/go/pubsub/endtoend_test.go generated vendored Normal file
View File

@@ -0,0 +1,245 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"bytes"
"fmt"
"log"
"math/rand"
"os"
"reflect"
"sync"
"testing"
"time"
"golang.org/x/net/context"
"cloud.google.com/go/internal/testutil"
"google.golang.org/api/option"
)
const timeout = time.Minute * 10
const ackDeadline = time.Second * 10
const nMessages = 1e4
// Buffer log messages to debug failures.
var logBuf bytes.Buffer
// TestEndToEnd pumps many messages into a topic and tests that they are all
// delivered to each subscription for the topic. It also tests that messages
// are not unexpectedly redelivered.
func TestEndToEnd(t *testing.T) {
t.Parallel()
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
log.SetOutput(&logBuf)
ctx := context.Background()
ts := testutil.TokenSource(ctx, ScopePubSub, ScopeCloudPlatform)
if ts == nil {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
}
now := time.Now()
topicName := fmt.Sprintf("endtoend-%d", now.UnixNano())
subPrefix := fmt.Sprintf("endtoend-%d", now.UnixNano())
client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts))
if err != nil {
t.Fatalf("Creating client error: %v", err)
}
var topic *Topic
if topic, err = client.CreateTopic(ctx, topicName); err != nil {
t.Fatalf("CreateTopic error: %v", err)
}
defer topic.Delete(ctx)
// Two subscriptions to the same topic.
var subs [2]*Subscription
for i := 0; i < len(subs); i++ {
subs[i], err = client.CreateSubscription(ctx, fmt.Sprintf("%s-%d", subPrefix, i), SubscriptionConfig{
Topic: topic,
AckDeadline: ackDeadline,
})
if err != nil {
t.Fatalf("CreateSub error: %v", err)
}
defer subs[i].Delete(ctx)
}
ids, err := publish(ctx, topic, nMessages)
topic.Stop()
if err != nil {
t.Fatalf("publish: %v", err)
}
wantCounts := make(map[string]int)
for _, id := range ids {
wantCounts[id] = 1
}
// recv provides an indication that messages are still arriving.
recv := make(chan struct{})
// We have two subscriptions to our topic.
// Each subscription will get a copy of each published message.
var wg sync.WaitGroup
cctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
consumers := []*consumer{
{counts: make(map[string]int), recv: recv, durations: []time.Duration{time.Hour}},
{counts: make(map[string]int), recv: recv,
durations: []time.Duration{ackDeadline, ackDeadline, ackDeadline / 2, ackDeadline / 2, time.Hour}},
}
for i, con := range consumers {
con := con
sub := subs[i]
wg.Add(1)
go func() {
defer wg.Done()
con.consume(t, cctx, sub)
}()
}
// Wait for a while after the last message before declaring quiescence.
// We wait a multiple of the ack deadline, for two reasons:
// 1. To detect if messages are redelivered after having their ack
// deadline extended.
// 2. To wait for redelivery of messages that were en route when a Receive
// is canceled. This can take considerably longer than the ack deadline.
quiescenceDur := ackDeadline * 6
quiescenceTimer := time.NewTimer(quiescenceDur)
loop:
for {
select {
case <-recv:
// Reset timer so we wait quiescenceDur after the last message.
// See https://godoc.org/time#Timer.Reset for why the Stop
// and channel drain are necessary.
if !quiescenceTimer.Stop() {
<-quiescenceTimer.C
}
quiescenceTimer.Reset(quiescenceDur)
case <-quiescenceTimer.C:
cancel()
log.Println("quiesced")
break loop
case <-cctx.Done():
t.Fatal("timed out")
}
}
wg.Wait()
ok := true
for i, con := range consumers {
if got, want := con.counts, wantCounts; !reflect.DeepEqual(got, want) {
t.Errorf("%d: message counts: %v\n", i, diff(got, want))
ok = false
}
}
if !ok {
logBuf.WriteTo(os.Stdout)
}
}
// publish publishes n messages to topic, and returns the published message IDs.
func publish(ctx context.Context, topic *Topic, n int) ([]string, error) {
var rs []*PublishResult
for i := 0; i < n; i++ {
m := &Message{Data: []byte(fmt.Sprintf("msg %d", i))}
rs = append(rs, topic.Publish(ctx, m))
}
var ids []string
for _, r := range rs {
id, err := r.Get(ctx)
if err != nil {
return nil, err
}
ids = append(ids, id)
}
return ids, nil
}
// consumer consumes messages according to its configuration.
type consumer struct {
durations []time.Duration
// A value is sent to recv each time a message is processed.
recv chan struct{}
mu sync.Mutex
counts map[string]int
total int
}
// consume reads messages from a subscription, and keeps track of what it receives in c.
// After consume returns, the caller should wait on wg to ensure that no more updates to c will be made.
func (c *consumer) consume(t *testing.T, ctx context.Context, sub *Subscription) {
for _, dur := range c.durations {
ctx2, cancel := context.WithTimeout(ctx, dur)
defer cancel()
id := sub.name[len(sub.name)-2:]
log.Printf("%s: start receive", id)
prev := c.total
err := sub.Receive(ctx2, c.process)
log.Printf("%s: end receive; read %d", id, c.total-prev)
if err != nil {
t.Errorf("error from Receive: %v", err)
return
}
select {
case <-ctx.Done():
return
default:
}
}
}
// process handles a message and records it in c.
func (c *consumer) process(_ context.Context, m *Message) {
c.mu.Lock()
c.counts[m.ID] += 1
c.total++
c.mu.Unlock()
c.recv <- struct{}{}
// Simulate time taken to process m, while continuing to process more messages.
// Some messages will need to have their ack deadline extended due to this delay.
delay := rand.Intn(int(ackDeadline * 3))
time.AfterFunc(time.Duration(delay), m.Ack)
}
// diff returns counts of the differences between got and want.
func diff(got, want map[string]int) map[string]int {
ids := make(map[string]struct{})
for k := range got {
ids[k] = struct{}{}
}
for k := range want {
ids[k] = struct{}{}
}
gotWantCount := make(map[string]int)
for k := range ids {
if got[k] == want[k] {
continue
}
desc := fmt.Sprintf("<got: %v ; want: %v>", got[k], want[k])
gotWantCount[desc] += 1
}
return gotWantCount
}

View File

@@ -0,0 +1,54 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub_test
import (
"fmt"
"cloud.google.com/go/pubsub"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
)
func ExampleClient_Subscriptions() {
ctx := context.Background()
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
// List all subscriptions of the project.
it := client.Subscriptions(ctx)
_ = it // TODO: iterate using Next.
}
func ExampleSubscriptionIterator_Next() {
ctx := context.Background()
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
// List all subscriptions of the project.
it := client.Subscriptions(ctx)
for {
sub, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(sub)
}
}

269
vendor/cloud.google.com/go/pubsub/example_test.go generated vendored Normal file
View File

@@ -0,0 +1,269 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub_test
import (
"fmt"
"time"
"cloud.google.com/go/pubsub"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
)
func ExampleNewClient() {
ctx := context.Background()
_, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
// See the other examples to learn how to use the Client.
}
func ExampleClient_CreateTopic() {
ctx := context.Background()
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
// Create a new topic with the given name.
topic, err := client.CreateTopic(ctx, "topicName")
if err != nil {
// TODO: Handle error.
}
_ = topic // TODO: use the topic.
}
func ExampleClient_CreateSubscription() {
ctx := context.Background()
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
// Create a new topic with the given name.
topic, err := client.CreateTopic(ctx, "topicName")
if err != nil {
// TODO: Handle error.
}
// Create a new subscription to the previously created topic
// with the given name.
sub, err := client.CreateSubscription(ctx, "subName", pubsub.SubscriptionConfig{
Topic: topic,
AckDeadline: 10 * time.Second,
})
if err != nil {
// TODO: Handle error.
}
_ = sub // TODO: use the subscription.
}
func ExampleTopic_Delete() {
ctx := context.Background()
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
topic := client.Topic("topicName")
if err := topic.Delete(ctx); err != nil {
// TODO: Handle error.
}
}
func ExampleTopic_Exists() {
ctx := context.Background()
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
topic := client.Topic("topicName")
ok, err := topic.Exists(ctx)
if err != nil {
// TODO: Handle error.
}
if !ok {
// Topic doesn't exist.
}
}
func ExampleTopic_Publish() {
ctx := context.Background()
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
topic := client.Topic("topicName")
defer topic.Stop()
var results []*pubsub.PublishResult
r := topic.Publish(ctx, &pubsub.Message{
Data: []byte("hello world"),
})
results = append(results, r)
// Do other work ...
for _, r := range results {
id, err := r.Get(ctx)
if err != nil {
// TODO: Handle error.
}
fmt.Printf("Published a message with a message ID: %s\n", id)
}
}
func ExampleTopic_Subscriptions() {
ctx := context.Background()
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
topic := client.Topic("topic-name")
// List all subscriptions of the topic (possibly belonging to multiple projects).
for subs := topic.Subscriptions(ctx); ; {
sub, err := subs.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
_ = sub // TODO: use the subscription.
}
}
func ExampleSubscription_Delete() {
ctx := context.Background()
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
sub := client.Subscription("subName")
if err := sub.Delete(ctx); err != nil {
// TODO: Handle error.
}
}
func ExampleSubscription_Exists() {
ctx := context.Background()
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
sub := client.Subscription("subName")
ok, err := sub.Exists(ctx)
if err != nil {
// TODO: Handle error.
}
if !ok {
// Subscription doesn't exist.
}
}
func ExampleSubscription_Config() {
ctx := context.Background()
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
sub := client.Subscription("subName")
config, err := sub.Config(ctx)
if err != nil {
// TODO: Handle error.
}
fmt.Println(config)
}
func ExampleSubscription_Receive() {
ctx := context.Background()
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
sub := client.Subscription("subName")
err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
// TODO: Handle message.
// NOTE: May be called concurrently; synchronize access to shared memory.
m.Ack()
})
if err != context.Canceled {
// TODO: Handle error.
}
}
// This example shows how to configure keepalive so that unacknowledged messages
// expire quickly, allowing other subscribers to take them.
func ExampleSubscription_Receive_maxExtension() {
ctx := context.Background()
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
sub := client.Subscription("subName")
// This program is expected to process and acknowledge messages in 30 seconds. If
// not, the Pub/Sub API will assume the message is not acknowledged.
sub.ReceiveSettings.MaxExtension = 30 * time.Second
err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
// TODO: Handle message.
m.Ack()
})
if err != context.Canceled {
// TODO: Handle error.
}
}
// This example shows how to throttle Subscription.Receive, which aims for high
// throughput by default. By limiting the number of messages and/or bytes being
// processed at once, you can bound your program's resource consumption.
func ExampleSubscription_Receive_maxOutstanding() {
ctx := context.Background()
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
sub := client.Subscription("subName")
sub.ReceiveSettings.MaxOutstandingMessages = 5
sub.ReceiveSettings.MaxOutstandingBytes = 10e6
err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
// TODO: Handle message.
m.Ack()
})
if err != context.Canceled {
// TODO: Handle error.
}
}
func ExampleSubscription_Update() {
ctx := context.Background()
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
sub := client.Subscription("subName")
subConfig, err := sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{
PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"},
})
if err != nil {
// TODO: Handle error.
}
_ = subConfig // TODO: Use SubscriptionConfig.
}

View File

@@ -0,0 +1,53 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub_test
import (
"fmt"
"cloud.google.com/go/pubsub"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
)
func ExampleClient_Topics() {
ctx := context.Background()
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
it := client.Topics(ctx)
_ = it // TODO: iterate using Next.
}
func ExampleTopicIterator_Next() {
ctx := context.Background()
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
// List all topics.
it := client.Topics(ctx)
for {
t, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
fmt.Println(t)
}
}

147
vendor/cloud.google.com/go/pubsub/fake_test.go generated vendored Normal file
View File

@@ -0,0 +1,147 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
// This file provides a fake/mock in-memory pubsub server.
// (Really just a mock at the moment, but we hope to turn it into
// more of a fake.)
import (
"io"
"sync"
"time"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
pb "google.golang.org/genproto/googleapis/pubsub/v1"
)
type fakeServer struct {
pb.PublisherServer
pb.SubscriberServer
Addr string
mu sync.Mutex
Acked map[string]bool // acked message IDs
Deadlines map[string]int32 // deadlines by message ID
pullResponses []*pullResponse
wg sync.WaitGroup
}
type pullResponse struct {
msgs []*pb.ReceivedMessage
err error
}
func newFakeServer() (*fakeServer, error) {
srv, err := testutil.NewServer()
if err != nil {
return nil, err
}
fake := &fakeServer{
Addr: srv.Addr,
Acked: map[string]bool{},
Deadlines: map[string]int32{},
}
pb.RegisterPublisherServer(srv.Gsrv, fake)
pb.RegisterSubscriberServer(srv.Gsrv, fake)
srv.Start()
return fake, nil
}
// Each call to addStreamingPullMessages results in one StreamingPullResponse.
func (s *fakeServer) addStreamingPullMessages(msgs []*pb.ReceivedMessage) {
s.pullResponses = append(s.pullResponses, &pullResponse{msgs, nil})
}
func (s *fakeServer) addStreamingPullError(err error) {
s.pullResponses = append(s.pullResponses, &pullResponse{nil, err})
}
func (s *fakeServer) wait() {
s.wg.Wait()
}
func (s *fakeServer) StreamingPull(stream pb.Subscriber_StreamingPullServer) error {
s.wg.Add(1)
defer s.wg.Done()
errc := make(chan error, 1)
s.wg.Add(1)
go func() {
defer s.wg.Done()
for {
req, err := stream.Recv()
if err != nil {
errc <- err
return
}
s.mu.Lock()
for _, id := range req.AckIds {
s.Acked[id] = true
}
for i, id := range req.ModifyDeadlineAckIds {
s.Deadlines[id] = req.ModifyDeadlineSeconds[i]
}
s.mu.Unlock()
}
}()
// Send responses.
for {
s.mu.Lock()
if len(s.pullResponses) == 0 {
s.mu.Unlock()
// Nothing to send, so wait for the client to shut down the stream.
err := <-errc // a real error, or at least EOF
if err == io.EOF {
return nil
}
return err
}
pr := s.pullResponses[0]
s.pullResponses = s.pullResponses[1:]
s.mu.Unlock()
if pr.err != nil {
// Add a slight delay to ensure the server receives any
// messages en route from the client before shutting down the stream.
// This reduces flakiness of tests involving retry.
time.Sleep(100 * time.Millisecond)
}
if pr.err == io.EOF {
return nil
}
if pr.err != nil {
return pr.err
}
// Return any error from Recv.
select {
case err := <-errc:
return err
default:
}
res := &pb.StreamingPullResponse{ReceivedMessages: pr.msgs}
if err := stream.Send(res); err != nil {
return err
}
}
}
func (s *fakeServer) GetSubscription(ctx context.Context, req *pb.GetSubscriptionRequest) (*pb.Subscription, error) {
return &pb.Subscription{
Name: req.Subscription,
AckDeadlineSeconds: 10,
PushConfig: &pb.PushConfig{},
}, nil
}
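// For illustration (a hypothetical test; wiring a client to Addr is omitted
// here): queue one streaming pull response carrying a single message:
//
//	srv, err := newFakeServer()
//	if err != nil {
//		t.Fatal(err)
//	}
//	srv.addStreamingPullMessages([]*pb.ReceivedMessage{
//		{AckId: "ack-1", Message: &pb.PubsubMessage{Data: []byte("hello")}},
//	})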

106
vendor/cloud.google.com/go/pubsub/flow_controller.go generated vendored Normal file
View File

@@ -0,0 +1,106 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"golang.org/x/net/context"
"golang.org/x/sync/semaphore"
)
// flowController implements flow control for Subscription.Receive.
type flowController struct {
maxSize int // max total size of messages
semCount, semSize *semaphore.Weighted // enforces max number and size of messages
}
// newFlowController creates a new flowController that ensures no more than
// maxCount messages or maxSize bytes are outstanding at once. If maxCount or
// maxSize is < 1, then an unlimited number of messages or bytes is permitted,
// respectively.
func newFlowController(maxCount, maxSize int) *flowController {
fc := &flowController{
maxSize: maxSize,
semCount: nil,
semSize: nil,
}
if maxCount > 0 {
fc.semCount = semaphore.NewWeighted(int64(maxCount))
}
if maxSize > 0 {
fc.semSize = semaphore.NewWeighted(int64(maxSize))
}
return fc
}
// acquire blocks until one message of size bytes can proceed or ctx is done.
// It returns nil in the first case, or ctx.Err() in the second.
//
// acquire allows large messages to proceed by treating a size greater than maxSize
// as if it were equal to maxSize.
func (f *flowController) acquire(ctx context.Context, size int) error {
if f.semCount != nil {
if err := f.semCount.Acquire(ctx, 1); err != nil {
return err
}
}
if f.semSize != nil {
if err := f.semSize.Acquire(ctx, f.bound(size)); err != nil {
if f.semCount != nil {
f.semCount.Release(1)
}
return err
}
}
return nil
}
// tryAcquire returns false if acquire would block. Otherwise, it behaves like
// acquire and returns true.
//
// tryAcquire allows large messages to proceed by treating a size greater than
// maxSize as if it were equal to maxSize.
func (f *flowController) tryAcquire(size int) bool {
if f.semCount != nil {
if !f.semCount.TryAcquire(1) {
return false
}
}
if f.semSize != nil {
if !f.semSize.TryAcquire(f.bound(size)) {
if f.semCount != nil {
f.semCount.Release(1)
}
return false
}
}
return true
}
// release notes that one message of size bytes is no longer outstanding.
func (f *flowController) release(size int) {
if f.semCount != nil {
f.semCount.Release(1)
}
if f.semSize != nil {
f.semSize.Release(f.bound(size))
}
}
func (f *flowController) bound(size int) int64 {
if size > f.maxSize {
return int64(f.maxSize)
}
return int64(size)
}
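// For illustration only (a hypothetical caller; process stands in for real
// message handling): a receiver could bracket each message with acquire and
// release so that at most maxCount messages and maxSize bytes are
// outstanding at once:
//
//	fc := newFlowController(10, 1e6)
//	if err := fc.acquire(ctx, len(msg.Data)); err != nil {
//		return err
//	}
//	go func() {
//		defer fc.release(len(msg.Data))
//		process(msg)
//	}()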

View File

@@ -0,0 +1,236 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"errors"
"fmt"
"sync/atomic"
"testing"
"time"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
)
func TestFlowControllerCancel(t *testing.T) {
// Test canceling a flow controller's context.
t.Parallel()
fc := newFlowController(3, 10)
if err := fc.acquire(context.Background(), 5); err != nil {
t.Fatal(err)
}
// Experiment: a context that times out should always return an error.
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Millisecond)
defer cancel()
if err := fc.acquire(ctx, 6); err != context.DeadlineExceeded {
t.Fatalf("got %v, expected DeadlineExceeded", err)
}
// Control: a context that is not done should always return nil.
go func() {
time.Sleep(5 * time.Millisecond)
fc.release(5)
}()
if err := fc.acquire(context.Background(), 6); err != nil {
t.Errorf("got %v, expected nil", err)
}
}
func TestFlowControllerLargeRequest(t *testing.T) {
// Large requests succeed, consuming the entire allotment.
t.Parallel()
fc := newFlowController(3, 10)
err := fc.acquire(context.Background(), 11)
if err != nil {
t.Fatal(err)
}
}
func TestFlowControllerNoStarve(t *testing.T) {
// A large request won't starve, because the flowController is
// (best-effort) FIFO.
t.Parallel()
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
fc := newFlowController(10, 10)
first := make(chan int)
for i := 0; i < 20; i++ {
go func() {
for {
if err := fc.acquire(ctx, 1); err != nil {
if err != context.Canceled {
t.Error(err)
}
return
}
select {
case first <- 1:
default:
}
fc.release(1)
}
}()
}
<-first // Wait until the flowController's state is non-zero.
if err := fc.acquire(ctx, 11); err != nil {
t.Errorf("got %v, want nil", err)
}
}
func TestFlowControllerSaturation(t *testing.T) {
t.Parallel()
const (
maxCount = 6
maxSize = 10
)
for _, test := range []struct {
acquireSize int
wantCount, wantSize int64
}{
{
// Many small acquires cause the flow controller to reach its max count.
acquireSize: 1,
wantCount: 6,
wantSize: 6,
},
{
// Five acquires of size 2 will cause the flow controller to reach its max size,
// but not its max count.
acquireSize: 2,
wantCount: 5,
wantSize: 10,
},
{
// If the requests are the right size (relatively prime to maxSize),
// the flow controller will not saturate on size. (In this case, not on count either.)
acquireSize: 3,
wantCount: 3,
wantSize: 9,
},
} {
fc := newFlowController(maxCount, maxSize)
// Atomically track flow controller state.
var curCount, curSize int64
success := errors.New("")
// Time out if wantSize or wantCount is never reached.
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
g, ctx := errgroup.WithContext(ctx)
for i := 0; i < 10; i++ {
g.Go(func() error {
var hitCount, hitSize bool
// Run at least until we hit the expected values, and at least
// for enough iterations to exceed them if the flow controller
// is broken.
for i := 0; i < 100 || !hitCount || !hitSize; i++ {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
if err := fc.acquire(ctx, test.acquireSize); err != nil {
return err
}
c := atomic.AddInt64(&curCount, 1)
if c > test.wantCount {
return fmt.Errorf("count %d exceeds want %d", c, test.wantCount)
}
if c == test.wantCount {
hitCount = true
}
s := atomic.AddInt64(&curSize, int64(test.acquireSize))
if s > test.wantSize {
return fmt.Errorf("size %d exceeds want %d", s, test.wantSize)
}
if s == test.wantSize {
hitSize = true
}
time.Sleep(5 * time.Millisecond) // Let other goroutines make progress.
if atomic.AddInt64(&curCount, -1) < 0 {
return errors.New("negative count")
}
if atomic.AddInt64(&curSize, -int64(test.acquireSize)) < 0 {
return errors.New("negative size")
}
fc.release(test.acquireSize)
}
return success
})
}
if err := g.Wait(); err != success {
t.Errorf("%+v: %v", test, err)
continue
}
}
}
func TestFlowControllerTryAcquire(t *testing.T) {
fc := newFlowController(3, 10)
// Successfully tryAcquire 4 bytes.
if !fc.tryAcquire(4) {
t.Error("got false, wanted true")
}
// Fail to tryAcquire 7 bytes.
if fc.tryAcquire(7) {
t.Error("got true, wanted false")
}
// Successfully tryAcquire 6 bytes.
if !fc.tryAcquire(6) {
t.Error("got false, wanted true")
}
}
func TestFlowControllerUnboundedCount(t *testing.T) {
ctx := context.Background()
fc := newFlowController(0, 10)
// Successfully acquire 4 bytes.
if err := fc.acquire(ctx, 4); err != nil {
t.Errorf("got %v, wanted no error", err)
}
// Successfully tryAcquire 4 bytes.
if !fc.tryAcquire(4) {
t.Error("got false, wanted true")
}
// Fail to tryAcquire 3 bytes.
if fc.tryAcquire(3) {
t.Error("got true, wanted false")
}
}
func TestFlowControllerUnboundedBytes(t *testing.T) {
ctx := context.Background()
fc := newFlowController(2, 0)
// Successfully acquire 4GB.
if err := fc.acquire(ctx, 4e9); err != nil {
t.Errorf("got %v, wanted no error", err)
}
// Successfully tryAcquire 4GB.
if !fc.tryAcquire(4e9) {
t.Error("got false, wanted true")
}
// Fail to tryAcquire a third message.
if fc.tryAcquire(3) {
t.Error("got true, wanted false")
}
}

351
vendor/cloud.google.com/go/pubsub/integration_test.go generated vendored Normal file
View File

@@ -0,0 +1,351 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"fmt"
"reflect"
"testing"
"time"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"cloud.google.com/go/iam"
"cloud.google.com/go/internal"
"cloud.google.com/go/internal/testutil"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
)
// messageData is used to hold the contents of a message so that it can be compared against the contents
// of another message without regard to irrelevant fields.
type messageData struct {
ID string
Data []byte
Attributes map[string]string
}
func extractMessageData(m *Message) *messageData {
return &messageData{
ID: m.ID,
Data: m.Data,
Attributes: m.Attributes,
}
}
func TestAll(t *testing.T) {
t.Parallel()
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
projID := testutil.ProjID()
if projID == "" {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
}
ctx := context.Background()
ts := testutil.TokenSource(ctx, ScopePubSub, ScopeCloudPlatform)
if ts == nil {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
}
now := time.Now()
topicName := fmt.Sprintf("topic-%d", now.Unix())
subName := fmt.Sprintf("subscription-%d", now.Unix())
client, err := NewClient(ctx, projID, option.WithTokenSource(ts))
if err != nil {
t.Fatalf("Creating client error: %v", err)
}
defer client.Close()
var topic *Topic
if topic, err = client.CreateTopic(ctx, topicName); err != nil {
t.Errorf("CreateTopic error: %v", err)
}
defer topic.Stop()
var sub *Subscription
if sub, err = client.CreateSubscription(ctx, subName, SubscriptionConfig{Topic: topic}); err != nil {
t.Errorf("CreateSub error: %v", err)
}
exists, err := topic.Exists(ctx)
if err != nil {
t.Fatalf("TopicExists error: %v", err)
}
if !exists {
t.Errorf("topic %s should exist, but it doesn't", topic)
}
exists, err = sub.Exists(ctx)
if err != nil {
t.Fatalf("SubExists error: %v", err)
}
if !exists {
t.Errorf("subscription %s should exist, but it doesn't", subName)
}
msgs := []*Message{}
for i := 0; i < 10; i++ {
text := fmt.Sprintf("a message with an index %d", i)
attrs := make(map[string]string)
attrs["foo"] = "bar"
msgs = append(msgs, &Message{
Data: []byte(text),
Attributes: attrs,
})
}
// Publish the messages.
type pubResult struct {
m *Message
r *PublishResult
}
var rs []pubResult
for _, m := range msgs {
r := topic.Publish(ctx, m)
rs = append(rs, pubResult{m, r})
}
want := make(map[string]*messageData)
for _, res := range rs {
id, err := res.r.Get(ctx)
if err != nil {
t.Fatal(err)
}
md := extractMessageData(res.m)
md.ID = id
want[md.ID] = md
}
// Use a timeout to ensure that Pull does not block indefinitely if there are unexpectedly few messages available.
timeoutCtx, _ := context.WithTimeout(ctx, time.Minute)
gotMsgs, err := pullN(timeoutCtx, sub, len(want), func(ctx context.Context, m *Message) {
m.Ack()
})
if err != nil {
t.Fatalf("Pull: %v", err)
}
got := make(map[string]*messageData)
for _, m := range gotMsgs {
md := extractMessageData(m)
got[md.ID] = md
}
if !reflect.DeepEqual(got, want) {
t.Errorf("messages: got: %v ; want: %v", got, want)
}
if msg, ok := testIAM(ctx, topic.IAM(), "pubsub.topics.get"); !ok {
t.Errorf("topic IAM: %s", msg)
}
if msg, ok := testIAM(ctx, sub.IAM(), "pubsub.subscriptions.get"); !ok {
t.Errorf("sub IAM: %s", msg)
}
snap, err := sub.createSnapshot(ctx, "")
if err != nil {
t.Fatalf("CreateSnapshot error: %v", err)
}
timeoutCtx, _ = context.WithTimeout(ctx, time.Minute)
err = internal.Retry(timeoutCtx, gax.Backoff{}, func() (bool, error) {
snapIt := client.snapshots(timeoutCtx)
for {
s, err := snapIt.Next()
if err == nil && s.name == snap.name {
return true, nil
}
if err == iterator.Done {
return false, fmt.Errorf("cannot find snapshot: %q", snap.name)
}
if err != nil {
return false, err
}
}
})
if err != nil {
t.Error(err)
}
err = internal.Retry(timeoutCtx, gax.Backoff{}, func() (bool, error) {
err := sub.seekToSnapshot(timeoutCtx, snap.snapshot)
return err == nil, err
})
if err != nil {
t.Error(err)
}
err = internal.Retry(timeoutCtx, gax.Backoff{}, func() (bool, error) {
err := sub.seekToTime(timeoutCtx, time.Now())
return err == nil, err
})
if err != nil {
t.Error(err)
}
err = internal.Retry(timeoutCtx, gax.Backoff{}, func() (bool, error) {
snapHandle := client.snapshot(snap.ID())
err := snapHandle.delete(timeoutCtx)
return err == nil, err
})
if err != nil {
t.Error(err)
}
if err := sub.Delete(ctx); err != nil {
t.Errorf("DeleteSub error: %v", err)
}
if err := topic.Delete(ctx); err != nil {
t.Errorf("DeleteTopic error: %v", err)
}
}
// IAM tests.
// NOTE: for these to succeed, the test runner identity must have the Pub/Sub Admin or Owner roles.
// To set, visit https://console.developers.google.com, select "IAM & Admin" from the top-left
// menu, choose the account, click the Roles dropdown, and select "Pub/Sub > Pub/Sub Admin".
// TODO(jba): move this to a testing package within cloud.google.com/iam, so we can re-use it.
func testIAM(ctx context.Context, h *iam.Handle, permission string) (msg string, ok bool) {
// Attempting to add a non-existent identity (e.g. "alice@example.com") causes the service
// to return an internal error, so use a real identity.
const member = "domain:google.com"
var policy *iam.Policy
var err error
if policy, err = h.Policy(ctx); err != nil {
return fmt.Sprintf("Policy: %v", err), false
}
// The resource is new, so the policy should be empty.
if got := policy.Roles(); len(got) > 0 {
return fmt.Sprintf("initially: got roles %v, want none", got), false
}
// Add a member, set the policy, then check that the member is present.
policy.Add(member, iam.Viewer)
if err := h.SetPolicy(ctx, policy); err != nil {
return fmt.Sprintf("SetPolicy: %v", err), false
}
if policy, err = h.Policy(ctx); err != nil {
return fmt.Sprintf("Policy: %v", err), false
}
if got, want := policy.Members(iam.Viewer), []string{member}; !reflect.DeepEqual(got, want) {
return fmt.Sprintf("after Add: got %v, want %v", got, want), false
}
// Now remove that member, set the policy, and check that it's empty again.
policy.Remove(member, iam.Viewer)
if err := h.SetPolicy(ctx, policy); err != nil {
return fmt.Sprintf("SetPolicy: %v", err), false
}
if policy, err = h.Policy(ctx); err != nil {
return fmt.Sprintf("Policy: %v", err), false
}
if got := policy.Roles(); len(got) > 0 {
return fmt.Sprintf("after Remove: got roles %v, want none", got), false
}
// Call TestPermissions.
// Because this user is an admin, it has all the permissions on the
// resource type. Note: the service fails if we ask for inapplicable
// permissions (e.g. a subscription permission on a topic, or a topic
// create permission on a topic rather than its parent).
wantPerms := []string{permission}
gotPerms, err := h.TestPermissions(ctx, wantPerms)
if err != nil {
return fmt.Sprintf("TestPermissions: %v", err), false
}
if !reflect.DeepEqual(gotPerms, wantPerms) {
return fmt.Sprintf("TestPermissions: got %v, want %v", gotPerms, wantPerms), false
}
return "", true
}
func TestSubscriptionUpdate(t *testing.T) {
t.Parallel()
ctx := context.Background()
if testing.Short() {
t.Skip("Integration tests skipped in short mode")
}
projID := testutil.ProjID()
if projID == "" {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details.")
}
ts := testutil.TokenSource(ctx, ScopePubSub, ScopeCloudPlatform)
if ts == nil {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
}
now := time.Now()
topicName := fmt.Sprintf("topic-modify-%d", now.Unix())
subName := fmt.Sprintf("subscription-modify-%d", now.Unix())
client, err := NewClient(ctx, projID, option.WithTokenSource(ts))
if err != nil {
t.Fatalf("Creating client error: %v", err)
}
defer client.Close()
var topic *Topic
if topic, err = client.CreateTopic(ctx, topicName); err != nil {
t.Fatalf("CreateTopic error: %v", err)
}
defer topic.Stop()
defer topic.Delete(ctx)
var sub *Subscription
if sub, err = client.CreateSubscription(ctx, subName, SubscriptionConfig{Topic: topic}); err != nil {
t.Fatalf("CreateSub error: %v", err)
}
defer sub.Delete(ctx)
sc, err := sub.Config(ctx)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(sc.PushConfig, PushConfig{}) {
t.Fatalf("got %+v, want empty PushConfig")
}
// Add a PushConfig.
pc := PushConfig{
Endpoint: "https://" + projID + ".appspot.com/_ah/push-handlers/push",
Attributes: map[string]string{"x-goog-version": "v1"},
}
sc, err = sub.Update(ctx, SubscriptionConfigToUpdate{PushConfig: &pc})
if err != nil {
t.Fatal(err)
}
// Despite the docs which say that Get always returns a valid "x-goog-version"
// attribute, none is returned. See
// https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#google.pubsub.v1.PushConfig
pc.Attributes = nil
if got, want := sc.PushConfig, pc; !reflect.DeepEqual(got, want) {
t.Fatalf("setting push config: got\n%+v\nwant\n%+v", got, want)
}
// Remove the PushConfig, turning the subscription back into pull mode.
pc = PushConfig{}
sc, err = sub.Update(ctx, SubscriptionConfigToUpdate{PushConfig: &pc})
if err != nil {
t.Fatal(err)
}
if got, want := sc.PushConfig, pc; !reflect.DeepEqual(got, want) {
t.Fatalf("removing push config: got\n%+v\nwant %+v", got, want)
}
// If nothing changes, our client returns an error.
_, err = sub.Update(ctx, SubscriptionConfigToUpdate{})
if err == nil {
t.Fatal("got nil, wanted error")
}
}

520
vendor/cloud.google.com/go/pubsub/iterator.go generated vendored Normal file
View File

@@ -0,0 +1,520 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"log"
"sync"
"time"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/support/bundler"
pb "google.golang.org/genproto/googleapis/pubsub/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
type messageIterator struct {
impl interface {
next() (*Message, error)
stop()
}
}
type pollingMessageIterator struct {
// kaTicker controls how often we send an ack deadline extension request.
kaTicker *time.Ticker
// ackTicker controls how often we acknowledge a batch of messages.
ackTicker *time.Ticker
ka *keepAlive
acker *acker
nacker *bundler.Bundler
puller *puller
// mu ensures that cleanup only happens once, and concurrent Stop
// invocations block until cleanup completes.
mu sync.Mutex
// closed is used to signal that Stop has been called.
closed chan struct{}
}
var useStreamingPull = false
// newMessageIterator starts a new messageIterator. Stop must be called on the messageIterator
// when it is no longer needed.
// subName is the full name of the subscription to pull messages from.
// ctx is the context to use for acking messages and extending message deadlines.
func newMessageIterator(ctx context.Context, s service, subName string, po *pullOptions) *messageIterator {
if !useStreamingPull {
return &messageIterator{
impl: newPollingMessageIterator(ctx, s, subName, po),
}
}
sp := s.newStreamingPuller(ctx, subName, int32(po.ackDeadline.Seconds()))
err := sp.open()
if grpc.Code(err) == codes.Unimplemented {
log.Println("pubsub: streaming pull unimplemented; falling back to legacy pull")
return &messageIterator{
impl: newPollingMessageIterator(ctx, s, subName, po),
}
}
// TODO(jba): handle other non-nil error?
log.Println("using streaming pull")
return &messageIterator{
impl: newStreamingMessageIterator(ctx, sp, po),
}
}
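// Sketch of intended use within this package (illustrative; s, subName and
// po are assumed to be set up by the caller):
//
//	it := newMessageIterator(ctx, s, subName, po)
//	defer it.Stop()
//	for {
//		m, err := it.Next()
//		if err != nil {
//			break // iterator.Done once Stop has been called
//		}
//		m.Ack() // or m.Nack(); either marks the message as done
//	}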
func newPollingMessageIterator(ctx context.Context, s service, subName string, po *pullOptions) *pollingMessageIterator {
// TODO: make kaTicker frequency more configurable.
// (ackDeadline - 5s) is a reasonable default for now, because the minimum ack period is 10s. This gives us 5s grace.
keepAlivePeriod := po.ackDeadline - 5*time.Second
kaTicker := time.NewTicker(keepAlivePeriod) // Stopped in it.Stop
// Ack promptly so users don't lose work if client crashes.
ackTicker := time.NewTicker(100 * time.Millisecond) // Stopped in it.Stop
ka := &keepAlive{
s: s,
Ctx: ctx,
Sub: subName,
ExtensionTick: kaTicker.C,
Deadline: po.ackDeadline,
MaxExtension: po.maxExtension,
}
ack := &acker{
s: s,
Ctx: ctx,
Sub: subName,
AckTick: ackTicker.C,
Notify: ka.Remove,
}
nacker := bundler.NewBundler("", func(ackIDs interface{}) {
// NACK by setting the ack deadline to zero, to make the message
// immediately available for redelivery.
//
// If the RPC fails, nothing we can do about it. In the worst case, the
// deadline for these messages will expire and they will still get
// redelivered.
_ = s.modifyAckDeadline(ctx, subName, 0, ackIDs.([]string))
})
nacker.DelayThreshold = 100 * time.Millisecond // nack promptly
nacker.BundleCountThreshold = 10
pull := newPuller(s, subName, ctx, po.maxPrefetch, ka.Add, ka.Remove)
ka.Start()
ack.Start()
return &pollingMessageIterator{
kaTicker: kaTicker,
ackTicker: ackTicker,
ka: ka,
acker: ack,
nacker: nacker,
puller: pull,
closed: make(chan struct{}),
}
}
// Next returns the next Message to be processed. The caller must call
// Message.Done when finished with it.
// Once Stop has been called, calls to Next will return iterator.Done.
func (it *messageIterator) Next() (*Message, error) {
return it.impl.next()
}
func (it *pollingMessageIterator) next() (*Message, error) {
m, err := it.puller.Next()
if err == nil {
m.doneFunc = it.done
return m, nil
}
select {
// If Stop has been called, we return Done regardless of the value of err.
case <-it.closed:
return nil, iterator.Done
default:
return nil, err
}
}
// Client code must call Stop on a messageIterator when finished with it.
// Stop will block until Done has been called on all Messages that have been
// returned by Next, or until the context with which the messageIterator was created
// is cancelled or exceeds its deadline.
// Stop need only be called once, but may be called multiple times from
// multiple goroutines.
func (it *messageIterator) Stop() {
it.impl.stop()
}
func (it *pollingMessageIterator) stop() {
it.mu.Lock()
defer it.mu.Unlock()
select {
case <-it.closed:
// Cleanup has already been performed.
return
default:
}
// We close this channel before calling it.puller.Stop to ensure that we
// reliably return iterator.Done from Next.
close(it.closed)
// Stop the puller. Once this completes, no more messages will be added
// to it.ka.
it.puller.Stop()
// Start acking messages as they arrive, ignoring ackTicker. This will
// result in it.ka.Stop, below, returning as soon as possible.
it.acker.FastMode()
// This will block until
// (a) it.ka.Ctx is done, or
// (b) all messages have been removed from keepAlive.
// (b) will happen once all outstanding messages have been either ACKed or NACKed.
it.ka.Stop()
// There are no more live messages, so kill off the acker.
it.acker.Stop()
it.nacker.Flush()
it.kaTicker.Stop()
it.ackTicker.Stop()
}
func (it *pollingMessageIterator) done(ackID string, ack bool) {
if ack {
it.acker.Ack(ackID)
// There's no need to call it.ka.Remove here, as acker will
// call it via its Notify function.
} else {
it.ka.Remove(ackID)
_ = it.nacker.Add(ackID, len(ackID)) // ignore error; this is just an optimization
}
}
type streamingMessageIterator struct {
ctx context.Context
po *pullOptions
sp *streamingPuller
kaTicker *time.Ticker // keep-alive (deadline extensions)
ackTicker *time.Ticker // message acks
nackTicker *time.Ticker // message nacks (more frequent than acks)
failed chan struct{} // closed on stream error
stopped chan struct{} // closed when Stop is called
drained chan struct{} // closed when stopped && no more pending messages
msgc chan *Message
wg sync.WaitGroup
mu sync.Mutex
keepAliveDeadlines map[string]time.Time
pendingReq *pb.StreamingPullRequest
err error // error from stream failure
}
func newStreamingMessageIterator(ctx context.Context, sp *streamingPuller, po *pullOptions) *streamingMessageIterator {
// TODO: make kaTicker frequency more configurable. (ackDeadline - 5s) is a
// reasonable default for now, because the minimum ack period is 10s. This
// gives us 5s grace.
keepAlivePeriod := po.ackDeadline - 5*time.Second
kaTicker := time.NewTicker(keepAlivePeriod)
// Ack promptly so users don't lose work if client crashes.
ackTicker := time.NewTicker(100 * time.Millisecond)
nackTicker := time.NewTicker(100 * time.Millisecond)
it := &streamingMessageIterator{
ctx: ctx,
sp: sp,
po: po,
kaTicker: kaTicker,
ackTicker: ackTicker,
nackTicker: nackTicker,
failed: make(chan struct{}),
stopped: make(chan struct{}),
drained: make(chan struct{}),
// use maxPrefetch as the channel's buffer size.
msgc: make(chan *Message, po.maxPrefetch),
keepAliveDeadlines: map[string]time.Time{},
pendingReq: &pb.StreamingPullRequest{},
}
it.wg.Add(2)
go it.receiver()
go it.sender()
return it
}
func (it *streamingMessageIterator) next() (*Message, error) {
// If ctx has been cancelled or the iterator is done, return straight
// away (even if there are buffered messages available).
select {
case <-it.ctx.Done():
return nil, it.ctx.Err()
case <-it.failed:
break
case <-it.stopped:
break
default:
// Wait for a message, but also for one of the above conditions.
select {
case msg := <-it.msgc:
// Since active select cases are chosen at random, this can return
// nil (from the channel close) even if it.failed or it.stopped is
// closed.
if msg == nil {
break
}
msg.doneFunc = it.done
return msg, nil
case <-it.ctx.Done():
return nil, it.ctx.Err()
case <-it.failed:
break
case <-it.stopped:
break
}
}
// Here if the iterator is done.
it.mu.Lock()
defer it.mu.Unlock()
return nil, it.err
}
func (it *streamingMessageIterator) stop() {
it.mu.Lock()
select {
case <-it.stopped:
it.mu.Unlock()
it.wg.Wait()
return
default:
close(it.stopped)
}
if it.err == nil {
it.err = iterator.Done
}
// Before reading from the channel, see if we're already drained.
it.checkDrained()
it.mu.Unlock()
// Nack all the pending messages.
// Grab the lock separately for each message to allow the receiver
// and sender goroutines to make progress.
// Why this will eventually terminate:
// - If the receiver is not blocked on a stream Recv, then
// it will write all the messages it has received to the channel,
// then exit, closing the channel.
// - If the receiver is blocked, then this loop will eventually
// nack all the messages in the channel. Once done is called
// on the remaining messages, the iterator will be marked as drained,
// which will trigger the sender to terminate. When it does, it
// performs a CloseSend on the stream, which will result in the blocked
// stream Recv returning.
for m := range it.msgc {
it.mu.Lock()
delete(it.keepAliveDeadlines, m.ackID)
it.addDeadlineMod(m.ackID, 0)
it.checkDrained()
it.mu.Unlock()
}
it.wg.Wait()
}
// checkDrained closes the drained channel if the iterator has been stopped and all
// pending messages have either been n/acked or expired.
//
// Called with the lock held.
func (it *streamingMessageIterator) checkDrained() {
select {
case <-it.drained:
return
default:
}
select {
case <-it.stopped:
if len(it.keepAliveDeadlines) == 0 {
close(it.drained)
}
default:
}
}
// Called when a message is acked/nacked.
func (it *streamingMessageIterator) done(ackID string, ack bool) {
it.mu.Lock()
defer it.mu.Unlock()
delete(it.keepAliveDeadlines, ackID)
if ack {
it.pendingReq.AckIds = append(it.pendingReq.AckIds, ackID)
} else {
it.addDeadlineMod(ackID, 0) // Nack indicated by modifying the deadline to zero.
}
it.checkDrained()
}
// addDeadlineMod adds the ack ID to the pending request with the given deadline.
//
// Called with the lock held.
func (it *streamingMessageIterator) addDeadlineMod(ackID string, deadlineSecs int32) {
pr := it.pendingReq
pr.ModifyDeadlineAckIds = append(pr.ModifyDeadlineAckIds, ackID)
pr.ModifyDeadlineSeconds = append(pr.ModifyDeadlineSeconds, deadlineSecs)
}
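// Illustrative sketch (not part of the original file): after done("id1", true)
// and done("id2", false), the pending request carries roughly
//
//	&pb.StreamingPullRequest{
//		AckIds:                []string{"id1"},
//		ModifyDeadlineAckIds:  []string{"id2"},
//		ModifyDeadlineSeconds: []int32{0}, // a zero deadline acts as a nack
//	}
//
// and is flushed to the stream by the sender goroutine below.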
// fail is called when a stream method returns a permanent error.
func (it *streamingMessageIterator) fail(err error) {
it.mu.Lock()
if it.err == nil {
it.err = err
close(it.failed)
}
it.mu.Unlock()
}
// receiver runs in a goroutine and handles all receives from the stream.
func (it *streamingMessageIterator) receiver() {
defer it.wg.Done()
defer close(it.msgc)
for {
// Stop retrieving messages if the context is done, the stream
// failed, or the iterator's Stop method was called.
select {
case <-it.ctx.Done():
return
case <-it.failed:
return
case <-it.stopped:
return
default:
}
// Receive messages from stream. This may block indefinitely.
msgs, err := it.sp.fetchMessages()
// The streamingPuller handles retries, so any error here
// is fatal to the iterator.
if err != nil {
it.fail(err)
return
}
// We received some messages. Remember them so we can
// keep them alive.
deadline := time.Now().Add(it.po.maxExtension)
it.mu.Lock()
for _, m := range msgs {
it.keepAliveDeadlines[m.ackID] = deadline
}
it.mu.Unlock()
// Deliver the messages to the channel.
for _, m := range msgs {
select {
case <-it.ctx.Done():
return
case <-it.failed:
return
// Don't return if stopped. We want to send the remaining
// messages on the channel, where they will be nacked.
case it.msgc <- m:
}
}
}
}
// sender runs in a goroutine and handles all sends to the stream.
func (it *streamingMessageIterator) sender() {
defer it.wg.Done()
defer it.kaTicker.Stop()
defer it.ackTicker.Stop()
defer it.nackTicker.Stop()
defer it.sp.closeSend()
done := false
for !done {
send := false
select {
case <-it.ctx.Done():
// Context canceled or timed out: stop immediately, without
// another RPC.
return
case <-it.failed:
// Stream failed: nothing to do, so stop immediately.
return
case <-it.drained:
// All outstanding messages have been marked done:
// nothing left to do except send the final request.
it.mu.Lock()
send = (len(it.pendingReq.AckIds) > 0 || len(it.pendingReq.ModifyDeadlineAckIds) > 0)
done = true
case <-it.kaTicker.C:
it.mu.Lock()
send = it.handleKeepAlives()
case <-it.nackTicker.C:
it.mu.Lock()
send = (len(it.pendingReq.ModifyDeadlineAckIds) > 0)
case <-it.ackTicker.C:
it.mu.Lock()
send = (len(it.pendingReq.AckIds) > 0)
}
// Lock is held here.
if send {
req := it.pendingReq
it.pendingReq = &pb.StreamingPullRequest{}
it.mu.Unlock()
err := it.sp.send(req)
if err != nil {
// The streamingPuller handles retries, so any error here
// is fatal to the iterator.
it.fail(err)
return
}
} else {
it.mu.Unlock()
}
}
}
// handleKeepAlives modifies the pending request to include deadline extensions
// for live messages. It also purges expired messages. It reports whether
// there were any live messages.
//
// Called with the lock held.
func (it *streamingMessageIterator) handleKeepAlives() bool {
live, expired := getKeepAliveAckIDs(it.keepAliveDeadlines)
for _, e := range expired {
delete(it.keepAliveDeadlines, e)
}
dl := trunc32(int64(it.po.ackDeadline.Seconds()))
for _, m := range live {
it.addDeadlineMod(m, dl)
}
it.checkDrained()
return len(live) > 0
}

338
vendor/cloud.google.com/go/pubsub/iterator_test.go generated vendored Normal file
View File

@@ -0,0 +1,338 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"fmt"
"reflect"
"testing"
"time"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
)
func TestReturnsDoneOnStop(t *testing.T) {
if useStreamingPull {
t.Skip("iterator tests are for polling pull only")
}
type testCase struct {
abort func(*messageIterator, context.CancelFunc)
want error
}
for _, tc := range []testCase{
{
abort: func(it *messageIterator, cancel context.CancelFunc) {
it.Stop()
},
want: iterator.Done,
},
{
abort: func(it *messageIterator, cancel context.CancelFunc) {
cancel()
},
want: context.Canceled,
},
{
abort: func(it *messageIterator, cancel context.CancelFunc) {
it.Stop()
cancel()
},
want: iterator.Done,
},
{
abort: func(it *messageIterator, cancel context.CancelFunc) {
cancel()
it.Stop()
},
want: iterator.Done,
},
} {
s := &blockingFetch{}
ctx, cancel := context.WithCancel(context.Background())
it := newMessageIterator(ctx, s, "subname", &pullOptions{ackDeadline: time.Second * 10, maxExtension: time.Hour})
defer it.Stop()
tc.abort(it, cancel)
_, err := it.Next()
if err != tc.want {
t.Errorf("iterator Next error after abort: got:\n%v\nwant:\n%v", err, tc.want)
}
}
}
// blockingFetch implements message fetching by not returning until its context is cancelled.
type blockingFetch struct {
service
}
func (s *blockingFetch) fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error) {
<-ctx.Done()
return nil, ctx.Err()
}
func (s *blockingFetch) newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller {
return nil
}
// justInTimeFetch simulates the situation where the iterator is aborted just after the fetch RPC
// succeeds, so the rest of puller.Next will continue to execute and return successfully.
type justInTimeFetch struct {
service
}
func (s *justInTimeFetch) fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error) {
<-ctx.Done()
// The context was cancelled, but let's pretend that this happened just after our RPC returned.
var result []*Message
for i := 0; i < int(maxMessages); i++ {
val := fmt.Sprintf("msg%v", i)
result = append(result, &Message{Data: []byte(val), ackID: val})
}
return result, nil
}
func (s *justInTimeFetch) splitAckIDs(ids []string) ([]string, []string) {
return nil, nil
}
func (s *justInTimeFetch) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error {
return nil
}
func (s *justInTimeFetch) newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller {
return nil
}
func TestAfterAbortReturnsNoMoreThanOneMessage(t *testing.T) {
// Each test case is exercised by making two concurrent blocking calls on a
// messageIterator, and then aborting the iterator.
// The result should be one call to Next returning a message, and the other returning an error.
t.Skip(`This test has subtle timing dependencies, making it flaky.
It is not worth fixing because iterators will be removed shortly.`)
type testCase struct {
abort func(*messageIterator, context.CancelFunc)
// want is the error that should be returned from one Next invocation.
want error
}
for n := 1; n < 3; n++ {
for _, tc := range []testCase{
{
abort: func(it *messageIterator, cancel context.CancelFunc) {
it.Stop()
},
want: iterator.Done,
},
{
abort: func(it *messageIterator, cancel context.CancelFunc) {
cancel()
},
want: context.Canceled,
},
{
abort: func(it *messageIterator, cancel context.CancelFunc) {
it.Stop()
cancel()
},
want: iterator.Done,
},
{
abort: func(it *messageIterator, cancel context.CancelFunc) {
cancel()
it.Stop()
},
want: iterator.Done,
},
} {
s := &justInTimeFetch{}
ctx, cancel := context.WithCancel(context.Background())
// if maxPrefetch == 1, there will be no messages in the puller buffer when Next is invoked the second time.
// if maxPrefetch == 2, there will be 1 message in the puller buffer when Next is invoked the second time.
po := &pullOptions{
ackDeadline: time.Second * 10,
maxExtension: time.Hour,
maxPrefetch: int32(n),
}
it := newMessageIterator(ctx, s, "subname", po)
defer it.Stop()
type result struct {
m *Message
err error
}
results := make(chan *result, 2)
for i := 0; i < 2; i++ {
go func() {
m, err := it.Next()
results <- &result{m, err}
if err == nil {
m.Nack()
}
}()
}
// Wait for goroutines to block on it.Next().
time.Sleep(50 * time.Millisecond)
tc.abort(it, cancel)
result1 := <-results
result2 := <-results
// There should be one error result, and one non-error result.
// Make result1 be the non-error result.
if result1.err != nil {
result1, result2 = result2, result1
}
if string(result1.m.Data) != "msg0" {
t.Errorf("After abort, got message: %v, want %v", result1.m.Data, "msg0")
}
if result1.err != nil {
t.Errorf("After abort, got : %v, want nil", result1.err)
}
if result2.m != nil {
t.Errorf("After abort, got message: %v, want nil", result2.m)
}
if result2.err != tc.want {
t.Errorf("After abort, got err: %v, want %v", result2.err, tc.want)
}
}
}
}
type fetcherServiceWithModifyAckDeadline struct {
fetcherService
events chan string
}
func (f *fetcherServiceWithModifyAckDeadline) modifyAckDeadline(_ context.Context, _ string, d time.Duration, ids []string) error {
// Different versions of Go use different representations for time.Duration(0).
var ds string
if d == 0 {
ds = "0s"
} else {
ds = d.String()
}
f.events <- fmt.Sprintf("modAck(%v, %s)", ids, ds)
return nil
}
func (f *fetcherServiceWithModifyAckDeadline) splitAckIDs(ackIDs []string) ([]string, []string) {
return ackIDs, nil
}
func (f *fetcherServiceWithModifyAckDeadline) newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller {
return nil
}
func TestMultipleStopCallsBlockUntilMessageDone(t *testing.T) {
t.Skip(`This test has subtle timing dependencies, making it flaky.
It is not worth fixing because iterators will be removed shortly.`)
events := make(chan string, 3)
s := &fetcherServiceWithModifyAckDeadline{
fetcherService{
results: []fetchResult{
{
msgs: []*Message{{ackID: "a"}, {ackID: "b"}},
},
},
},
events,
}
ctx := context.Background()
it := newMessageIterator(ctx, s, "subname", &pullOptions{ackDeadline: time.Second * 10, maxExtension: 0})
m, err := it.Next()
if err != nil {
t.Errorf("error calling Next: %v", err)
}
go func() {
it.Stop()
events <- "stopped"
}()
go func() {
it.Stop()
events <- "stopped"
}()
select {
case <-events:
t.Fatal("Stop is not blocked")
case <-time.After(100 * time.Millisecond):
}
m.Nack()
got := []string{<-events, <-events, <-events}
want := []string{"modAck([a], 0s)", "stopped", "stopped"}
if !reflect.DeepEqual(got, want) {
t.Errorf("stopping iterator, got: %v ; want: %v", got, want)
}
// The iterator is stopped, so should not return another message.
m, err = it.Next()
if m != nil {
t.Errorf("message got: %v ; want: nil", m)
}
if err != iterator.Done {
t.Errorf("err got: %v ; want: %v", err, iterator.Done)
}
}
func TestFastNack(t *testing.T) {
if useStreamingPull {
t.Skip("iterator tests are for polling pull only")
}
events := make(chan string, 3)
s := &fetcherServiceWithModifyAckDeadline{
fetcherService{
results: []fetchResult{
{
msgs: []*Message{{ackID: "a"}, {ackID: "b"}},
},
},
},
events,
}
ctx := context.Background()
it := newMessageIterator(ctx, s, "subname", &pullOptions{
ackDeadline: time.Second * 6,
maxExtension: time.Second * 10,
})
// Get both messages.
_, err := it.Next()
if err != nil {
t.Errorf("error calling Next: %v", err)
}
m2, err := it.Next()
if err != nil {
t.Errorf("error calling Next: %v", err)
}
// Ignore the first, nack the second.
m2.Nack()
got := []string{<-events, <-events}
// The nack should happen before the deadline extension.
want := []string{"modAck([b], 0s)", "modAck([a], 6s)"}
if !reflect.DeepEqual(got, want) {
t.Errorf("got: %v ; want: %v", got, want)
}
}

182
vendor/cloud.google.com/go/pubsub/keepalive.go generated vendored Normal file
View File

@@ -0,0 +1,182 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"sync"
"time"
"golang.org/x/net/context"
)
// keepAlive keeps track of which Messages need to have their deadline extended, and
// periodically extends them.
// Messages are tracked by Ack ID.
type keepAlive struct {
s service
Ctx context.Context // The context to use when extending deadlines.
Sub string // The full name of the subscription.
ExtensionTick <-chan time.Time // ExtensionTick supplies the frequency with which to make extension requests.
Deadline time.Duration // How long to extend messages for each time they are extended. Should be greater than ExtensionTick frequency.
MaxExtension time.Duration // How long to keep extending each message's ack deadline before automatically removing it.
mu sync.Mutex
// key: ackID; value: time at which ack deadline extension should cease.
items map[string]time.Time
dr drain
wg sync.WaitGroup
}
// Start initiates the deadline extension loop. Stop must be called once keepAlive is no longer needed.
func (ka *keepAlive) Start() {
ka.items = make(map[string]time.Time)
ka.dr = drain{Drained: make(chan struct{})}
ka.wg.Add(1)
go func() {
defer ka.wg.Done()
for {
select {
case <-ka.Ctx.Done():
// Don't bother waiting for items to be removed: we can't extend them any more.
return
case <-ka.dr.Drained:
return
case <-ka.ExtensionTick:
live, expired := ka.getAckIDs()
ka.wg.Add(1)
go func() {
defer ka.wg.Done()
ka.extendDeadlines(live)
}()
for _, id := range expired {
ka.Remove(id)
}
}
}
}()
}
// Add adds an ack id to be kept alive.
// It should not be called after Stop.
func (ka *keepAlive) Add(ackID string) {
ka.mu.Lock()
defer ka.mu.Unlock()
ka.items[ackID] = time.Now().Add(ka.MaxExtension)
ka.dr.SetPending(true)
}
// Remove removes ackID from the list to be kept alive.
func (ka *keepAlive) Remove(ackID string) {
ka.mu.Lock()
defer ka.mu.Unlock()
// Note: If a user nacks a message after it has been removed due to
// expiring, Remove will be called twice with the same ack ID. This is OK.
delete(ka.items, ackID)
ka.dr.SetPending(len(ka.items) != 0)
}
// Stop waits until all added ackIDs have been removed, and cleans up resources.
// Stop may only be called once.
func (ka *keepAlive) Stop() {
ka.mu.Lock()
ka.dr.Drain()
ka.mu.Unlock()
ka.wg.Wait()
}
// getAckIDs returns the set of ackIDs that are being kept alive.
// The set is divided into two lists: one with IDs that should continue to be kept alive,
// and the other with IDs that should be dropped.
func (ka *keepAlive) getAckIDs() (live, expired []string) {
ka.mu.Lock()
defer ka.mu.Unlock()
return getKeepAliveAckIDs(ka.items)
}
func getKeepAliveAckIDs(items map[string]time.Time) (live, expired []string) {
now := time.Now()
for id, expiry := range items {
if expiry.Before(now) {
expired = append(expired, id)
} else {
live = append(live, id)
}
}
return live, expired
}
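// Illustrative sketch (not part of the original file): splitting tracked IDs
// into live and expired sets.
//
//	items := map[string]time.Time{
//		"fresh": time.Now().Add(time.Minute),  // still within MaxExtension
//		"stale": time.Now().Add(-time.Minute), // past its expiry
//	}
//	live, expired := getKeepAliveAckIDs(items)
//	// live == []string{"fresh"}, expired == []string{"stale"}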
const maxExtensionAttempts = 2
func (ka *keepAlive) extendDeadlines(ackIDs []string) {
head, tail := ka.s.splitAckIDs(ackIDs)
for len(head) > 0 {
for i := 0; i < maxExtensionAttempts; i++ {
if ka.s.modifyAckDeadline(ka.Ctx, ka.Sub, ka.Deadline, head) == nil {
break
}
}
// NOTE: Messages whose deadlines we fail to extend will
// eventually be redelivered and this is a documented behaviour
// of the API.
//
// NOTE: If we fail to extend deadlines here, this
// implementation will continue to attempt extending the
// deadlines for those ack IDs the next time the extension
// ticker ticks. By then the deadline will have expired.
// Re-extending them is harmless, however.
//
// TODO: call Remove for ids which fail to be extended.
head, tail = ka.s.splitAckIDs(tail)
}
}
// A drain (once started) indicates via a channel when there is no work pending.
type drain struct {
started bool
pending bool
// Drained is closed once there are no items outstanding if Drain has been called.
Drained chan struct{}
}
// Drain starts the drain process. This cannot be undone.
func (d *drain) Drain() {
d.started = true
d.closeIfDrained()
}
// SetPending sets whether there is work pending or not. It may be called multiple times before or after Drain.
func (d *drain) SetPending(pending bool) {
d.pending = pending
d.closeIfDrained()
}
func (d *drain) closeIfDrained() {
if !d.pending && d.started {
// Check to see if d.Drained is closed before closing it.
// This allows SetPending(false) to be safely called multiple times.
select {
case <-d.Drained:
default:
close(d.Drained)
}
}
}
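// Illustrative sketch (not part of the original file): how drain is intended
// to be used by keepAlive above.
//
//	d := drain{Drained: make(chan struct{})}
//	d.SetPending(true)  // work outstanding
//	d.Drain()           // start draining; Drained stays open while pending
//	d.SetPending(false) // last item removed; Drained is now closed
//	<-d.Drained         // returns immediately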

319
vendor/cloud.google.com/go/pubsub/keepalive_test.go generated vendored Normal file
View File

@@ -0,0 +1,319 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"errors"
"reflect"
"sort"
"testing"
"time"
"golang.org/x/net/context"
)
func TestKeepAliveExtendsDeadline(t *testing.T) {
ticker := make(chan time.Time)
deadline := time.Nanosecond * 15
s := &testService{modDeadlineCalled: make(chan modDeadlineCall)}
checkModDeadlineCall := func(ackIDs []string) {
got := <-s.modDeadlineCalled
sort.Strings(got.ackIDs)
want := modDeadlineCall{
subName: "subname",
deadline: deadline,
ackIDs: ackIDs,
}
if !reflect.DeepEqual(got, want) {
t.Errorf("keepalive: got:\n%v\nwant:\n%v", got, want)
}
}
ka := &keepAlive{
s: s,
Ctx: context.Background(),
Sub: "subname",
ExtensionTick: ticker,
Deadline: deadline,
MaxExtension: time.Hour,
}
ka.Start()
ka.Add("a")
ka.Add("b")
ticker <- time.Time{}
checkModDeadlineCall([]string{"a", "b"})
ka.Add("c")
ka.Remove("b")
ticker <- time.Time{}
checkModDeadlineCall([]string{"a", "c"})
ka.Remove("a")
ka.Remove("c")
ka.Add("d")
ticker <- time.Time{}
checkModDeadlineCall([]string{"d"})
ka.Remove("d")
ka.Stop()
}
func TestKeepAliveStopsWhenNoItem(t *testing.T) {
ticker := make(chan time.Time)
stopped := make(chan bool)
s := &testService{modDeadlineCalled: make(chan modDeadlineCall, 3)}
ka := &keepAlive{
s: s,
Ctx: context.Background(),
ExtensionTick: ticker,
}
ka.Start()
// There should be no call to modifyAckDeadline since there are no items.
ticker <- time.Time{}
go func() {
ka.Stop() // No items; should not block
if len(s.modDeadlineCalled) > 0 {
t.Errorf("unexpected extension to non-existent items: %v", <-s.modDeadlineCalled)
}
close(stopped)
}()
select {
case <-stopped:
case <-time.After(time.Second):
t.Errorf("keepAlive timed out waiting for stop")
}
}
func TestKeepAliveStopsWhenItemsExpired(t *testing.T) {
ticker := make(chan time.Time)
stopped := make(chan bool)
s := &testService{modDeadlineCalled: make(chan modDeadlineCall, 2)}
ka := &keepAlive{
s: s,
Ctx: context.Background(),
ExtensionTick: ticker,
MaxExtension: time.Duration(0), // Should expire items at the first tick.
}
ka.Start()
ka.Add("a")
ka.Add("b")
// Wait until the clock advances. Without this loop, this test fails on
// Windows because the clock doesn't advance at all between ka.Add and the
// expiration check after the tick is received.
begin := time.Now()
for time.Now().Equal(begin) {
time.Sleep(time.Millisecond)
}
// There should be no call to modifyAckDeadline since both items are expired.
ticker <- time.Time{}
go func() {
ka.Stop() // No live items; should not block.
if len(s.modDeadlineCalled) > 0 {
t.Errorf("unexpected extension to expired items")
}
close(stopped)
}()
select {
case <-stopped:
case <-time.After(time.Second):
t.Errorf("timed out waiting for stop")
}
}
func TestKeepAliveBlocksUntilAllItemsRemoved(t *testing.T) {
ticker := make(chan time.Time)
eventc := make(chan string, 3)
s := &testService{modDeadlineCalled: make(chan modDeadlineCall)}
ka := &keepAlive{
s: s,
Ctx: context.Background(),
ExtensionTick: ticker,
MaxExtension: time.Hour, // Should not expire.
}
ka.Start()
ka.Add("a")
ka.Add("b")
go func() {
ticker <- time.Time{}
// We expect a call since both items should be extended.
select {
case args := <-s.modDeadlineCalled:
sort.Strings(args.ackIDs)
got := args.ackIDs
want := []string{"a", "b"}
if !reflect.DeepEqual(got, want) {
t.Errorf("mismatching IDs:\ngot %v\nwant %v", got, want)
}
case <-time.After(time.Second):
t.Errorf("timed out waiting for deadline extend call")
}
time.Sleep(10 * time.Millisecond)
eventc <- "pre-remove-b"
// Remove one item, Stop should still be waiting.
ka.Remove("b")
ticker <- time.Time{}
// We expect a call since the item is still alive.
select {
case args := <-s.modDeadlineCalled:
got := args.ackIDs
want := []string{"a"}
if !reflect.DeepEqual(got, want) {
t.Errorf("mismatching IDs:\ngot %v\nwant %v", got, want)
}
case <-time.After(time.Second):
t.Errorf("timed out waiting for deadline extend call")
}
time.Sleep(10 * time.Millisecond)
eventc <- "pre-remove-a"
// Remove the last item so that Stop can proceed.
ka.Remove("a")
}()
go func() {
ka.Stop() // Should block until all items are removed.
eventc <- "post-stop"
}()
for i, want := range []string{"pre-remove-b", "pre-remove-a", "post-stop"} {
select {
case got := <-eventc:
if got != want {
t.Errorf("event #%d:\ngot %v\nwant %v", i, got, want)
}
case <-time.After(time.Second):
t.Errorf("time out waiting for #%d event: want %v", i, want)
}
}
}
// extendCallResult contains a list of ackIDs which are expected in an ackID
// extension request, along with the result that should be returned.
type extendCallResult struct {
ackIDs []string
err error
}
// extendService implements modifyAckDeadline using a hard-coded list of extendCallResults.
type extendService struct {
service
calls []extendCallResult
t *testing.T // used for error logging.
}
func (es *extendService) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error {
if len(es.calls) == 0 {
es.t.Fatalf("unexpected call to modifyAckDeadline: ackIDs: %v", ackIDs)
}
call := es.calls[0]
es.calls = es.calls[1:]
if got, want := ackIDs, call.ackIDs; !reflect.DeepEqual(got, want) {
es.t.Errorf("unexpected arguments to modifyAckDeadline: got: %v ; want: %v", got, want)
}
return call.err
}
// Test implementation returns the first 2 elements as head, and the rest as tail.
func (es *extendService) splitAckIDs(ids []string) ([]string, []string) {
if len(ids) < 2 {
return ids, nil
}
return ids[:2], ids[2:]
}
func TestKeepAliveSplitsBatches(t *testing.T) {
type testCase struct {
calls []extendCallResult
}
for _, tc := range []testCase{
{
calls: []extendCallResult{
{
ackIDs: []string{"a", "b"},
},
{
ackIDs: []string{"c", "d"},
},
{
ackIDs: []string{"e", "f"},
},
},
},
{
calls: []extendCallResult{
{
ackIDs: []string{"a", "b"},
err: errors.New("bang"),
},
// On error we retry once.
{
ackIDs: []string{"a", "b"},
err: errors.New("bang"),
},
// We give up after failing twice, so we move on to the next set, "c" and "d".
{
ackIDs: []string{"c", "d"},
err: errors.New("bang"),
},
// Again, we retry once.
{
ackIDs: []string{"c", "d"},
},
{
ackIDs: []string{"e", "f"},
},
},
},
} {
s := &extendService{
t: t,
calls: tc.calls,
}
ka := &keepAlive{
s: s,
Ctx: context.Background(),
Sub: "subname",
}
ka.extendDeadlines([]string{"a", "b", "c", "d", "e", "f"})
if len(s.calls) != 0 {
t.Errorf("expected extend calls did not occur: %v", s.calls)
}
}
}

View File

@@ -0,0 +1,172 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package loadtest
// Performance benchmarks for pubsub.
// Run with
// go test -bench . -cpu 1
import (
"log"
"sync"
"sync/atomic"
"testing"
"time"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/grpc"
"cloud.google.com/go/internal/testutil"
"cloud.google.com/go/pubsub"
gtransport "google.golang.org/api/transport/grpc"
pb "google.golang.org/genproto/googleapis/pubsub/v1"
)
// These constants are designed to match the "throughput" test in
// https://github.com/GoogleCloudPlatform/pubsub/blob/master/load-test-framework/run.py
// and
// https://github.com/GoogleCloudPlatform/pubsub/blob/master/load-test-framework/src/main/java/com/google/pubsub/clients/experimental/CPSPublisherTask.java
const (
nMessages = 1e5
messageSize = 10000 // size of msg data in bytes
batchSize = 10
batchDuration = 50 * time.Millisecond
serverDelay = 200 * time.Millisecond
maxOutstandingPublishes = 1600 // max_outstanding_messages in run.py
)
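// Rough arithmetic implied by the constants above (not part of the original
// file): each benchmark iteration publishes nMessages/batchSize = 10,000
// batches, i.e. 100,000 messages of 10,000 bytes each, so the b.SetBytes call
// below accounts for 1e9 bytes (~1 GB) of message payload per iteration.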
func BenchmarkPublishThroughput(b *testing.B) {
b.SetBytes(nMessages * messageSize)
client := perfClient(serverDelay, 1, b)
lts := &PubServer{ID: "xxx"}
lts.init(client, "t", messageSize, batchSize, batchDuration)
b.ResetTimer()
for i := 0; i < b.N; i++ {
runOnce(lts)
}
}
func runOnce(lts *PubServer) {
nRequests := int64(nMessages / batchSize)
var nPublished int64
var wg sync.WaitGroup
// The Java loadtest framework is rate-limited to 1 billion Execute calls a
// second (each Execute call corresponding to a publishBatch call here),
// but we can ignore this because of the following.
// The framework runs 10,000 threads, each calling Execute in a loop, but
// we can ignore this too.
// The framework caps the number of outstanding calls to Execute at
// maxOutstandingPublishes. That is what we simulate here.
for i := 0; i < maxOutstandingPublishes; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for atomic.AddInt64(&nRequests, -1) >= 0 {
latencies, err := lts.publishBatch()
if err != nil {
log.Fatalf("publishBatch: %v", err)
}
atomic.AddInt64(&nPublished, int64(len(latencies)))
}
}()
}
wg.Wait()
sent := atomic.LoadInt64(&nPublished)
if sent != nMessages {
log.Fatalf("sent %d messages, expected %d", sent, int(nMessages))
}
}
func perfClient(pubDelay time.Duration, nConns int, f interface {
Fatal(...interface{})
}) *pubsub.Client {
ctx := context.Background()
srv, err := newPerfServer(pubDelay)
if err != nil {
f.Fatal(err)
}
conn, err := gtransport.DialInsecure(ctx,
option.WithEndpoint(srv.Addr),
option.WithGRPCConnectionPool(nConns))
if err != nil {
f.Fatal(err)
}
client, err := pubsub.NewClient(ctx, "projectID", option.WithGRPCConn(conn))
if err != nil {
f.Fatal(err)
}
return client
}
type perfServer struct {
pb.PublisherServer
pb.SubscriberServer
Addr string
pubDelay time.Duration
mu sync.Mutex
activePubs int
maxActivePubs int
}
func newPerfServer(pubDelay time.Duration) (*perfServer, error) {
srv, err := testutil.NewServer(grpc.MaxMsgSize(pubsub.MaxPublishRequestBytes))
if err != nil {
return nil, err
}
perf := &perfServer{Addr: srv.Addr, pubDelay: pubDelay}
pb.RegisterPublisherServer(srv.Gsrv, perf)
pb.RegisterSubscriberServer(srv.Gsrv, perf)
srv.Start()
return perf, nil
}
var doLog = false
func (p *perfServer) incActivePubs(n int) (int, bool) {
p.mu.Lock()
defer p.mu.Unlock()
p.activePubs += n
newMax := false
if p.activePubs > p.maxActivePubs {
p.maxActivePubs = p.activePubs
newMax = true
}
return p.activePubs, newMax
}
func (p *perfServer) Publish(ctx context.Context, req *pb.PublishRequest) (*pb.PublishResponse, error) {
a, newMax := p.incActivePubs(1)
defer p.incActivePubs(-1)
if newMax && doLog {
log.Printf("max %d active publish calls", a)
}
if doLog {
log.Printf("%p -> Publish %d", p, len(req.Messages))
}
res := &pb.PublishResponse{MessageIds: make([]string, len(req.Messages))}
for i := range res.MessageIds {
res.MessageIds[i] = "x"
}
time.Sleep(p.pubDelay)
if doLog {
log.Printf("%p <- Publish %d", p, len(req.Messages))
}
return res, nil
}

View File

@@ -0,0 +1,54 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"fmt"
"log"
"net"
"strconv"
"math/rand"
"cloud.google.com/go/pubsub/loadtest"
pb "cloud.google.com/go/pubsub/loadtest/pb"
"google.golang.org/grpc"
)
func main() {
port := flag.Uint("worker_port", 6000, "port to bind worker to")
role := flag.String("r", "", "role: pub/sub")
flag.Parse()
var lts pb.LoadtestWorkerServer
switch *role {
case "pub":
lts = &loadtest.PubServer{ID: strconv.Itoa(rand.Int())}
case "sub":
lts = &loadtest.SubServer{}
default:
log.Fatalf("unknown role: %q", *role)
}
serv := grpc.NewServer()
pb.RegisterLoadtestWorkerServer(serv, lts)
lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port))
if err != nil {
log.Fatalf("failed to listen: %v", err)
}
serv.Serve(lis)
}
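// Illustrative usage (not part of the original file); the binary name is
// hypothetical:
//
//	go build -o loadtest_worker .
//	./loadtest_worker -worker_port=6000 -r=pub   # publisher worker
//	./loadtest_worker -worker_port=6001 -r=sub   # subscriber worker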

215
vendor/cloud.google.com/go/pubsub/loadtest/loadtest.go generated vendored Normal file
View File

@@ -0,0 +1,215 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package loadtest implements load testing for pubsub,
// following the interface defined in https://github.com/GoogleCloudPlatform/pubsub/tree/master/load-test-framework/ .
//
// This package is experimental.
package loadtest
import (
"bytes"
"errors"
"log"
"runtime"
"strconv"
"sync"
"sync/atomic"
"time"
"golang.org/x/net/context"
"golang.org/x/time/rate"
"github.com/golang/protobuf/ptypes"
"cloud.google.com/go/pubsub"
pb "cloud.google.com/go/pubsub/loadtest/pb"
)
type pubServerConfig struct {
topic *pubsub.Topic
msgData []byte
batchSize int32
}
type PubServer struct {
ID string
cfg atomic.Value
seqNum int32
}
func (l *PubServer) Start(ctx context.Context, req *pb.StartRequest) (*pb.StartResponse, error) {
log.Println("received start")
c, err := pubsub.NewClient(ctx, req.Project)
if err != nil {
return nil, err
}
dur, err := ptypes.Duration(req.PublishBatchDuration)
if err != nil {
return nil, err
}
l.init(c, req.Topic, req.MessageSize, req.PublishBatchSize, dur)
log.Println("started")
return &pb.StartResponse{}, nil
}
func (l *PubServer) init(c *pubsub.Client, topicName string, msgSize, batchSize int32, batchDur time.Duration) {
topic := c.Topic(topicName)
topic.PublishSettings = pubsub.PublishSettings{
DelayThreshold: batchDur,
CountThreshold: 950,
ByteThreshold: 9500000,
}
l.cfg.Store(pubServerConfig{
topic: topic,
msgData: bytes.Repeat([]byte{'A'}, int(msgSize)),
batchSize: batchSize,
})
}
func (l *PubServer) Execute(ctx context.Context, _ *pb.ExecuteRequest) (*pb.ExecuteResponse, error) {
latencies, err := l.publishBatch()
if err != nil {
log.Printf("error: %v", err)
return nil, err
}
return &pb.ExecuteResponse{Latencies: latencies}, nil
}
func (l *PubServer) publishBatch() ([]int64, error) {
var cfg pubServerConfig
if c, ok := l.cfg.Load().(pubServerConfig); ok {
cfg = c
} else {
return nil, errors.New("config not loaded")
}
start := time.Now()
latencies := make([]int64, cfg.batchSize)
startStr := strconv.FormatInt(start.UnixNano()/1e6, 10)
seqNum := atomic.AddInt32(&l.seqNum, cfg.batchSize) - cfg.batchSize
rs := make([]*pubsub.PublishResult, cfg.batchSize)
for i := int32(0); i < cfg.batchSize; i++ {
rs[i] = cfg.topic.Publish(context.TODO(), &pubsub.Message{
Data: cfg.msgData,
Attributes: map[string]string{
"sendTime": startStr,
"clientId": l.ID,
"sequenceNumber": strconv.Itoa(int(seqNum + i)),
},
})
}
for i, r := range rs {
_, err := r.Get(context.Background())
if err != nil {
return nil, err
}
// TODO(jba,pongad): fix latencies
// Later values will be skewed by earlier ones, since we wait for the
// results in order. (On the other hand, it may not matter much, since
// messages are added to bundles in order and bundles get sent more or
// less in order.) If we want more accurate values, we can either start
// a goroutine for each result (similar to the original code using a
// callback), or call reflect.Select with the Ready channels of the
// results.
latencies[i] = time.Since(start).Nanoseconds() / 1e6
}
return latencies, nil
}
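// Illustrative sketch (not part of the original file) of the
// goroutine-per-result alternative mentioned in the TODO above: each latency
// is recorded as soon as its result is ready, instead of in publish order.
//
//	var wg sync.WaitGroup
//	for i, r := range rs {
//		wg.Add(1)
//		go func(i int, r *pubsub.PublishResult) {
//			defer wg.Done()
//			if _, err := r.Get(context.Background()); err == nil {
//				latencies[i] = time.Since(start).Nanoseconds() / 1e6
//			}
//		}(i, r)
//	}
//	wg.Wait()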
type SubServer struct {
lim *rate.Limiter
mu sync.Mutex
idents []*pb.MessageIdentifier
latencies []int64
}
func (s *SubServer) Start(ctx context.Context, req *pb.StartRequest) (*pb.StartResponse, error) {
log.Println("received start")
s.lim = rate.NewLimiter(rate.Every(time.Second), 1)
c, err := pubsub.NewClient(ctx, req.Project)
if err != nil {
return nil, err
}
// Load test API doesn't define any way to stop right now.
go func() {
sub := c.Subscription(req.GetPubsubOptions().Subscription)
sub.ReceiveSettings.NumGoroutines = 10 * runtime.GOMAXPROCS(0)
err := sub.Receive(context.Background(), s.callback)
log.Fatal(err)
}()
log.Println("started")
return &pb.StartResponse{}, nil
}
func (s *SubServer) callback(_ context.Context, m *pubsub.Message) {
id, err := strconv.ParseInt(m.Attributes["clientId"], 10, 64)
if err != nil {
log.Println(err)
m.Nack()
return
}
seqNum, err := strconv.ParseInt(m.Attributes["sequenceNumber"], 10, 32)
if err != nil {
log.Println(err)
m.Nack()
return
}
sendTimeMillis, err := strconv.ParseInt(m.Attributes["sendTime"], 10, 64)
if err != nil {
log.Println(err)
m.Nack()
return
}
latency := time.Now().UnixNano()/1e6 - sendTimeMillis
ident := &pb.MessageIdentifier{
PublisherClientId: id,
SequenceNumber: int32(seqNum),
}
s.mu.Lock()
s.idents = append(s.idents, ident)
s.latencies = append(s.latencies, latency)
s.mu.Unlock()
m.Ack()
}
func (s *SubServer) Execute(ctx context.Context, _ *pb.ExecuteRequest) (*pb.ExecuteResponse, error) {
// Throttle so the load tester doesn't spam us and consume all our CPU.
if err := s.lim.Wait(ctx); err != nil {
return nil, err
}
s.mu.Lock()
idents := s.idents
s.idents = nil
latencies := s.latencies
s.latencies = nil
s.mu.Unlock()
return &pb.ExecuteResponse{
Latencies: latencies,
ReceivedMessages: idents,
}, nil
}

View File

@@ -0,0 +1,792 @@
// Code generated by protoc-gen-go.
// source: loadtest.proto
// DO NOT EDIT!
/*
Package google_pubsub_loadtest is a generated protocol buffer package.
It is generated from these files:
loadtest.proto
It has these top-level messages:
StartRequest
StartResponse
PubsubOptions
KafkaOptions
MessageIdentifier
CheckRequest
CheckResponse
ExecuteRequest
ExecuteResponse
*/
package google_pubsub_loadtest
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf "github.com/golang/protobuf/ptypes/duration"
import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type StartRequest struct {
// The GCP project. This must be set even for Kafka, as we use it to export metrics.
Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"`
// The Pub/Sub or Kafka topic name.
Topic string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"`
// The number of requests that can be made, each second, per client.
RequestRate int32 `protobuf:"varint,3,opt,name=request_rate,json=requestRate" json:"request_rate,omitempty"`
// The size of each user message to publish
MessageSize int32 `protobuf:"varint,4,opt,name=message_size,json=messageSize" json:"message_size,omitempty"`
// The maximum outstanding requests, per client.
MaxOutstandingRequests int32 `protobuf:"varint,5,opt,name=max_outstanding_requests,json=maxOutstandingRequests" json:"max_outstanding_requests,omitempty"`
// The time at which the load test should start. If this is less than the current time, we start immediately.
StartTime *google_protobuf1.Timestamp `protobuf:"bytes,6,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
// The burn-in duration, before which results should not be reported.
BurnInDuration *google_protobuf.Duration `protobuf:"bytes,12,opt,name=burn_in_duration,json=burnInDuration" json:"burn_in_duration,omitempty"`
// The number of user messages of size message_size to publish together.
PublishBatchSize int32 `protobuf:"varint,11,opt,name=publish_batch_size,json=publishBatchSize" json:"publish_batch_size,omitempty"`
// The max duration for coalescing a batch of published messages.
PublishBatchDuration *google_protobuf.Duration `protobuf:"bytes,13,opt,name=publish_batch_duration,json=publishBatchDuration" json:"publish_batch_duration,omitempty"`
// Types that are valid to be assigned to StopConditions:
// *StartRequest_TestDuration
// *StartRequest_NumberOfMessages
StopConditions isStartRequest_StopConditions `protobuf_oneof:"stop_conditions"`
// Types that are valid to be assigned to Options:
// *StartRequest_PubsubOptions
// *StartRequest_KafkaOptions
Options isStartRequest_Options `protobuf_oneof:"options"`
}
func (m *StartRequest) Reset() { *m = StartRequest{} }
func (m *StartRequest) String() string { return proto.CompactTextString(m) }
func (*StartRequest) ProtoMessage() {}
func (*StartRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type isStartRequest_StopConditions interface {
isStartRequest_StopConditions()
}
type isStartRequest_Options interface {
isStartRequest_Options()
}
type StartRequest_TestDuration struct {
TestDuration *google_protobuf.Duration `protobuf:"bytes,7,opt,name=test_duration,json=testDuration,oneof"`
}
type StartRequest_NumberOfMessages struct {
NumberOfMessages int32 `protobuf:"varint,8,opt,name=number_of_messages,json=numberOfMessages,oneof"`
}
type StartRequest_PubsubOptions struct {
PubsubOptions *PubsubOptions `protobuf:"bytes,9,opt,name=pubsub_options,json=pubsubOptions,oneof"`
}
type StartRequest_KafkaOptions struct {
KafkaOptions *KafkaOptions `protobuf:"bytes,10,opt,name=kafka_options,json=kafkaOptions,oneof"`
}
func (*StartRequest_TestDuration) isStartRequest_StopConditions() {}
func (*StartRequest_NumberOfMessages) isStartRequest_StopConditions() {}
func (*StartRequest_PubsubOptions) isStartRequest_Options() {}
func (*StartRequest_KafkaOptions) isStartRequest_Options() {}
func (m *StartRequest) GetStopConditions() isStartRequest_StopConditions {
if m != nil {
return m.StopConditions
}
return nil
}
func (m *StartRequest) GetOptions() isStartRequest_Options {
if m != nil {
return m.Options
}
return nil
}
func (m *StartRequest) GetProject() string {
if m != nil {
return m.Project
}
return ""
}
func (m *StartRequest) GetTopic() string {
if m != nil {
return m.Topic
}
return ""
}
func (m *StartRequest) GetRequestRate() int32 {
if m != nil {
return m.RequestRate
}
return 0
}
func (m *StartRequest) GetMessageSize() int32 {
if m != nil {
return m.MessageSize
}
return 0
}
func (m *StartRequest) GetMaxOutstandingRequests() int32 {
if m != nil {
return m.MaxOutstandingRequests
}
return 0
}
func (m *StartRequest) GetStartTime() *google_protobuf1.Timestamp {
if m != nil {
return m.StartTime
}
return nil
}
func (m *StartRequest) GetBurnInDuration() *google_protobuf.Duration {
if m != nil {
return m.BurnInDuration
}
return nil
}
func (m *StartRequest) GetPublishBatchSize() int32 {
if m != nil {
return m.PublishBatchSize
}
return 0
}
func (m *StartRequest) GetPublishBatchDuration() *google_protobuf.Duration {
if m != nil {
return m.PublishBatchDuration
}
return nil
}
func (m *StartRequest) GetTestDuration() *google_protobuf.Duration {
if x, ok := m.GetStopConditions().(*StartRequest_TestDuration); ok {
return x.TestDuration
}
return nil
}
func (m *StartRequest) GetNumberOfMessages() int32 {
if x, ok := m.GetStopConditions().(*StartRequest_NumberOfMessages); ok {
return x.NumberOfMessages
}
return 0
}
func (m *StartRequest) GetPubsubOptions() *PubsubOptions {
if x, ok := m.GetOptions().(*StartRequest_PubsubOptions); ok {
return x.PubsubOptions
}
return nil
}
func (m *StartRequest) GetKafkaOptions() *KafkaOptions {
if x, ok := m.GetOptions().(*StartRequest_KafkaOptions); ok {
return x.KafkaOptions
}
return nil
}
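// Illustrative sketch (not part of the generated file): populating the oneof
// fields of StartRequest from client code; the literal values are made up.
//
//	req := &StartRequest{
//		Project:        "my-project",
//		Topic:          "my-topic",
//		StopConditions: &StartRequest_NumberOfMessages{NumberOfMessages: 1000},
//		Options:        &StartRequest_PubsubOptions{PubsubOptions: &PubsubOptions{Subscription: "my-sub"}},
//	}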
// XXX_OneofFuncs is for the internal use of the proto package.
func (*StartRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _StartRequest_OneofMarshaler, _StartRequest_OneofUnmarshaler, _StartRequest_OneofSizer, []interface{}{
(*StartRequest_TestDuration)(nil),
(*StartRequest_NumberOfMessages)(nil),
(*StartRequest_PubsubOptions)(nil),
(*StartRequest_KafkaOptions)(nil),
}
}
func _StartRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*StartRequest)
// stop_conditions
switch x := m.StopConditions.(type) {
case *StartRequest_TestDuration:
b.EncodeVarint(7<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.TestDuration); err != nil {
return err
}
case *StartRequest_NumberOfMessages:
b.EncodeVarint(8<<3 | proto.WireVarint)
b.EncodeVarint(uint64(x.NumberOfMessages))
case nil:
default:
return fmt.Errorf("StartRequest.StopConditions has unexpected type %T", x)
}
// options
switch x := m.Options.(type) {
case *StartRequest_PubsubOptions:
b.EncodeVarint(9<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.PubsubOptions); err != nil {
return err
}
case *StartRequest_KafkaOptions:
b.EncodeVarint(10<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.KafkaOptions); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("StartRequest.Options has unexpected type %T", x)
}
return nil
}
func _StartRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*StartRequest)
switch tag {
case 7: // stop_conditions.test_duration
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(google_protobuf.Duration)
err := b.DecodeMessage(msg)
m.StopConditions = &StartRequest_TestDuration{msg}
return true, err
case 8: // stop_conditions.number_of_messages
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.StopConditions = &StartRequest_NumberOfMessages{int32(x)}
return true, err
case 9: // options.pubsub_options
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(PubsubOptions)
err := b.DecodeMessage(msg)
m.Options = &StartRequest_PubsubOptions{msg}
return true, err
case 10: // options.kafka_options
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(KafkaOptions)
err := b.DecodeMessage(msg)
m.Options = &StartRequest_KafkaOptions{msg}
return true, err
default:
return false, nil
}
}
func _StartRequest_OneofSizer(msg proto.Message) (n int) {
m := msg.(*StartRequest)
// stop_conditions
switch x := m.StopConditions.(type) {
case *StartRequest_TestDuration:
s := proto.Size(x.TestDuration)
n += proto.SizeVarint(7<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *StartRequest_NumberOfMessages:
n += proto.SizeVarint(8<<3 | proto.WireVarint)
n += proto.SizeVarint(uint64(x.NumberOfMessages))
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
// options
switch x := m.Options.(type) {
case *StartRequest_PubsubOptions:
s := proto.Size(x.PubsubOptions)
n += proto.SizeVarint(9<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *StartRequest_KafkaOptions:
s := proto.Size(x.KafkaOptions)
n += proto.SizeVarint(10<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type StartResponse struct {
}
func (m *StartResponse) Reset() { *m = StartResponse{} }
func (m *StartResponse) String() string { return proto.CompactTextString(m) }
func (*StartResponse) ProtoMessage() {}
func (*StartResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
type PubsubOptions struct {
// The Cloud Pub/Sub subscription name
Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"`
// The maximum number of messages to pull with each request.
MaxMessagesPerPull int32 `protobuf:"varint,2,opt,name=max_messages_per_pull,json=maxMessagesPerPull" json:"max_messages_per_pull,omitempty"`
}
func (m *PubsubOptions) Reset() { *m = PubsubOptions{} }
func (m *PubsubOptions) String() string { return proto.CompactTextString(m) }
func (*PubsubOptions) ProtoMessage() {}
func (*PubsubOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *PubsubOptions) GetSubscription() string {
if m != nil {
return m.Subscription
}
return ""
}
func (m *PubsubOptions) GetMaxMessagesPerPull() int32 {
if m != nil {
return m.MaxMessagesPerPull
}
return 0
}
type KafkaOptions struct {
// The network address of the Kafka broker.
Broker string `protobuf:"bytes,1,opt,name=broker" json:"broker,omitempty"`
// The length of time to poll for.
PollDuration *google_protobuf.Duration `protobuf:"bytes,2,opt,name=poll_duration,json=pollDuration" json:"poll_duration,omitempty"`
}
func (m *KafkaOptions) Reset() { *m = KafkaOptions{} }
func (m *KafkaOptions) String() string { return proto.CompactTextString(m) }
func (*KafkaOptions) ProtoMessage() {}
func (*KafkaOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *KafkaOptions) GetBroker() string {
if m != nil {
return m.Broker
}
return ""
}
func (m *KafkaOptions) GetPollDuration() *google_protobuf.Duration {
if m != nil {
return m.PollDuration
}
return nil
}
type MessageIdentifier struct {
// The unique id of the client that published the message.
PublisherClientId int64 `protobuf:"varint,1,opt,name=publisher_client_id,json=publisherClientId" json:"publisher_client_id,omitempty"`
// Sequence number of the published message with the given publish_client_id.
SequenceNumber int32 `protobuf:"varint,2,opt,name=sequence_number,json=sequenceNumber" json:"sequence_number,omitempty"`
}
func (m *MessageIdentifier) Reset() { *m = MessageIdentifier{} }
func (m *MessageIdentifier) String() string { return proto.CompactTextString(m) }
func (*MessageIdentifier) ProtoMessage() {}
func (*MessageIdentifier) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *MessageIdentifier) GetPublisherClientId() int64 {
if m != nil {
return m.PublisherClientId
}
return 0
}
func (m *MessageIdentifier) GetSequenceNumber() int32 {
if m != nil {
return m.SequenceNumber
}
return 0
}
type CheckRequest struct {
// Duplicate messages that should not be reported for throughput and latency.
Duplicates []*MessageIdentifier `protobuf:"bytes,1,rep,name=duplicates" json:"duplicates,omitempty"`
}
func (m *CheckRequest) Reset() { *m = CheckRequest{} }
func (m *CheckRequest) String() string { return proto.CompactTextString(m) }
func (*CheckRequest) ProtoMessage() {}
func (*CheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *CheckRequest) GetDuplicates() []*MessageIdentifier {
if m != nil {
return m.Duplicates
}
return nil
}
type CheckResponse struct {
// Histogram of latencies, each one a delta from the previous CheckResponse sent.
BucketValues []int64 `protobuf:"varint,1,rep,packed,name=bucket_values,json=bucketValues" json:"bucket_values,omitempty"`
// The duration from the start of the loadtest to its completion or now if is_finished is false.
RunningDuration *google_protobuf.Duration `protobuf:"bytes,2,opt,name=running_duration,json=runningDuration" json:"running_duration,omitempty"`
// True if the load test has finished running.
IsFinished bool `protobuf:"varint,3,opt,name=is_finished,json=isFinished" json:"is_finished,omitempty"`
// MessageIdentifiers of all received messages since the last Check
ReceivedMessages []*MessageIdentifier `protobuf:"bytes,4,rep,name=received_messages,json=receivedMessages" json:"received_messages,omitempty"`
}
func (m *CheckResponse) Reset() { *m = CheckResponse{} }
func (m *CheckResponse) String() string { return proto.CompactTextString(m) }
func (*CheckResponse) ProtoMessage() {}
func (*CheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *CheckResponse) GetBucketValues() []int64 {
if m != nil {
return m.BucketValues
}
return nil
}
func (m *CheckResponse) GetRunningDuration() *google_protobuf.Duration {
if m != nil {
return m.RunningDuration
}
return nil
}
func (m *CheckResponse) GetIsFinished() bool {
if m != nil {
return m.IsFinished
}
return false
}
func (m *CheckResponse) GetReceivedMessages() []*MessageIdentifier {
if m != nil {
return m.ReceivedMessages
}
return nil
}
type ExecuteRequest struct {
}
func (m *ExecuteRequest) Reset() { *m = ExecuteRequest{} }
func (m *ExecuteRequest) String() string { return proto.CompactTextString(m) }
func (*ExecuteRequest) ProtoMessage() {}
func (*ExecuteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
type ExecuteResponse struct {
// Latencies of the completed operations
Latencies []int64 `protobuf:"varint,1,rep,packed,name=latencies" json:"latencies,omitempty"`
// MessageIdentifiers of all received messages since the last Execute
ReceivedMessages []*MessageIdentifier `protobuf:"bytes,2,rep,name=received_messages,json=receivedMessages" json:"received_messages,omitempty"`
}
func (m *ExecuteResponse) Reset() { *m = ExecuteResponse{} }
func (m *ExecuteResponse) String() string { return proto.CompactTextString(m) }
func (*ExecuteResponse) ProtoMessage() {}
func (*ExecuteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func (m *ExecuteResponse) GetLatencies() []int64 {
if m != nil {
return m.Latencies
}
return nil
}
func (m *ExecuteResponse) GetReceivedMessages() []*MessageIdentifier {
if m != nil {
return m.ReceivedMessages
}
return nil
}
func init() {
proto.RegisterType((*StartRequest)(nil), "google.pubsub.loadtest.StartRequest")
proto.RegisterType((*StartResponse)(nil), "google.pubsub.loadtest.StartResponse")
proto.RegisterType((*PubsubOptions)(nil), "google.pubsub.loadtest.PubsubOptions")
proto.RegisterType((*KafkaOptions)(nil), "google.pubsub.loadtest.KafkaOptions")
proto.RegisterType((*MessageIdentifier)(nil), "google.pubsub.loadtest.MessageIdentifier")
proto.RegisterType((*CheckRequest)(nil), "google.pubsub.loadtest.CheckRequest")
proto.RegisterType((*CheckResponse)(nil), "google.pubsub.loadtest.CheckResponse")
proto.RegisterType((*ExecuteRequest)(nil), "google.pubsub.loadtest.ExecuteRequest")
proto.RegisterType((*ExecuteResponse)(nil), "google.pubsub.loadtest.ExecuteResponse")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Loadtest service
type LoadtestClient interface {
// Starts a load test
Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error)
// Checks the status of a load test
Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error)
}
type loadtestClient struct {
cc *grpc.ClientConn
}
func NewLoadtestClient(cc *grpc.ClientConn) LoadtestClient {
return &loadtestClient{cc}
}
func (c *loadtestClient) Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) {
out := new(StartResponse)
err := grpc.Invoke(ctx, "/google.pubsub.loadtest.Loadtest/Start", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *loadtestClient) Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error) {
out := new(CheckResponse)
err := grpc.Invoke(ctx, "/google.pubsub.loadtest.Loadtest/Check", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Loadtest service
type LoadtestServer interface {
// Starts a load test
Start(context.Context, *StartRequest) (*StartResponse, error)
// Checks the status of a load test
Check(context.Context, *CheckRequest) (*CheckResponse, error)
}
func RegisterLoadtestServer(s *grpc.Server, srv LoadtestServer) {
s.RegisterService(&_Loadtest_serviceDesc, srv)
}
func _Loadtest_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StartRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(LoadtestServer).Start(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.pubsub.loadtest.Loadtest/Start",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(LoadtestServer).Start(ctx, req.(*StartRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Loadtest_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CheckRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(LoadtestServer).Check(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.pubsub.loadtest.Loadtest/Check",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(LoadtestServer).Check(ctx, req.(*CheckRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Loadtest_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.pubsub.loadtest.Loadtest",
HandlerType: (*LoadtestServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Start",
Handler: _Loadtest_Start_Handler,
},
{
MethodName: "Check",
Handler: _Loadtest_Check_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "loadtest.proto",
}
// Client API for LoadtestWorker service
type LoadtestWorkerClient interface {
// Starts a worker
Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error)
// Executes a command on the worker, returning the latencies of the operations. Since some
// commands consist of multiple operations (i.e. pulls contain many received messages with
// different end to end latencies) a single command can have multiple latencies returned.
Execute(ctx context.Context, in *ExecuteRequest, opts ...grpc.CallOption) (*ExecuteResponse, error)
}
type loadtestWorkerClient struct {
cc *grpc.ClientConn
}
func NewLoadtestWorkerClient(cc *grpc.ClientConn) LoadtestWorkerClient {
return &loadtestWorkerClient{cc}
}
func (c *loadtestWorkerClient) Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) {
out := new(StartResponse)
err := grpc.Invoke(ctx, "/google.pubsub.loadtest.LoadtestWorker/Start", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *loadtestWorkerClient) Execute(ctx context.Context, in *ExecuteRequest, opts ...grpc.CallOption) (*ExecuteResponse, error) {
out := new(ExecuteResponse)
err := grpc.Invoke(ctx, "/google.pubsub.loadtest.LoadtestWorker/Execute", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for LoadtestWorker service
type LoadtestWorkerServer interface {
// Starts a worker
Start(context.Context, *StartRequest) (*StartResponse, error)
// Executes a command on the worker, returning the latencies of the operations. Since some
// commands consist of multiple operations (e.g. pulls contain many received messages with
// different end-to-end latencies), a single command can have multiple latencies returned.
Execute(context.Context, *ExecuteRequest) (*ExecuteResponse, error)
}
func RegisterLoadtestWorkerServer(s *grpc.Server, srv LoadtestWorkerServer) {
s.RegisterService(&_LoadtestWorker_serviceDesc, srv)
}
func _LoadtestWorker_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StartRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(LoadtestWorkerServer).Start(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.pubsub.loadtest.LoadtestWorker/Start",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(LoadtestWorkerServer).Start(ctx, req.(*StartRequest))
}
return interceptor(ctx, in, info, handler)
}
func _LoadtestWorker_Execute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ExecuteRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(LoadtestWorkerServer).Execute(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.pubsub.loadtest.LoadtestWorker/Execute",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(LoadtestWorkerServer).Execute(ctx, req.(*ExecuteRequest))
}
return interceptor(ctx, in, info, handler)
}
var _LoadtestWorker_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.pubsub.loadtest.LoadtestWorker",
HandlerType: (*LoadtestWorkerServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Start",
Handler: _LoadtestWorker_Start_Handler,
},
{
MethodName: "Execute",
Handler: _LoadtestWorker_Execute_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "loadtest.proto",
}
func init() { proto.RegisterFile("loadtest.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 847 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xdd, 0x6e, 0xdc, 0x44,
0x14, 0xae, 0x93, 0x6e, 0x92, 0x3d, 0x6b, 0xef, 0x6e, 0x86, 0x12, 0x99, 0x15, 0xd0, 0x60, 0x28,
0x0d, 0x12, 0x72, 0x45, 0xb8, 0x81, 0x1b, 0x84, 0x92, 0x82, 0x12, 0x15, 0x9a, 0xc8, 0x8d, 0x8a,
0xe0, 0x66, 0x34, 0xb6, 0x67, 0x93, 0x61, 0xed, 0x19, 0x33, 0x3f, 0x55, 0xd4, 0x17, 0xe0, 0x8d,
0x78, 0x00, 0x1e, 0x87, 0x5b, 0x5e, 0x00, 0xcd, 0x78, 0xbc, 0x3f, 0x6d, 0x57, 0x0b, 0x42, 0xbd,
0x3c, 0xdf, 0xf9, 0xce, 0x37, 0xe7, 0xd7, 0x86, 0x61, 0x25, 0x48, 0xa9, 0xa9, 0xd2, 0x69, 0x23,
0x85, 0x16, 0xe8, 0xe0, 0x5a, 0x88, 0xeb, 0x8a, 0xa6, 0x8d, 0xc9, 0x95, 0xc9, 0xd3, 0xce, 0x3b,
0xf9, 0xb0, 0xc5, 0x1f, 0x39, 0x56, 0x6e, 0xa6, 0x8f, 0x4a, 0x23, 0x89, 0x66, 0x82, 0xb7, 0x71,
0x93, 0xfb, 0xaf, 0xfa, 0x35, 0xab, 0xa9, 0xd2, 0xa4, 0x6e, 0x5a, 0x42, 0xf2, 0x57, 0x0f, 0xc2,
0x67, 0x9a, 0x48, 0x9d, 0xd1, 0xdf, 0x0c, 0x55, 0x1a, 0xc5, 0xb0, 0xdb, 0x48, 0xf1, 0x2b, 0x2d,
0x74, 0x1c, 0x1c, 0x06, 0x47, 0xfd, 0xac, 0x33, 0xd1, 0x3d, 0xe8, 0x69, 0xd1, 0xb0, 0x22, 0xde,
0x72, 0x78, 0x6b, 0xa0, 0x8f, 0x20, 0x94, 0x6d, 0x28, 0x96, 0x44, 0xd3, 0x78, 0xfb, 0x30, 0x38,
0xea, 0x65, 0x03, 0x8f, 0x65, 0x44, 0x53, 0x4b, 0xa9, 0xa9, 0x52, 0xe4, 0x9a, 0x62, 0xc5, 0x5e,
0xd2, 0xf8, 0x6e, 0x4b, 0xf1, 0xd8, 0x33, 0xf6, 0x92, 0xa2, 0xaf, 0x20, 0xae, 0xc9, 0x2d, 0x16,
0x46, 0x2b, 0x4d, 0x78, 0xc9, 0xf8, 0x35, 0xf6, 0x0a, 0x2a, 0xee, 0x39, 0xfa, 0x41, 0x4d, 0x6e,
0x2f, 0x16, 0x6e, 0x9f, 0xae, 0x42, 0x5f, 0x03, 0x28, 0x9b, 0x3f, 0xb6, 0x95, 0xc5, 0x3b, 0x87,
0xc1, 0xd1, 0xe0, 0x78, 0x92, 0x76, 0xed, 0xf2, 0x65, 0xa7, 0x57, 0x5d, 0xd9, 0x59, 0xdf, 0xb1,
0xad, 0x8d, 0x4e, 0x61, 0x9c, 0x1b, 0xc9, 0x31, 0xe3, 0xb8, 0x6b, 0x5b, 0x1c, 0x3a, 0x81, 0xf7,
0x5e, 0x13, 0x78, 0xec, 0x09, 0xd9, 0xd0, 0x86, 0x9c, 0xf3, 0xce, 0x46, 0x9f, 0x03, 0x6a, 0x4c,
0x5e, 0x31, 0x75, 0x83, 0x73, 0xa2, 0x8b, 0x9b, 0xb6, 0xc4, 0x81, 0xcb, 0x79, 0xec, 0x3d, 0x27,
0xd6, 0xe1, 0xea, 0xbc, 0x80, 0x83, 0x55, 0xf6, 0xfc, 0xe1, 0x68, 0xd3, 0xc3, 0xf7, 0x96, 0xc5,
0xe6, 0xcf, 0x7f, 0x0b, 0x91, 0x5d, 0x84, 0x85, 0xce, 0xee, 0x06, 0x9d, 0xb3, 0x3b, 0x59, 0x68,
0x23, 0xe6, 0x0a, 0x29, 0x20, 0x6e, 0xea, 0x9c, 0x4a, 0x2c, 0xa6, 0xd8, 0xcf, 0x44, 0xc5, 0x7b,
0xb6, 0x80, 0xb3, 0x3b, 0xd9, 0xb8, 0xf5, 0x5d, 0x4c, 0x7f, 0xf4, 0x1e, 0xf4, 0x14, 0x86, 0xed,
0x16, 0x62, 0xd1, 0x58, 0x01, 0x15, 0xf7, 0xdd, 0x93, 0x0f, 0xd2, 0x37, 0xef, 0x68, 0x7a, 0xe9,
0xec, 0x8b, 0x96, 0x7c, 0x16, 0x64, 0x51, 0xb3, 0x0c, 0xa0, 0x27, 0x10, 0xcd, 0xc8, 0x74, 0x46,
0xe6, 0x72, 0xe0, 0xe4, 0x3e, 0x59, 0x27, 0xf7, 0xc4, 0x92, 0x17, 0x6a, 0xe1, 0x6c, 0xc9, 0x3e,
0xd9, 0x87, 0x91, 0xd2, 0xa2, 0xc1, 0x85, 0xe0, 0x25, 0x6b, 0xa1, 0x3e, 0xec, 0x7a, 0xe5, 0x64,
0x04, 0x91, 0xdf, 0x75, 0xd5, 0x08, 0xae, 0x68, 0x32, 0x85, 0x68, 0x25, 0x3b, 0x94, 0x40, 0xa8,
0x4c, 0xae, 0x0a, 0xc9, 0x1c, 0xe0, 0x4f, 0x60, 0x05, 0x43, 0x5f, 0xc0, 0xbb, 0x76, 0x57, 0xbb,
0x56, 0xe1, 0x86, 0x4a, 0xdc, 0x98, 0xaa, 0x72, 0x77, 0xd1, 0xcb, 0x50, 0x4d, 0x6e, 0xbb, 0x66,
0x5d, 0x52, 0x79, 0x69, 0xaa, 0x2a, 0x99, 0x42, 0xb8, 0x9c, 0x36, 0x3a, 0x80, 0x9d, 0x5c, 0x8a,
0x19, 0x95, 0xfe, 0x01, 0x6f, 0xa1, 0x6f, 0x20, 0x6a, 0x44, 0x55, 0x2d, 0xa6, 0xb9, 0xb5, 0x69,
0x2b, 0x42, 0xcb, 0xef, 0xac, 0xa4, 0x82, 0x7d, 0xff, 0xf4, 0x79, 0x49, 0xb9, 0x66, 0x53, 0x46,
0x25, 0x4a, 0xe1, 0x1d, 0xbf, 0x3a, 0x54, 0xe2, 0xa2, 0x62, 0x94, 0x6b, 0xcc, 0x4a, 0xf7, 0xf2,
0x76, 0xb6, 0x3f, 0x77, 0x9d, 0x3a, 0xcf, 0x79, 0x89, 0x1e, 0xc2, 0x48, 0xd9, 0xeb, 0xe2, 0x05,
0xc5, 0xed, 0xf4, 0x7d, 0x65, 0xc3, 0x0e, 0x7e, 0xea, 0xd0, 0xe4, 0x67, 0x08, 0x4f, 0x6f, 0x68,
0x31, 0xeb, 0x3e, 0x1d, 0xe7, 0x00, 0xa5, 0x69, 0x2a, 0x56, 0x10, 0x4d, 0x55, 0x1c, 0x1c, 0x6e,
0x1f, 0x0d, 0x8e, 0x3f, 0x5b, 0x37, 0xc6, 0xd7, 0xf2, 0xcc, 0x96, 0x82, 0x93, 0xbf, 0x03, 0x88,
0xbc, 0x76, 0x3b, 0x2a, 0xf4, 0x31, 0x44, 0xb9, 0x29, 0x66, 0x54, 0xe3, 0x17, 0xa4, 0x32, 0x5e,
0x7f, 0x3b, 0x0b, 0x5b, 0xf0, 0xb9, 0xc3, 0xd0, 0x63, 0x18, 0x4b, 0xc3, 0xb9, 0xfd, 0x7c, 0xfc,
0xfb, 0x16, 0x8e, 0x7c, 0xc8, 0xfc, 0x22, 0xee, 0xc3, 0x80, 0x29, 0x3c, 0x65, 0xdc, 0xf6, 0xa5,
0x74, 0x5f, 0xb4, 0xbd, 0x0c, 0x98, 0xfa, 0xde, 0x23, 0xe8, 0x39, 0xec, 0x4b, 0x5a, 0x50, 0xf6,
0x82, 0x96, 0x8b, 0x8b, 0xb9, 0xfb, 0x5f, 0xeb, 0x1d, 0x77, 0x1a, 0xdd, 0xb6, 0x24, 0x63, 0x18,
0x7e, 0x77, 0x4b, 0x0b, 0xa3, 0xa9, 0x6f, 0x69, 0xf2, 0x7b, 0x00, 0xa3, 0x39, 0xe4, 0x3b, 0xf1,
0x3e, 0xf4, 0x2b, 0xa2, 0x29, 0x2f, 0xd8, 0xbc, 0x0b, 0x0b, 0xe0, 0xcd, 0xb9, 0x6d, 0xfd, 0xef,
0xdc, 0x8e, 0xff, 0x08, 0x60, 0xef, 0x07, 0x1f, 0x80, 0xae, 0xa0, 0xe7, 0x0e, 0x09, 0xad, 0xbd,
0xd2, 0xe5, 0x7f, 0xca, 0xe4, 0xc1, 0x06, 0x96, 0x2f, 0xec, 0x0a, 0x7a, 0x6e, 0xe6, 0xeb, 0x55,
0x97, 0xd7, 0x6d, 0xbd, 0xea, 0xca, 0xe2, 0x1c, 0xff, 0x19, 0xc0, 0xb0, 0x4b, 0xfc, 0x27, 0x21,
0xed, 0x99, 0xbd, 0x9d, 0xf4, 0x7f, 0x81, 0x5d, 0x3f, 0x2a, 0xf4, 0xe9, 0xba, 0x88, 0xd5, 0xf1,
0x4e, 0x1e, 0x6e, 0xe4, 0xb5, 0xda, 0x27, 0x29, 0x7c, 0x50, 0x88, 0xfa, 0x15, 0xf6, 0xb4, 0x62,
0x45, 0x5a, 0x88, 0xba, 0x16, 0xfc, 0x24, 0xea, 0x4a, 0xbc, 0x74, 0xfb, 0xbd, 0xe3, 0xd6, 0xfc,
0xcb, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xc4, 0xfc, 0xdc, 0x27, 0x48, 0x08, 0x00, 0x00,
}
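
The generated stubs above are used like any other gRPC client: dial a connection, wrap it with NewLoadtestClient, and invoke the RPCs. A minimal sketch follows; the import path, the server address, and the empty StartRequest are assumptions for illustration, not part of the vendored code.

// Illustrative only (not part of the vendored code). The import path and
// address are assumptions; a real StartRequest would carry test parameters.
package main

import (
    "log"

    "golang.org/x/net/context"
    "google.golang.org/grpc"

    loadtestpb "cloud.google.com/go/pubsub/loadtest/pb" // assumed import path
)

func main() {
    // Dial the load-test controller; plaintext is fine for a local test server.
    conn, err := grpc.Dial("localhost:6000", grpc.WithInsecure())
    if err != nil {
        log.Fatalf("grpc.Dial: %v", err)
    }
    defer conn.Close()

    client := loadtestpb.NewLoadtestClient(conn)
    if _, err := client.Start(context.Background(), &loadtestpb.StartRequest{}); err != nil {
        log.Fatalf("Start: %v", err)
    }
}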

97
vendor/cloud.google.com/go/pubsub/message.go generated vendored Normal file
View File

@@ -0,0 +1,97 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"time"
"github.com/golang/protobuf/ptypes"
pb "google.golang.org/genproto/googleapis/pubsub/v1"
)
// Message represents a Pub/Sub message.
type Message struct {
// ID identifies this message.
// This ID is assigned by the server and is populated for Messages obtained from a subscription.
// This field is read-only.
ID string
// Data is the actual data in the message.
Data []byte
// Attributes represents the key-value pairs the current message
// is labelled with.
Attributes map[string]string
// ackID is the identifier to acknowledge this message.
ackID string
// The time at which the message was published.
// This is populated by the server for Messages obtained from a subscription.
// This field is read-only.
PublishTime time.Time
// size is the approximate size of the message's data and attributes.
size int
calledDone bool
// The done method of the iterator that created this Message.
doneFunc func(string, bool)
}
func toMessage(resp *pb.ReceivedMessage) (*Message, error) {
if resp.Message == nil {
return &Message{ackID: resp.AckId}, nil
}
pubTime, err := ptypes.Timestamp(resp.Message.PublishTime)
if err != nil {
return nil, err
}
return &Message{
ackID: resp.AckId,
Data: resp.Message.Data,
Attributes: resp.Message.Attributes,
ID: resp.Message.MessageId,
PublishTime: pubTime,
}, nil
}
// Ack indicates successful processing of a Message passed to the Subscription.Receive callback.
// It should not be called on any other Message value.
// If message acknowledgement fails, the Message will be redelivered.
// Client code must call Ack or Nack when finished for each received Message.
// Calls to Ack or Nack have no effect after the first call.
func (m *Message) Ack() {
m.done(true)
}
// Nack indicates that the client will not or cannot process a Message passed to the Subscription.Receive callback.
// It should not be called on any other Message value.
// Nack will result in the Message being redelivered more quickly than if it were allowed to expire.
// Client code must call Ack or Nack when finished for each received Message.
// Calls to Ack or Nack have no effect after the first call.
func (m *Message) Nack() {
m.done(false)
}
func (m *Message) done(ack bool) {
if m.calledDone {
return
}
m.calledDone = true
m.doneFunc(m.ackID, ack)
}
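
The Ack/Nack contract above is easiest to see from a Receive callback: ack after successful handling, nack to request prompt redelivery, and rely on the fact that only the first call counts. A minimal sketch, assuming a configured *pubsub.Client; the subscription ID and process function are placeholders.

// Illustrative only (not part of the vendored code).
package pubsubexample

import (
    "log"

    "cloud.google.com/go/pubsub"
    "golang.org/x/net/context"
)

// consume receives messages and acks each one only after it has been handled;
// a failed message is nacked so it is redelivered promptly.
func consume(ctx context.Context, client *pubsub.Client, process func([]byte) error) error {
    sub := client.Subscription("my-sub") // placeholder subscription ID
    return sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
        if err := process(m.Data); err != nil {
            log.Printf("processing message %s: %v", m.ID, err)
            m.Nack()
            return
        }
        m.Ack()
    })
}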

139
vendor/cloud.google.com/go/pubsub/pubsub.go generated vendored Normal file
View File

@@ -0,0 +1,139 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub // import "cloud.google.com/go/pubsub"
import (
"fmt"
"os"
"runtime"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/grpc"
"golang.org/x/net/context"
)
const (
// ScopePubSub grants permissions to view and manage Pub/Sub
// topics and subscriptions.
ScopePubSub = "https://www.googleapis.com/auth/pubsub"
// ScopeCloudPlatform grants permissions to view and manage your data
// across Google Cloud Platform services.
ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform"
)
const prodAddr = "https://pubsub.googleapis.com/"
// Client is a Google Pub/Sub client scoped to a single project.
//
// Clients should be reused rather than being created as needed.
// A Client may be shared by multiple goroutines.
type Client struct {
projectID string
s service
}
// NewClient creates a new PubSub client.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
var o []option.ClientOption
// Environment variables for gcloud emulator:
// https://cloud.google.com/sdk/gcloud/reference/beta/emulators/pubsub/
if addr := os.Getenv("PUBSUB_EMULATOR_HOST"); addr != "" {
conn, err := grpc.Dial(addr, grpc.WithInsecure())
if err != nil {
return nil, fmt.Errorf("grpc.Dial: %v", err)
}
o = []option.ClientOption{option.WithGRPCConn(conn)}
} else {
o = []option.ClientOption{
// Create multiple connections to increase throughput.
option.WithGRPCConnectionPool(runtime.GOMAXPROCS(0)),
}
}
o = append(o, opts...)
s, err := newPubSubService(ctx, o)
if err != nil {
return nil, fmt.Errorf("constructing pubsub client: %v", err)
}
c := &Client{
projectID: projectID,
s: s,
}
return c, nil
}
// Close closes any resources held by the client.
//
// Close need not be called at program exit.
func (c *Client) Close() error {
return c.s.close()
}
func (c *Client) fullyQualifiedProjectName() string {
return fmt.Sprintf("projects/%s", c.projectID)
}
// pageToken stores the next page token for a server response which is split over multiple pages.
type pageToken struct {
tok string
explicit bool
}
func (pt *pageToken) set(tok string) {
pt.tok = tok
pt.explicit = true
}
func (pt *pageToken) get() string {
return pt.tok
}
// more returns whether further pages should be fetched from the server.
func (pt *pageToken) more() bool {
return pt.tok != "" || !pt.explicit
}
// stringsIterator provides an iterator API for a sequence of API page fetches that return lists of strings.
type stringsIterator struct {
ctx context.Context
strings []string
token pageToken
fetch func(ctx context.Context, tok string) (*stringsPage, error)
}
// Next returns the next string. If there are no more strings, iterator.Done will be returned.
func (si *stringsIterator) Next() (string, error) {
for len(si.strings) == 0 && si.token.more() {
page, err := si.fetch(si.ctx, si.token.get())
if err != nil {
return "", err
}
si.token.set(page.tok)
si.strings = page.strings
}
if len(si.strings) == 0 {
return "", iterator.Done
}
s := si.strings[0]
si.strings = si.strings[1:]
return s, nil
}
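
As the constructor above shows, NewClient either dials the emulator named by PUBSUB_EMULATOR_HOST or opens a small pool of gRPC connections to the production endpoint. A minimal sketch of typical construction, with a placeholder project ID:

// Illustrative only (not part of the vendored code). The project ID is a
// placeholder; if PUBSUB_EMULATOR_HOST is set, NewClient dials the emulator
// over an insecure connection instead of the production endpoint.
package pubsubexample

import (
    "cloud.google.com/go/pubsub"
    "golang.org/x/net/context"
)

func newClient(ctx context.Context) (*pubsub.Client, error) {
    client, err := pubsub.NewClient(ctx, "my-project")
    if err != nil {
        return nil, err
    }
    // Callers should Close the client when they are done with it, e.g.
    //   defer client.Close()
    return client, nil
}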

115
vendor/cloud.google.com/go/pubsub/puller.go generated vendored Normal file
View File

@@ -0,0 +1,115 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"sync"
"golang.org/x/net/context"
)
// puller fetches messages from the server in a batch.
type puller struct {
ctx context.Context
cancel context.CancelFunc
// keepAlive takes ownership of the lifetime of the message identified
// by ackID, ensuring that its ack deadline does not expire. It should
// be called each time a new message is fetched from the server, even
// if it is not yet returned from Next.
keepAlive func(ackID string)
// abandon should be called for each message which has previously been
// passed to keepAlive, but will never be returned by Next.
abandon func(ackID string)
// fetch fetches a batch of messages from the server.
fetch func() ([]*Message, error)
mu sync.Mutex
buf []*Message
}
// newPuller constructs a new puller.
// batchSize is the maximum number of messages to fetch at once.
// No more than batchSize messages will be outstanding at any time.
func newPuller(s service, subName string, ctx context.Context, batchSize int32, keepAlive, abandon func(ackID string)) *puller {
ctx, cancel := context.WithCancel(ctx)
return &puller{
cancel: cancel,
keepAlive: keepAlive,
abandon: abandon,
ctx: ctx,
fetch: func() ([]*Message, error) { return s.fetchMessages(ctx, subName, batchSize) },
}
}
const maxPullAttempts = 2
// Next returns the next message from the server, fetching a new batch if necessary.
// keepAlive is called with the ackIDs of newly fetched messages.
// If p.ctx has already been cancelled before Next is called, no new messages
// will be fetched.
func (p *puller) Next() (*Message, error) {
p.mu.Lock()
defer p.mu.Unlock()
// If ctx has been cancelled, return straight away (even if there are buffered messages available).
select {
case <-p.ctx.Done():
return nil, p.ctx.Err()
default:
}
for len(p.buf) == 0 {
var buf []*Message
var err error
for i := 0; i < maxPullAttempts; i++ {
// Once Stop has completed, all future calls to Next will immediately fail at this point.
buf, err = p.fetch()
if err == nil || err == context.Canceled || err == context.DeadlineExceeded {
break
}
}
if err != nil {
return nil, err
}
for _, m := range buf {
p.keepAlive(m.ackID)
}
p.buf = buf
}
m := p.buf[0]
p.buf = p.buf[1:]
return m, nil
}
// Stop aborts any pending calls to Next, and prevents any future ones from succeeding.
// Stop also abandons any messages that have been pre-fetched.
// Once Stop completes, no calls to Next will succeed.
func (p *puller) Stop() {
// Next may be executing in another goroutine. Cancel it, and then wait until it terminates.
p.cancel()
p.mu.Lock()
defer p.mu.Unlock()
for _, m := range p.buf {
p.abandon(m.ackID)
}
p.buf = nil
}
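
The puller above exposes a simple contract: Next blocks until a message or an error, and Stop cancels pending calls and abandons anything still buffered. The sketch below illustrates that loop; it uses unexported types, so it would only compile inside the pubsub package, and the handle, keepAlive, and abandon callbacks are placeholders.

// Illustrative only (not part of the vendored file); assumes compilation
// inside the pubsub package, since puller and service are unexported.
package pubsub

import "golang.org/x/net/context"

// drain pulls messages one at a time until Next fails, handing each one to
// the caller-supplied handle function.
func drain(s service, subName string, handle func(*Message), keepAlive, abandon func(string)) error {
    p := newPuller(s, subName, context.Background(), 10, keepAlive, abandon)
    defer p.Stop()
    for {
        m, err := p.Next()
        if err != nil {
            // context.Canceled after Stop, or a failed fetch after retries.
            return err
        }
        handle(m)
    }
}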

154
vendor/cloud.google.com/go/pubsub/puller_test.go generated vendored Normal file
View File

@@ -0,0 +1,154 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"errors"
"reflect"
"testing"
"golang.org/x/net/context"
)
type fetchResult struct {
msgs []*Message
err error
}
type fetcherService struct {
service
results []fetchResult
unexpectedCall bool
}
func (s *fetcherService) fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error) {
if len(s.results) == 0 {
s.unexpectedCall = true
return nil, errors.New("bang")
}
ret := s.results[0]
s.results = s.results[1:]
return ret.msgs, ret.err
}
func TestPuller(t *testing.T) {
s := &fetcherService{
results: []fetchResult{
{
msgs: []*Message{{ackID: "a"}, {ackID: "b"}},
},
{},
{
msgs: []*Message{{ackID: "c"}, {ackID: "d"}},
},
{
msgs: []*Message{{ackID: "e"}},
},
},
}
pulled := make(chan string, 10)
pull := newPuller(s, "subname", context.Background(), 2, func(ackID string) { pulled <- ackID }, func(string) {})
got := []string{}
for i := 0; i < 5; i++ {
m, err := pull.Next()
if err != nil {
t.Errorf("unexpected err from pull.Next: %v", err)
continue
}
got = append(got, m.ackID)
}
_, err := pull.Next()
if err == nil {
t.Errorf("pull.Next returned nil error, want non-nil after results are exhausted")
}
want := []string{"a", "b", "c", "d", "e"}
if !reflect.DeepEqual(got, want) {
t.Errorf("pulled ack ids: got: %v ; want: %v", got, want)
}
}
func TestPullerAddsToKeepAlive(t *testing.T) {
s := &fetcherService{
results: []fetchResult{
{
msgs: []*Message{{ackID: "a"}, {ackID: "b"}},
},
{
msgs: []*Message{{ackID: "c"}, {ackID: "d"}},
},
},
}
pulled := make(chan string, 10)
pull := newPuller(s, "subname", context.Background(), 2, func(ackID string) { pulled <- ackID }, func(string) {})
got := []string{}
for i := 0; i < 3; i++ {
m, err := pull.Next()
if err != nil {
t.Errorf("unexpected err from pull.Next: %v", err)
continue
}
got = append(got, m.ackID)
}
want := []string{"a", "b", "c"}
if !reflect.DeepEqual(got, want) {
t.Errorf("pulled ack ids: got: %v ; want: %v", got, want)
}
close(pulled)
// We should have seen "d" written to the channel too, even though it hasn't been returned yet.
pulledIDs := []string{}
for id := range pulled {
pulledIDs = append(pulledIDs, id)
}
want = append(want, "d")
if !reflect.DeepEqual(pulledIDs, want) {
t.Errorf("pulled ack ids: got: %v ; want: %v", pulledIDs, want)
}
}
func TestPullerRetriesOnce(t *testing.T) {
bang := errors.New("bang")
s := &fetcherService{
results: []fetchResult{
{
err: bang,
},
{
err: bang,
},
},
}
pull := newPuller(s, "subname", context.Background(), 2, func(string) {}, func(string) {})
_, err := pull.Next()
if err != bang {
t.Errorf("pull.Next err got: %v, want: %v", err, bang)
}
if s.unexpectedCall {
t.Errorf("unexpected retry")
}
if len(s.results) != 0 {
t.Errorf("outstanding calls: got: %v, want: 0", len(s.results))
}
}

598
vendor/cloud.google.com/go/pubsub/service.go generated vendored Normal file
View File

@@ -0,0 +1,598 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"fmt"
"math"
"strings"
"sync"
"time"
"github.com/golang/protobuf/ptypes"
"cloud.google.com/go/iam"
"cloud.google.com/go/internal/version"
vkit "cloud.google.com/go/pubsub/apiv1"
durpb "github.com/golang/protobuf/ptypes/duration"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/option"
pb "google.golang.org/genproto/googleapis/pubsub/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type nextStringFunc func() (string, error)
type nextSnapshotFunc func() (*snapshotConfig, error)
// service provides an internal abstraction to isolate the generated
// PubSub API; most of this package uses this interface instead.
// The single implementation, *apiService, contains all the knowledge
// of the generated PubSub API (except for that present in legacy code).
type service interface {
createSubscription(ctx context.Context, subName string, cfg SubscriptionConfig) error
getSubscriptionConfig(ctx context.Context, subName string) (SubscriptionConfig, string, error)
listProjectSubscriptions(ctx context.Context, projName string) nextStringFunc
deleteSubscription(ctx context.Context, name string) error
subscriptionExists(ctx context.Context, name string) (bool, error)
modifyPushConfig(ctx context.Context, subName string, conf PushConfig) error
createTopic(ctx context.Context, name string) error
deleteTopic(ctx context.Context, name string) error
topicExists(ctx context.Context, name string) (bool, error)
listProjectTopics(ctx context.Context, projName string) nextStringFunc
listTopicSubscriptions(ctx context.Context, topicName string) nextStringFunc
modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error
fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error)
publishMessages(ctx context.Context, topicName string, msgs []*Message) ([]string, error)
// splitAckIDs divides ackIDs into
// * a batch of a size which is suitable for passing to acknowledge or
// modifyAckDeadline, and
// * the rest.
splitAckIDs(ackIDs []string) ([]string, []string)
// acknowledge ACKs the IDs in ackIDs.
acknowledge(ctx context.Context, subName string, ackIDs []string) error
iamHandle(resourceName string) *iam.Handle
newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller
createSnapshot(ctx context.Context, snapName, subName string) (*snapshotConfig, error)
deleteSnapshot(ctx context.Context, snapName string) error
listProjectSnapshots(ctx context.Context, projName string) nextSnapshotFunc
// TODO(pongad): Raw proto returns an empty SeekResponse; figure out if we want to return it before GA.
seekToTime(ctx context.Context, subName string, t time.Time) error
seekToSnapshot(ctx context.Context, subName, snapName string) error
close() error
}
type apiService struct {
pubc *vkit.PublisherClient
subc *vkit.SubscriberClient
}
func newPubSubService(ctx context.Context, opts []option.ClientOption) (*apiService, error) {
pubc, err := vkit.NewPublisherClient(ctx, opts...)
if err != nil {
return nil, err
}
subc, err := vkit.NewSubscriberClient(ctx, option.WithGRPCConn(pubc.Connection()))
if err != nil {
_ = pubc.Close() // ignore error
return nil, err
}
pubc.SetGoogleClientInfo("gccl", version.Repo)
subc.SetGoogleClientInfo("gccl", version.Repo)
return &apiService{pubc: pubc, subc: subc}, nil
}
func (s *apiService) close() error {
// Return the first error, because the first call closes the connection.
err := s.pubc.Close()
_ = s.subc.Close()
return err
}
func (s *apiService) createSubscription(ctx context.Context, subName string, cfg SubscriptionConfig) error {
var rawPushConfig *pb.PushConfig
if cfg.PushConfig.Endpoint != "" || len(cfg.PushConfig.Attributes) != 0 {
rawPushConfig = &pb.PushConfig{
Attributes: cfg.PushConfig.Attributes,
PushEndpoint: cfg.PushConfig.Endpoint,
}
}
var retentionDuration *durpb.Duration
if cfg.retentionDuration != 0 {
retentionDuration = ptypes.DurationProto(cfg.retentionDuration)
}
_, err := s.subc.CreateSubscription(ctx, &pb.Subscription{
Name: subName,
Topic: cfg.Topic.name,
PushConfig: rawPushConfig,
AckDeadlineSeconds: trunc32(int64(cfg.AckDeadline.Seconds())),
RetainAckedMessages: cfg.retainAckedMessages,
MessageRetentionDuration: retentionDuration,
})
return err
}
func (s *apiService) getSubscriptionConfig(ctx context.Context, subName string) (SubscriptionConfig, string, error) {
rawSub, err := s.subc.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: subName})
if err != nil {
return SubscriptionConfig{}, "", err
}
var rd time.Duration
// TODO(pongad): Remove nil-check after white list is removed.
if rawSub.MessageRetentionDuration != nil {
if rd, err = ptypes.Duration(rawSub.MessageRetentionDuration); err != nil {
return SubscriptionConfig{}, "", err
}
}
sub := SubscriptionConfig{
AckDeadline: time.Second * time.Duration(rawSub.AckDeadlineSeconds),
PushConfig: PushConfig{
Endpoint: rawSub.PushConfig.PushEndpoint,
Attributes: rawSub.PushConfig.Attributes,
},
retainAckedMessages: rawSub.RetainAckedMessages,
retentionDuration: rd,
}
return sub, rawSub.Topic, nil
}
// stringsPage contains a list of strings and a token for fetching the next page.
type stringsPage struct {
strings []string
tok string
}
func (s *apiService) listProjectSubscriptions(ctx context.Context, projName string) nextStringFunc {
it := s.subc.ListSubscriptions(ctx, &pb.ListSubscriptionsRequest{
Project: projName,
})
return func() (string, error) {
sub, err := it.Next()
if err != nil {
return "", err
}
return sub.Name, nil
}
}
func (s *apiService) deleteSubscription(ctx context.Context, name string) error {
return s.subc.DeleteSubscription(ctx, &pb.DeleteSubscriptionRequest{Subscription: name})
}
func (s *apiService) subscriptionExists(ctx context.Context, name string) (bool, error) {
_, err := s.subc.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: name})
if err == nil {
return true, nil
}
if grpc.Code(err) == codes.NotFound {
return false, nil
}
return false, err
}
func (s *apiService) createTopic(ctx context.Context, name string) error {
_, err := s.pubc.CreateTopic(ctx, &pb.Topic{Name: name})
return err
}
func (s *apiService) listProjectTopics(ctx context.Context, projName string) nextStringFunc {
it := s.pubc.ListTopics(ctx, &pb.ListTopicsRequest{
Project: projName,
})
return func() (string, error) {
topic, err := it.Next()
if err != nil {
return "", err
}
return topic.Name, nil
}
}
func (s *apiService) deleteTopic(ctx context.Context, name string) error {
return s.pubc.DeleteTopic(ctx, &pb.DeleteTopicRequest{Topic: name})
}
func (s *apiService) topicExists(ctx context.Context, name string) (bool, error) {
_, err := s.pubc.GetTopic(ctx, &pb.GetTopicRequest{Topic: name})
if err == nil {
return true, nil
}
if grpc.Code(err) == codes.NotFound {
return false, nil
}
return false, err
}
func (s *apiService) listTopicSubscriptions(ctx context.Context, topicName string) nextStringFunc {
it := s.pubc.ListTopicSubscriptions(ctx, &pb.ListTopicSubscriptionsRequest{
Topic: topicName,
})
return it.Next
}
func (s *apiService) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error {
return s.subc.ModifyAckDeadline(ctx, &pb.ModifyAckDeadlineRequest{
Subscription: subName,
AckIds: ackIDs,
AckDeadlineSeconds: trunc32(int64(deadline.Seconds())),
})
}
// maxPayload is the maximum number of bytes to devote to actual ids in
// acknowledgement or modifyAckDeadline requests. A serialized
// AcknowledgeRequest proto has a small constant overhead, plus the size of the
// subscription name, plus 3 bytes per ID (a tag byte and two size bytes). A
// ModifyAckDeadlineRequest has an additional few bytes for the deadline. We
// don't know the subscription name here, so we just assume the size exclusive
// of ids is 100 bytes.
//
// With gRPC there is no way for the client to know the server's max message size (it is
// configurable on the server). We know from experience that it
// is 512K.
const (
maxPayload = 512 * 1024
reqFixedOverhead = 100
overheadPerID = 3
maxSendRecvBytes = 20 * 1024 * 1024 // 20M
)
// splitAckIDs splits ids into two slices, the first of which contains at most maxPayload bytes of ackID data.
func (s *apiService) splitAckIDs(ids []string) ([]string, []string) {
total := reqFixedOverhead
for i, id := range ids {
total += len(id) + overheadPerID
if total > maxPayload {
return ids[:i], ids[i:]
}
}
return ids, nil
}
func (s *apiService) acknowledge(ctx context.Context, subName string, ackIDs []string) error {
return s.subc.Acknowledge(ctx, &pb.AcknowledgeRequest{
Subscription: subName,
AckIds: ackIDs,
})
}
func (s *apiService) fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error) {
resp, err := s.subc.Pull(ctx, &pb.PullRequest{
Subscription: subName,
MaxMessages: maxMessages,
}, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes)))
if err != nil {
return nil, err
}
return convertMessages(resp.ReceivedMessages)
}
func convertMessages(rms []*pb.ReceivedMessage) ([]*Message, error) {
msgs := make([]*Message, 0, len(rms))
for i, m := range rms {
msg, err := toMessage(m)
if err != nil {
return nil, fmt.Errorf("pubsub: cannot decode the retrieved message at index: %d, message: %+v", i, m)
}
msgs = append(msgs, msg)
}
return msgs, nil
}
func (s *apiService) publishMessages(ctx context.Context, topicName string, msgs []*Message) ([]string, error) {
rawMsgs := make([]*pb.PubsubMessage, len(msgs))
for i, msg := range msgs {
rawMsgs[i] = &pb.PubsubMessage{
Data: msg.Data,
Attributes: msg.Attributes,
}
}
resp, err := s.pubc.Publish(ctx, &pb.PublishRequest{
Topic: topicName,
Messages: rawMsgs,
}, gax.WithGRPCOptions(grpc.MaxCallSendMsgSize(maxSendRecvBytes)))
if err != nil {
return nil, err
}
return resp.MessageIds, nil
}
func (s *apiService) modifyPushConfig(ctx context.Context, subName string, conf PushConfig) error {
return s.subc.ModifyPushConfig(ctx, &pb.ModifyPushConfigRequest{
Subscription: subName,
PushConfig: &pb.PushConfig{
Attributes: conf.Attributes,
PushEndpoint: conf.Endpoint,
},
})
}
func (s *apiService) iamHandle(resourceName string) *iam.Handle {
return iam.InternalNewHandle(s.pubc.Connection(), resourceName)
}
func trunc32(i int64) int32 {
if i > math.MaxInt32 {
i = math.MaxInt32
}
return int32(i)
}
func (s *apiService) newStreamingPuller(ctx context.Context, subName string, ackDeadlineSecs int32) *streamingPuller {
p := &streamingPuller{
ctx: ctx,
subName: subName,
ackDeadlineSecs: ackDeadlineSecs,
subc: s.subc,
}
p.c = sync.NewCond(&p.mu)
return p
}
type streamingPuller struct {
ctx context.Context
subName string
ackDeadlineSecs int32
subc *vkit.SubscriberClient
mu sync.Mutex
c *sync.Cond
inFlight bool
closed bool // set after CloseSend called
spc pb.Subscriber_StreamingPullClient
err error
}
// open establishes (or re-establishes) a stream for pulling messages.
// It takes care that only one RPC is in flight at a time.
func (p *streamingPuller) open() error {
p.c.L.Lock()
defer p.c.L.Unlock()
p.openLocked()
return p.err
}
func (p *streamingPuller) openLocked() {
if p.inFlight {
// Another goroutine is opening; wait for it.
for p.inFlight {
p.c.Wait()
}
return
}
// No opens in flight; start one.
// Keep the lock held, to avoid a race where we
// close the old stream while opening a new one.
p.inFlight = true
spc, err := p.subc.StreamingPull(p.ctx, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes)))
if err == nil {
err = spc.Send(&pb.StreamingPullRequest{
Subscription: p.subName,
StreamAckDeadlineSeconds: p.ackDeadlineSecs,
})
}
p.spc = spc
p.err = err
p.inFlight = false
p.c.Broadcast()
}
func (p *streamingPuller) call(f func(pb.Subscriber_StreamingPullClient) error) error {
p.c.L.Lock()
defer p.c.L.Unlock()
// Wait for an open in flight.
for p.inFlight {
p.c.Wait()
}
var err error
var bo gax.Backoff
for {
select {
case <-p.ctx.Done():
p.err = p.ctx.Err()
default:
}
if p.err != nil {
return p.err
}
spc := p.spc
// Do not call f with the lock held. Only one goroutine calls Send
// (streamingMessageIterator.sender) and only one calls Recv
// (streamingMessageIterator.receiver). If we locked, then a
// blocked Recv would prevent a Send from happening.
p.c.L.Unlock()
err = f(spc)
p.c.L.Lock()
if !p.closed && err != nil && isRetryable(err) {
// Sleep with exponential backoff. Normally we wouldn't hold the lock while sleeping,
// but here it can't do any harm, since the stream is broken anyway.
gax.Sleep(p.ctx, bo.Pause())
p.openLocked()
continue
}
// Not an error, or not a retryable error; stop retrying.
p.err = err
return err
}
}
// Logic from https://github.com/GoogleCloudPlatform/google-cloud-java/blob/master/google-cloud-pubsub/src/main/java/com/google/cloud/pubsub/v1/StatusUtil.java.
func isRetryable(err error) bool {
s, ok := status.FromError(err)
if !ok { // includes io.EOF, normal stream close, which causes us to reopen
return true
}
switch s.Code() {
case codes.DeadlineExceeded, codes.Internal, codes.Canceled, codes.ResourceExhausted:
return true
case codes.Unavailable:
return !strings.Contains(s.Message(), "Server shutdownNow invoked")
default:
return false
}
}
func (p *streamingPuller) fetchMessages() ([]*Message, error) {
var res *pb.StreamingPullResponse
err := p.call(func(spc pb.Subscriber_StreamingPullClient) error {
var err error
res, err = spc.Recv()
return err
})
if err != nil {
return nil, err
}
return convertMessages(res.ReceivedMessages)
}
func (p *streamingPuller) send(req *pb.StreamingPullRequest) error {
// Note: req.ModifyDeadlineAckIds and req.ModifyDeadlineSeconds must have the same length.
var rest *pb.StreamingPullRequest
for len(req.AckIds) > 0 || len(req.ModifyDeadlineAckIds) > 0 {
req, rest = splitRequest(req, maxPayload)
err := p.call(func(spc pb.Subscriber_StreamingPullClient) error {
return spc.Send(req)
})
if err != nil {
return err
}
req = rest
}
return nil
}
func (p *streamingPuller) closeSend() {
p.mu.Lock()
p.closed = true
p.spc.CloseSend()
p.mu.Unlock()
}
// splitRequest splits req into a prefix that is smaller than maxSize, and a remainder.
func splitRequest(req *pb.StreamingPullRequest, maxSize int) (prefix, remainder *pb.StreamingPullRequest) {
const int32Bytes = 4
// Copy all fields before splitting the variable-sized ones.
remainder = &pb.StreamingPullRequest{}
*remainder = *req
// Split message so it isn't too big.
size := reqFixedOverhead
i := 0
for size < maxSize && (i < len(req.AckIds) || i < len(req.ModifyDeadlineAckIds)) {
if i < len(req.AckIds) {
size += overheadPerID + len(req.AckIds[i])
}
if i < len(req.ModifyDeadlineAckIds) {
size += overheadPerID + len(req.ModifyDeadlineAckIds[i]) + int32Bytes
}
i++
}
min := func(a, b int) int {
if a < b {
return a
}
return b
}
j := i
if size > maxSize {
j--
}
k := min(j, len(req.AckIds))
remainder.AckIds = req.AckIds[k:]
req.AckIds = req.AckIds[:k]
k = min(j, len(req.ModifyDeadlineAckIds))
remainder.ModifyDeadlineAckIds = req.ModifyDeadlineAckIds[k:]
remainder.ModifyDeadlineSeconds = req.ModifyDeadlineSeconds[k:]
req.ModifyDeadlineAckIds = req.ModifyDeadlineAckIds[:k]
req.ModifyDeadlineSeconds = req.ModifyDeadlineSeconds[:k]
return req, remainder
}
func (s *apiService) createSnapshot(ctx context.Context, snapName, subName string) (*snapshotConfig, error) {
snap, err := s.subc.CreateSnapshot(ctx, &pb.CreateSnapshotRequest{
Name: snapName,
Subscription: subName,
})
if err != nil {
return nil, err
}
return s.toSnapshotConfig(snap)
}
func (s *apiService) deleteSnapshot(ctx context.Context, snapName string) error {
return s.subc.DeleteSnapshot(ctx, &pb.DeleteSnapshotRequest{Snapshot: snapName})
}
func (s *apiService) listProjectSnapshots(ctx context.Context, projName string) nextSnapshotFunc {
it := s.subc.ListSnapshots(ctx, &pb.ListSnapshotsRequest{
Project: projName,
})
return func() (*snapshotConfig, error) {
snap, err := it.Next()
if err != nil {
return nil, err
}
return s.toSnapshotConfig(snap)
}
}
func (s *apiService) toSnapshotConfig(snap *pb.Snapshot) (*snapshotConfig, error) {
exp, err := ptypes.Timestamp(snap.ExpireTime)
if err != nil {
return nil, err
}
return &snapshotConfig{
snapshot: &snapshot{
s: s,
name: snap.Name,
},
Topic: newTopic(s, snap.Topic),
Expiration: exp,
}, nil
}
func (s *apiService) seekToTime(ctx context.Context, subName string, t time.Time) error {
ts, err := ptypes.TimestampProto(t)
if err != nil {
return err
}
_, err = s.subc.Seek(ctx, &pb.SeekRequest{
Subscription: subName,
Target: &pb.SeekRequest_Time{ts},
})
return err
}
func (s *apiService) seekToSnapshot(ctx context.Context, subName, snapName string) error {
_, err := s.subc.Seek(ctx, &pb.SeekRequest{
Subscription: subName,
Target: &pb.SeekRequest_Snapshot{snapName},
})
return err
}
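
To make the maxPayload arithmetic above concrete: with the stated constants, an ack ID of roughly 176 bytes (a typical length, assumed here) costs about 179 bytes per entry, so a single acknowledge or modifyAckDeadline request can carry on the order of (512*1024 - 100) / 179, about 2,900 IDs. A caller of the service interface can therefore split and resend until nothing remains, as in this in-package sketch (illustrative, not part of the vendored file):

// Illustrative only (not part of the vendored file); assumes compilation
// inside the pubsub package, since service and splitAckIDs are unexported.
package pubsub

import "golang.org/x/net/context"

// ackAll acknowledges ackIDs in maxPayload-sized batches using splitAckIDs.
func ackAll(ctx context.Context, s service, subName string, ackIDs []string) error {
    batch, rest := s.splitAckIDs(ackIDs)
    for len(batch) > 0 {
        if err := s.acknowledge(ctx, subName, batch); err != nil {
            return err
        }
        batch, rest = s.splitAckIDs(rest)
    }
    return nil
}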

68
vendor/cloud.google.com/go/pubsub/service_test.go generated vendored Normal file
View File

@@ -0,0 +1,68 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"reflect"
"testing"
pb "google.golang.org/genproto/googleapis/pubsub/v1"
)
func TestSplitRequest(t *testing.T) {
split := func(a []string, i int) ([]string, []string) {
if len(a) < i {
return a, nil
}
return a[:i], a[i:]
}
ackIDs := []string{"aaaa", "bbbb", "cccc", "dddd", "eeee"}
modDeadlines := []int32{1, 2, 3, 4, 5}
for i, test := range []struct {
ackIDs []string
modAckIDs []string
splitIndex int
}{
{ackIDs, ackIDs, 2},
{nil, ackIDs, 3},
{ackIDs, nil, 5},
{nil, ackIDs[:1], 1},
} {
req := &pb.StreamingPullRequest{
AckIds: test.ackIDs,
ModifyDeadlineAckIds: test.modAckIDs,
ModifyDeadlineSeconds: modDeadlines[:len(test.modAckIDs)],
}
a1, a2 := split(test.ackIDs, test.splitIndex)
m1, m2 := split(test.modAckIDs, test.splitIndex)
want1 := &pb.StreamingPullRequest{
AckIds: a1,
ModifyDeadlineAckIds: m1,
ModifyDeadlineSeconds: modDeadlines[:len(m1)],
}
want2 := &pb.StreamingPullRequest{
AckIds: a2,
ModifyDeadlineAckIds: m2,
ModifyDeadlineSeconds: modDeadlines[len(m1) : len(m1)+len(m2)],
}
got1, got2 := splitRequest(req, reqFixedOverhead+40)
if !reflect.DeepEqual(got1, want1) {
t.Errorf("#%d: first:\ngot %+v\nwant %+v", i, got1, want1)
}
if !reflect.DeepEqual(got2, want2) {
t.Errorf("#%d: second:\ngot %+v\nwant %+v", i, got2, want2)
}
}
}

119
vendor/cloud.google.com/go/pubsub/snapshot.go generated vendored Normal file
View File

@@ -0,0 +1,119 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"strings"
"time"
vkit "cloud.google.com/go/pubsub/apiv1"
"golang.org/x/net/context"
)
// Snapshot is a reference to a PubSub snapshot.
type snapshot struct {
s service
// The fully qualified identifier for the snapshot, in the format "projects/<projid>/snapshots/<snap>"
name string
}
// ID returns the unique identifier of the snapshot within its project.
func (s *snapshot) ID() string {
slash := strings.LastIndex(s.name, "/")
if slash == -1 {
// name is not a fully-qualified name.
panic("bad snapshot name")
}
return s.name[slash+1:]
}
// SnapshotConfig contains the details of a Snapshot.
type snapshotConfig struct {
*snapshot
Topic *Topic
Expiration time.Time
}
// Snapshot creates a reference to a snapshot.
func (c *Client) snapshot(id string) *snapshot {
return &snapshot{
s: c.s,
name: vkit.SubscriberSnapshotPath(c.projectID, id),
}
}
// Snapshots returns an iterator which returns snapshots for this project.
func (c *Client) snapshots(ctx context.Context) *snapshotConfigIterator {
return &snapshotConfigIterator{
next: c.s.listProjectSnapshots(ctx, c.fullyQualifiedProjectName()),
}
}
// SnapshotConfigIterator is an iterator that returns a series of snapshots.
type snapshotConfigIterator struct {
next nextSnapshotFunc
}
// Next returns the next SnapshotConfig. Its second return value is iterator.Done if there are no more results.
// Once Next returns iterator.Done, all subsequent calls will return iterator.Done.
func (snaps *snapshotConfigIterator) Next() (*snapshotConfig, error) {
return snaps.next()
}
// Delete deletes a snapshot.
func (snap *snapshot) delete(ctx context.Context) error {
return snap.s.deleteSnapshot(ctx, snap.name)
}
// SeekTime seeks the subscription to a point in time.
//
// Messages retained in the subscription that were published before this
// time are marked as acknowledged, and messages retained in the
// subscription that were published after this time are marked as
// unacknowledged. Note that this operation affects only those messages
// retained in the subscription (configured by SnapshotConfig). For example,
// if `time` corresponds to a point before the message retention
// window (or to a point before the system's notion of the subscription
// creation time), only retained messages will be marked as unacknowledged,
// and already-expunged messages will not be restored.
func (s *Subscription) seekToTime(ctx context.Context, t time.Time) error {
return s.s.seekToTime(ctx, s.name, t)
}
// Snapshot creates a new snapshot from this subscription.
// The snapshot will be for the topic this subscription is subscribed to.
// If the name is empty string, a unique name is assigned.
//
// The created snapshot is guaranteed to retain:
// (a) The existing backlog on the subscription. More precisely, this is
// defined as the messages in the subscription's backlog that are
// unacknowledged when Snapshot returns without error.
// (b) Any messages published to the subscription's topic following
// Snapshot returning without error.
func (s *Subscription) createSnapshot(ctx context.Context, name string) (*snapshotConfig, error) {
if name != "" {
name = vkit.SubscriberSnapshotPath(strings.Split(s.name, "/")[1], name)
}
return s.s.createSnapshot(ctx, name, s.name)
}
// SeekSnapshot seeks the subscription to a snapshot.
//
// The snapshot need not be created from this subscription,
// but the snapshot must be for the topic this subscription is subscribed to.
func (s *Subscription) seekToSnapshot(ctx context.Context, snap *snapshot) error {
return s.s.seekToSnapshot(ctx, s.name, snap.name)
}
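
A sketch of how the snapshot and seek primitives above compose: snapshot the backlog, attempt some risky processing, and seek back to the snapshot on failure so the messages are redelivered. Everything here is still unexported in this version, so the sketch assumes it lives inside the pubsub package; the snapshot name and process function are placeholders.

// Illustrative only (not part of the vendored file).
package pubsub

import "golang.org/x/net/context"

// replayOnFailure snapshots the subscription's backlog, runs process, and
// seeks back to the snapshot if process fails so the messages are redelivered.
func replayOnFailure(ctx context.Context, sub *Subscription, process func() error) error {
    cfg, err := sub.createSnapshot(ctx, "before-upgrade") // placeholder name
    if err != nil {
        return err
    }
    if err := process(); err != nil {
        return sub.seekToSnapshot(ctx, cfg.snapshot)
    }
    // Processing succeeded; the snapshot is no longer needed.
    return cfg.delete(ctx)
}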

View File

@@ -0,0 +1,333 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
// TODO(jba): test keepalive
// TODO(jba): test that expired messages are not kept alive
// TODO(jba): test that when all messages expire, Stop returns.
import (
"io"
"reflect"
"strconv"
"sync"
"sync/atomic"
"testing"
"time"
tspb "github.com/golang/protobuf/ptypes/timestamp"
"golang.org/x/net/context"
"google.golang.org/api/option"
pb "google.golang.org/genproto/googleapis/pubsub/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
var (
timestamp = &tspb.Timestamp{}
testMessages = []*pb.ReceivedMessage{
{AckId: "0", Message: &pb.PubsubMessage{Data: []byte{1}, PublishTime: timestamp}},
{AckId: "1", Message: &pb.PubsubMessage{Data: []byte{2}, PublishTime: timestamp}},
{AckId: "2", Message: &pb.PubsubMessage{Data: []byte{3}, PublishTime: timestamp}},
}
)
func TestStreamingPullBasic(t *testing.T) {
client, server := newFake(t)
server.addStreamingPullMessages(testMessages)
testStreamingPullIteration(t, client, server, testMessages)
}
func TestStreamingPullMultipleFetches(t *testing.T) {
client, server := newFake(t)
server.addStreamingPullMessages(testMessages[:1])
server.addStreamingPullMessages(testMessages[1:])
testStreamingPullIteration(t, client, server, testMessages)
}
func testStreamingPullIteration(t *testing.T, client *Client, server *fakeServer, msgs []*pb.ReceivedMessage) {
if !useStreamingPull {
t.SkipNow()
}
sub := client.Subscription("s")
gotMsgs, err := pullN(context.Background(), sub, len(msgs), func(_ context.Context, m *Message) {
id, err := strconv.Atoi(m.ackID)
if err != nil {
panic(err)
}
// ack evens, nack odds
if id%2 == 0 {
m.Ack()
} else {
m.Nack()
}
})
if err != nil {
t.Fatalf("Pull: %v", err)
}
gotMap := map[string]*Message{}
for _, m := range gotMsgs {
gotMap[m.ackID] = m
}
for i, msg := range msgs {
want, err := toMessage(msg)
if err != nil {
t.Fatal(err)
}
want.calledDone = true
got := gotMap[want.ackID]
if got == nil {
t.Errorf("%d: no message for ackID %q", i, want.ackID)
continue
}
got.doneFunc = nil // Don't compare done; it's a function.
if !reflect.DeepEqual(got, want) {
t.Errorf("%d: got\n%#v\nwant\n%#v", i, got, want)
}
}
server.wait()
for i := 0; i < len(msgs); i++ {
id := msgs[i].AckId
if i%2 == 0 {
if !server.Acked[id] {
t.Errorf("msg %q should have been acked but wasn't", id)
}
} else {
if dl, ok := server.Deadlines[id]; !ok || dl != 0 {
t.Errorf("msg %q should have been nacked but wasn't", id)
}
}
}
}
func TestStreamingPullError(t *testing.T) {
// If an RPC to the service returns a non-retryable error, Pull should
// return after all callbacks return, without waiting for messages to be
// acked.
if !useStreamingPull {
t.SkipNow()
}
client, server := newFake(t)
server.addStreamingPullMessages(testMessages[:1])
server.addStreamingPullError(grpc.Errorf(codes.Unknown, ""))
sub := client.Subscription("s")
// Use only one goroutine, since the fake server is configured to
// return only one error.
sub.ReceiveSettings.NumGoroutines = 1
callbackDone := make(chan struct{})
ctx, _ := context.WithTimeout(context.Background(), time.Second)
err := sub.Receive(ctx, func(ctx context.Context, m *Message) {
defer close(callbackDone)
select {
case <-ctx.Done():
return
}
})
select {
case <-callbackDone:
default:
t.Fatal("Receive returned but callback was not done")
}
if want := codes.Unknown; grpc.Code(err) != want {
t.Fatalf("got <%v>, want code %v", err, want)
}
}
func TestStreamingPullCancel(t *testing.T) {
// If Receive's context is canceled, it should return after all callbacks
// return and all messages have been acked.
if !useStreamingPull {
t.SkipNow()
}
client, server := newFake(t)
server.addStreamingPullMessages(testMessages)
sub := client.Subscription("s")
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
var n int32
err := sub.Receive(ctx, func(ctx2 context.Context, m *Message) {
atomic.AddInt32(&n, 1)
defer atomic.AddInt32(&n, -1)
cancel()
m.Ack()
})
if got := atomic.LoadInt32(&n); got != 0 {
t.Errorf("Receive returned with %d callbacks still running", got)
}
if err != nil {
t.Fatalf("Receive got <%v>, want nil", err)
}
}
func TestStreamingPullRetry(t *testing.T) {
if !useStreamingPull {
t.SkipNow()
}
// Check that we retry on io.EOF or Unavailable.
client, server := newFake(t)
server.addStreamingPullMessages(testMessages[:1])
server.addStreamingPullError(io.EOF)
server.addStreamingPullError(io.EOF)
server.addStreamingPullMessages(testMessages[1:2])
server.addStreamingPullError(grpc.Errorf(codes.Unavailable, ""))
server.addStreamingPullError(grpc.Errorf(codes.Unavailable, ""))
server.addStreamingPullMessages(testMessages[2:])
testStreamingPullIteration(t, client, server, testMessages)
}
func TestStreamingPullOneActive(t *testing.T) {
// Only one call to Pull can be active at a time.
if !useStreamingPull {
t.SkipNow()
}
client, srv := newFake(t)
srv.addStreamingPullMessages(testMessages[:1])
sub := client.Subscription("s")
ctx, cancel := context.WithCancel(context.Background())
err := sub.Receive(ctx, func(ctx context.Context, m *Message) {
m.Ack()
err := sub.Receive(ctx, func(context.Context, *Message) {})
if err != errReceiveInProgress {
t.Errorf("got <%v>, want <%v>", err, errReceiveInProgress)
}
cancel()
})
if err != nil {
t.Fatalf("got <%v>, want nil", err)
}
}
func TestStreamingPullConcurrent(t *testing.T) {
if !useStreamingPull {
t.SkipNow()
}
newMsg := func(i int) *pb.ReceivedMessage {
return &pb.ReceivedMessage{
AckId: strconv.Itoa(i),
Message: &pb.PubsubMessage{Data: []byte{byte(i)}, PublishTime: timestamp},
}
}
// Multiple goroutines should be able to read from the same iterator.
client, server := newFake(t)
// Add a lot of messages, a few at a time, to make sure both threads get a chance.
nMessages := 100
for i := 0; i < nMessages; i += 2 {
server.addStreamingPullMessages([]*pb.ReceivedMessage{newMsg(i), newMsg(i + 1)})
}
sub := client.Subscription("s")
ctx, _ := context.WithTimeout(context.Background(), time.Second)
gotMsgs, err := pullN(ctx, sub, nMessages, func(ctx context.Context, m *Message) {
m.Ack()
})
if err != nil {
t.Fatalf("Receive: %v", err)
}
seen := map[string]bool{}
for _, gm := range gotMsgs {
if seen[gm.ackID] {
t.Fatalf("duplicate ID %q", gm.ackID)
}
seen[gm.ackID] = true
}
if len(seen) != nMessages {
t.Fatalf("got %d messages, want %d", len(seen), nMessages)
}
}
func TestStreamingPullFlowControl(t *testing.T) {
// Callback invocations should not occur if flow control limits are exceeded.
if !useStreamingPull {
t.SkipNow()
}
client, server := newFake(t)
server.addStreamingPullMessages(testMessages)
sub := client.Subscription("s")
sub.ReceiveSettings.MaxOutstandingMessages = 2
ctx, cancel := context.WithCancel(context.Background())
activec := make(chan int)
waitc := make(chan int)
errc := make(chan error)
go func() {
errc <- sub.Receive(ctx, func(_ context.Context, m *Message) {
activec <- 1
<-waitc
m.Ack()
})
}()
// Here, two callbacks are active. Receive should be blocked in the flow
// control acquire method on the third message.
<-activec
<-activec
select {
case <-activec:
t.Fatal("third callback in progress")
case <-time.After(100 * time.Millisecond):
}
cancel()
// Receive still has not returned, because both callbacks are still blocked on waitc.
select {
case err := <-errc:
t.Fatalf("Receive returned early with error %v", err)
case <-time.After(100 * time.Millisecond):
}
// Let both callbacks proceed.
waitc <- 1
waitc <- 1
// The third callback will never run, because acquire returned a non-nil
// error, causing Receive to return. So now Receive should end.
if err := <-errc; err != nil {
t.Fatalf("got %v from Receive, want nil", err)
}
}
func newFake(t *testing.T) (*Client, *fakeServer) {
srv, err := newFakeServer()
if err != nil {
t.Fatal(err)
}
conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
if err != nil {
t.Fatal(err)
}
client, err := NewClient(context.Background(), "projectID", option.WithGRPCConn(conn))
if err != nil {
t.Fatal(err)
}
return client, srv
}
// pullN calls sub.Receive until at least n messages are received.
func pullN(ctx context.Context, sub *Subscription, n int, f func(context.Context, *Message)) ([]*Message, error) {
var (
mu sync.Mutex
msgs []*Message
)
cctx, cancel := context.WithCancel(ctx)
err := sub.Receive(cctx, func(ctx context.Context, m *Message) {
mu.Lock()
msgs = append(msgs, m)
nSeen := len(msgs)
mu.Unlock()
f(ctx, m)
if nSeen >= n {
cancel()
}
})
if err != nil {
return nil, err
}
return msgs, nil
}
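
The flow-control test above corresponds to a common usage pattern: cap the number of outstanding messages through ReceiveSettings (defined in subscription.go below) so callbacks never outrun the consumer. A minimal sketch with placeholder IDs:

// Illustrative only (not part of the vendored code). The subscription ID and
// handle function are placeholders.
package pubsubexample

import (
    "cloud.google.com/go/pubsub"
    "golang.org/x/net/context"
)

// receiveBounded limits how many messages may be outstanding (delivered but
// not yet acked) at once; further deliveries block until a callback finishes,
// which is the behavior the flow-control test exercises.
func receiveBounded(ctx context.Context, client *pubsub.Client, handle func(*pubsub.Message)) error {
    sub := client.Subscription("my-sub") // placeholder subscription ID
    sub.ReceiveSettings.MaxOutstandingMessages = 10
    return sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
        handle(m)
        m.Ack()
    })
}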

394
vendor/cloud.google.com/go/pubsub/subscription.go generated vendored Normal file
View File

@@ -0,0 +1,394 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"errors"
"fmt"
"strings"
"sync"
"time"
"cloud.google.com/go/iam"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
"google.golang.org/api/iterator"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// Subscription is a reference to a PubSub subscription.
type Subscription struct {
s service
// The fully qualified identifier for the subscription, in the format "projects/<projid>/subscriptions/<name>"
name string
// Settings for pulling messages. Configure these before calling Receive.
ReceiveSettings ReceiveSettings
mu sync.Mutex
receiveActive bool
}
// Subscription creates a reference to a subscription.
func (c *Client) Subscription(id string) *Subscription {
return newSubscription(c.s, fmt.Sprintf("projects/%s/subscriptions/%s", c.projectID, id))
}
func newSubscription(s service, name string) *Subscription {
return &Subscription{
s: s,
name: name,
}
}
// String returns the globally unique printable name of the subscription.
func (s *Subscription) String() string {
return s.name
}
// ID returns the unique identifier of the subscription within its project.
func (s *Subscription) ID() string {
slash := strings.LastIndex(s.name, "/")
if slash == -1 {
// name is not a fully-qualified name.
panic("bad subscription name")
}
return s.name[slash+1:]
}
// Subscriptions returns an iterator which returns all of the subscriptions for the client's project.
func (c *Client) Subscriptions(ctx context.Context) *SubscriptionIterator {
return &SubscriptionIterator{
s: c.s,
next: c.s.listProjectSubscriptions(ctx, c.fullyQualifiedProjectName()),
}
}
// SubscriptionIterator is an iterator that returns a series of subscriptions.
type SubscriptionIterator struct {
s service
next nextStringFunc
}
// Next returns the next subscription. If there are no more subscriptions, iterator.Done will be returned.
func (subs *SubscriptionIterator) Next() (*Subscription, error) {
subName, err := subs.next()
if err != nil {
return nil, err
}
return newSubscription(subs.s, subName), nil
}
// PushConfig contains configuration for subscriptions that operate in push mode.
type PushConfig struct {
// A URL locating the endpoint to which messages should be pushed.
Endpoint string
// Endpoint configuration attributes. See https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions#pushconfig for more details.
Attributes map[string]string
}
// SubscriptionConfig contains the configuration of a subscription.
type SubscriptionConfig struct {
Topic *Topic
PushConfig PushConfig
// The default maximum time after a subscriber receives a message before
// the subscriber should acknowledge the message. Note: messages which are
// obtained via Subscription.Receive need not be acknowledged within this
// deadline, as the deadline will be automatically extended.
AckDeadline time.Duration
// Whether to retain acknowledged messages. If true, acknowledged messages
// will not be expunged until they fall out of the RetentionDuration window.
retainAckedMessages bool
// How long to retain messages in backlog, from the time of publish. If RetainAckedMessages is true,
// this duration affects the retention of acknowledged messages,
// otherwise only unacknowledged messages are retained.
// Defaults to 7 days. Cannot be longer than 7 days or shorter than 10 minutes.
retentionDuration time.Duration
}
// ReceiveSettings configure the Receive method.
// A zero ReceiveSettings will result in values equivalent to DefaultReceiveSettings.
type ReceiveSettings struct {
// MaxExtension is the maximum period for which the Subscription should
// automatically extend the ack deadline for each message.
//
// The Subscription will automatically extend the ack deadline of all
// fetched Messages for the duration specified. Automatic deadline
// extension may be disabled by specifying a negative duration.
MaxExtension time.Duration
// MaxOutstandingMessages is the maximum number of unprocessed messages
// (unacknowledged but not yet expired). If MaxOutstandingMessages is 0, it
// will be treated as if it were DefaultReceiveSettings.MaxOutstandingMessages.
// If the value is negative, then there will be no limit on the number of
// unprocessed messages.
MaxOutstandingMessages int
// MaxOutstandingBytes is the maximum size of unprocessed messages
// (unacknowledged but not yet expired). If MaxOutstandingBytes is 0, it will
// be treated as if it were DefaultReceiveSettings.MaxOutstandingBytes. If
// the value is negative, then there will be no limit on the number of bytes
// for unprocessed messages.
MaxOutstandingBytes int
// NumGoroutines is the number of goroutines Receive will spawn to pull
// messages concurrently. If NumGoroutines is less than 1, it will be treated
// as if it were DefaultReceiveSettings.NumGoroutines.
NumGoroutines int
}
// DefaultReceiveSettings holds the default values for ReceiveSettings.
var DefaultReceiveSettings = ReceiveSettings{
MaxExtension: 10 * time.Minute,
MaxOutstandingMessages: 1000,
MaxOutstandingBytes: 1e9, // 1G
NumGoroutines: 1,
}
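// receiveWithLimits is a hedged sketch of tuning ReceiveSettings before calling
// Receive. The limits below are arbitrary example values, not recommendations.
func receiveWithLimits(ctx context.Context, sub *Subscription, f func(context.Context, *Message)) error {
	sub.ReceiveSettings.MaxOutstandingMessages = 100   // hold at most 100 unacked messages
	sub.ReceiveSettings.MaxOutstandingBytes = 1e8      // hold at most ~100MB of unacked data
	sub.ReceiveSettings.MaxExtension = 5 * time.Minute // extend ack deadlines for at most 5 minutes
	return sub.Receive(ctx, f)
}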
// Delete deletes the subscription.
func (s *Subscription) Delete(ctx context.Context) error {
return s.s.deleteSubscription(ctx, s.name)
}
// Exists reports whether the subscription exists on the server.
func (s *Subscription) Exists(ctx context.Context) (bool, error) {
return s.s.subscriptionExists(ctx, s.name)
}
// Config fetches the current configuration for the subscription.
func (s *Subscription) Config(ctx context.Context) (SubscriptionConfig, error) {
conf, topicName, err := s.s.getSubscriptionConfig(ctx, s.name)
if err != nil {
return SubscriptionConfig{}, err
}
conf.Topic = &Topic{
s: s.s,
name: topicName,
}
return conf, nil
}
// SubscriptionConfigToUpdate describes how to update a subscription.
type SubscriptionConfigToUpdate struct {
// If non-nil, the push config is changed.
PushConfig *PushConfig
}
// Update changes an existing subscription according to the fields set in cfg.
// It returns the new SubscriptionConfig.
//
// Update returns an error if no fields were modified.
func (s *Subscription) Update(ctx context.Context, cfg SubscriptionConfigToUpdate) (SubscriptionConfig, error) {
if cfg.PushConfig == nil {
return SubscriptionConfig{}, errors.New("pubsub: UpdateSubscription call with nothing to update")
}
if err := s.s.modifyPushConfig(ctx, s.name, *cfg.PushConfig); err != nil {
return SubscriptionConfig{}, err
}
return s.Config(ctx)
}
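// updatePushEndpoint is a hedged sketch of Update: it switches an existing
// subscription to push delivery at the given URL. The caller supplies the
// subscription and URL; nothing here is assumed beyond this file's API.
func updatePushEndpoint(ctx context.Context, s *Subscription, url string) (SubscriptionConfig, error) {
	return s.Update(ctx, SubscriptionConfigToUpdate{
		PushConfig: &PushConfig{Endpoint: url},
	})
}
// IAM returns the subscription's IAM handle.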
func (s *Subscription) IAM() *iam.Handle {
return s.s.iamHandle(s.name)
}
// CreateSubscription creates a new subscription on a topic.
//
// id is the name of the subscription to create. It must start with a letter,
// and contain only letters ([A-Za-z]), numbers ([0-9]), dashes (-),
// underscores (_), periods (.), tildes (~), plus (+) or percent signs (%). It
// must be between 3 and 255 characters in length, and must not start with
// "goog".
//
// cfg.Topic is the topic from which the subscription should receive messages. It
// need not belong to the same project as the subscription. This field is required.
//
// cfg.AckDeadline is the maximum time after a subscriber receives a message before
// the subscriber should acknowledge the message. It must be between 10 and 600
// seconds (inclusive), and is rounded down to the nearest second. If the
// provided ackDeadline is 0, then the default value of 10 seconds is used.
// Note: messages which are obtained via Subscription.Receive need not be
// acknowledged within this deadline, as the deadline will be automatically
// extended.
//
// cfg.PushConfig may be set to configure this subscription for push delivery.
//
// If the subscription already exists an error will be returned.
func (c *Client) CreateSubscription(ctx context.Context, id string, cfg SubscriptionConfig) (*Subscription, error) {
if cfg.Topic == nil {
return nil, errors.New("pubsub: require non-nil Topic")
}
if cfg.AckDeadline == 0 {
cfg.AckDeadline = 10 * time.Second
}
if d := cfg.AckDeadline; d < 10*time.Second || d > 600*time.Second {
return nil, fmt.Errorf("ack deadline must be between 10 and 600 seconds; got: %v", d)
}
sub := c.Subscription(id)
err := c.s.createSubscription(ctx, sub.name, cfg)
return sub, err
}
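// createWithDeadline is a hedged sketch of CreateSubscription: it creates a
// subscription on an existing topic with a 30-second ack deadline. The ID
// "example-sub" is a placeholder chosen for the example.
func createWithDeadline(ctx context.Context, c *Client, topic *Topic) (*Subscription, error) {
	return c.CreateSubscription(ctx, "example-sub", SubscriptionConfig{
		Topic:       topic,
		AckDeadline: 30 * time.Second,
	})
}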
var errReceiveInProgress = errors.New("pubsub: Receive already in progress for this subscription")
// Receive calls f with the outstanding messages from the subscription.
// It blocks until ctx is done, or the service returns a non-retryable error.
//
// The standard way to terminate a Receive is to cancel its context:
//
// cctx, cancel := context.WithCancel(ctx)
// err := sub.Receive(cctx, callback)
// // Call cancel from callback, or another goroutine.
//
// If the service returns a non-retryable error, Receive returns that error after
// all of the outstanding calls to f have returned. If ctx is done, Receive
// returns nil after all of the outstanding calls to f have returned and
// all messages have been acknowledged or have expired.
//
// Receive calls f concurrently from multiple goroutines. It is encouraged to
// process messages synchronously in f, even if that processing is relatively
// time-consuming; Receive will spawn new goroutines for incoming messages,
// limited by MaxOutstandingMessages and MaxOutstandingBytes in ReceiveSettings.
//
// The context passed to f will be canceled when ctx is Done or there is a
// fatal service error.
//
// Receive will automatically extend the ack deadline of all fetched Messages for the
// period specified by s.ReceiveSettings.MaxExtension.
//
// Each Subscription may have only one invocation of Receive active at a time.
func (s *Subscription) Receive(ctx context.Context, f func(context.Context, *Message)) error {
s.mu.Lock()
if s.receiveActive {
s.mu.Unlock()
return errReceiveInProgress
}
s.receiveActive = true
s.mu.Unlock()
defer func() { s.mu.Lock(); s.receiveActive = false; s.mu.Unlock() }()
config, err := s.Config(ctx)
if err != nil {
if grpc.Code(err) == codes.Canceled {
return nil
}
return err
}
maxCount := s.ReceiveSettings.MaxOutstandingMessages
if maxCount == 0 {
maxCount = DefaultReceiveSettings.MaxOutstandingMessages
}
maxBytes := s.ReceiveSettings.MaxOutstandingBytes
if maxBytes == 0 {
maxBytes = DefaultReceiveSettings.MaxOutstandingBytes
}
maxExt := s.ReceiveSettings.MaxExtension
if maxExt == 0 {
maxExt = DefaultReceiveSettings.MaxExtension
} else if maxExt < 0 {
// If MaxExtension is negative, disable automatic extension.
maxExt = 0
}
numGoroutines := s.ReceiveSettings.NumGoroutines
if numGoroutines < 1 {
numGoroutines = DefaultReceiveSettings.NumGoroutines
}
// TODO(jba): add tests that verify that ReceiveSettings are correctly processed.
po := &pullOptions{
maxExtension: maxExt,
maxPrefetch: trunc32(int64(maxCount)),
ackDeadline: config.AckDeadline,
}
fc := newFlowController(maxCount, maxBytes)
// Wait for all goroutines started by Receive to return, so instead of an
// obscure goroutine leak we have an obvious blocked call to Receive.
group, gctx := errgroup.WithContext(ctx)
for i := 0; i < numGoroutines; i++ {
group.Go(func() error {
return s.receive(gctx, po, fc, f)
})
}
return group.Wait()
}
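// receiveOne is a hedged sketch of the cancellation pattern described above:
// it acks the first message, cancels the context from the callback, and lets
// Receive return nil once outstanding callbacks have finished.
func receiveOne(ctx context.Context, sub *Subscription) error {
	cctx, cancel := context.WithCancel(ctx)
	defer cancel()
	return sub.Receive(cctx, func(_ context.Context, m *Message) {
		m.Ack()
		cancel() // stop receiving after the first message (illustrative)
	})
}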
func (s *Subscription) receive(ctx context.Context, po *pullOptions, fc *flowController, f func(context.Context, *Message)) error {
// Cancel a sub-context when we return, to kick the context-aware callbacks
// and the goroutine below.
ctx2, cancel := context.WithCancel(ctx)
// Call stop when Receive's context is done.
// Stop will block until all outstanding messages have been acknowledged
// or there was a fatal service error.
// The iterator does not use the context passed to Receive. If it did, canceling
// that context would immediately stop the iterator without waiting for unacked
// messages.
iter := newMessageIterator(context.Background(), s.s, s.name, po)
// We cannot use errgroup from Receive here. Receive might already be calling group.Wait,
// and group.Wait cannot be called concurrently with group.Go. We give each receive() its
// own WaitGroup instead.
// Since wg.Add is only called from the main goroutine, wg.Wait is guaranteed
// to be called after all Adds.
var wg sync.WaitGroup
wg.Add(1)
go func() {
<-ctx2.Done()
iter.Stop()
wg.Done()
}()
defer wg.Wait()
defer cancel()
for {
msg, err := iter.Next()
if err == iterator.Done {
return nil
}
if err != nil {
return err
}
// TODO(jba): call acquire closer to when the message is allocated.
if err := fc.acquire(ctx, len(msg.Data)); err != nil {
// TODO(jba): test that this "orphaned" message is nacked immediately when ctx is done.
msg.Nack()
return nil
}
wg.Add(1)
go func() {
// TODO(jba): call release when the message is available for GC.
// This considers the message to be released when
// f is finished, but f may ack early or not at all.
defer wg.Done()
defer fc.release(len(msg.Data))
f(ctx2, msg)
}()
}
}
// TODO(jba): remove when we delete messageIterator.
type pullOptions struct {
maxExtension time.Duration
maxPrefetch int32
// ackDeadline is the default ack deadline for the subscription. Not
// configurable.
ackDeadline time.Duration
}

149
vendor/cloud.google.com/go/pubsub/subscription_test.go generated vendored Normal file
View File

@@ -0,0 +1,149 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"reflect"
"testing"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
)
type subListService struct {
service
subs []string
err error
t *testing.T // for error logging.
}
func (s *subListService) newNextStringFunc() nextStringFunc {
return func() (string, error) {
if len(s.subs) == 0 {
return "", iterator.Done
}
sn := s.subs[0]
s.subs = s.subs[1:]
return sn, s.err
}
}
func (s *subListService) listProjectSubscriptions(ctx context.Context, projName string) nextStringFunc {
if projName != "projects/projid" {
s.t.Fatalf("unexpected call: projName: %q", projName)
return nil
}
return s.newNextStringFunc()
}
func (s *subListService) listTopicSubscriptions(ctx context.Context, topicName string) nextStringFunc {
if topicName != "projects/projid/topics/topic" {
s.t.Fatalf("unexpected call: topicName: %q", topicName)
return nil
}
return s.newNextStringFunc()
}
// slurpSubs returns the remaining subscriptions from the iterator.
func slurpSubs(it *SubscriptionIterator) ([]*Subscription, error) {
var subs []*Subscription
for {
switch sub, err := it.Next(); err {
case nil:
subs = append(subs, sub)
case iterator.Done:
return subs, nil
default:
return nil, err
}
}
}
func TestSubscriptionID(t *testing.T) {
const id = "id"
serv := &subListService{
subs: []string{"projects/projid/subscriptions/s1", "projects/projid/subscriptions/s2"},
t: t,
}
c := &Client{projectID: "projid", s: serv}
s := c.Subscription(id)
if got, want := s.ID(), id; got != want {
t.Errorf("Subscription.ID() = %q; want %q", got, want)
}
want := []string{"s1", "s2"}
subs, err := slurpSubs(c.Subscriptions(context.Background()))
if err != nil {
t.Errorf("error listing subscriptions: %v", err)
}
for i, s := range subs {
if got, want := s.ID(), want[i]; got != want {
t.Errorf("Subscription.ID() = %q; want %q", got, want)
}
}
}
func TestListProjectSubscriptions(t *testing.T) {
snames := []string{"projects/projid/subscriptions/s1", "projects/projid/subscriptions/s2",
"projects/projid/subscriptions/s3"}
s := &subListService{subs: snames, t: t}
c := &Client{projectID: "projid", s: s}
subs, err := slurpSubs(c.Subscriptions(context.Background()))
if err != nil {
t.Errorf("error listing subscriptions: %v", err)
}
got := subNames(subs)
want := []string{
"projects/projid/subscriptions/s1",
"projects/projid/subscriptions/s2",
"projects/projid/subscriptions/s3"}
if !reflect.DeepEqual(got, want) {
t.Errorf("sub list: got: %v, want: %v", got, want)
}
if len(s.subs) != 0 {
t.Errorf("outstanding subs: %v", s.subs)
}
}
func TestListTopicSubscriptions(t *testing.T) {
snames := []string{"projects/projid/subscriptions/s1", "projects/projid/subscriptions/s2",
"projects/projid/subscriptions/s3"}
s := &subListService{subs: snames, t: t}
c := &Client{projectID: "projid", s: s}
subs, err := slurpSubs(c.Topic("topic").Subscriptions(context.Background()))
if err != nil {
t.Errorf("error listing subscriptions: %v", err)
}
got := subNames(subs)
want := []string{
"projects/projid/subscriptions/s1",
"projects/projid/subscriptions/s2",
"projects/projid/subscriptions/s3"}
if !reflect.DeepEqual(got, want) {
t.Errorf("sub list: got: %v, want: %v", got, want)
}
if len(s.subs) != 0 {
t.Errorf("outstanding subs: %v", s.subs)
}
}
func subNames(subs []*Subscription) []string {
var names []string
for _, sub := range subs {
names = append(names, sub.name)
}
return names
}

358
vendor/cloud.google.com/go/pubsub/topic.go generated vendored Normal file
View File

@@ -0,0 +1,358 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"errors"
"fmt"
"runtime"
"strings"
"sync"
"time"
"cloud.google.com/go/iam"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/api/support/bundler"
pb "google.golang.org/genproto/googleapis/pubsub/v1"
)
const (
// The maximum number of messages that can be in a single publish request, as
// determined by the PubSub service.
MaxPublishRequestCount = 1000
// The maximum size of a single publish request in bytes, as determined by the PubSub service.
MaxPublishRequestBytes = 1e7
maxInt = int(^uint(0) >> 1)
)
// ErrOversizedMessage indicates that a message's size exceeds MaxPublishRequestBytes.
var ErrOversizedMessage = bundler.ErrOversizedItem
// Topic is a reference to a PubSub topic.
//
// The methods of Topic are safe for use by multiple goroutines.
type Topic struct {
s service
// The fully qualified identifier for the topic, in the format "projects/<projid>/topics/<name>"
name string
// Settings for publishing messages. All changes must be made before the
// first call to Publish. The default is DefaultPublishSettings.
PublishSettings PublishSettings
mu sync.RWMutex
stopped bool
bundler *bundler.Bundler
wg sync.WaitGroup
// Channel for message bundles to be published. Close to indicate that Stop was called.
bundlec chan []*bundledMessage
}
// PublishSettings control the bundling of published messages.
type PublishSettings struct {
// Publish a non-empty batch after this delay has passed.
DelayThreshold time.Duration
// Publish a batch when it has this many messages. The maximum is
// MaxPublishRequestCount.
CountThreshold int
// Publish a batch when its size in bytes reaches this value.
ByteThreshold int
// The number of goroutines that invoke the Publish RPC concurrently.
// Defaults to a multiple of GOMAXPROCS.
NumGoroutines int
// The maximum time that the client will attempt to publish a bundle of messages.
Timeout time.Duration
}
// DefaultPublishSettings holds the default values for topics' PublishSettings.
var DefaultPublishSettings = PublishSettings{
DelayThreshold: 1 * time.Millisecond,
CountThreshold: 100,
ByteThreshold: 1e6,
Timeout: 60 * time.Second,
}
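// topicWithBatching is a hedged sketch of overriding PublishSettings; settings
// must be changed before the first call to Publish, and the thresholds below
// are arbitrary example values.
func topicWithBatching(c *Client, id string) *Topic {
	t := c.Topic(id)
	t.PublishSettings.DelayThreshold = 100 * time.Millisecond // wait up to 100ms to fill a batch
	t.PublishSettings.CountThreshold = 500                    // or publish once 500 messages are buffered
	return t
}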
// CreateTopic creates a new topic.
// The specified topic ID must start with a letter, and contain only letters
// ([A-Za-z]), numbers ([0-9]), dashes (-), underscores (_), periods (.),
// tildes (~), plus (+) or percent signs (%). It must be between 3 and 255
// characters in length, and must not start with "goog".
// If the topic already exists an error will be returned.
func (c *Client) CreateTopic(ctx context.Context, id string) (*Topic, error) {
t := c.Topic(id)
err := c.s.createTopic(ctx, t.name)
return t, err
}
// Topic creates a reference to a topic.
//
// If a Topic's Publish method is called, it has background goroutines
// associated with it. Clean them up by calling Topic.Stop.
//
// Avoid creating many Topic instances if you use them to publish.
func (c *Client) Topic(id string) *Topic {
return newTopic(c.s, fmt.Sprintf("projects/%s/topics/%s", c.projectID, id))
}
func newTopic(s service, name string) *Topic {
// bundlec is unbuffered. A buffer would occupy memory not
// accounted for by the bundler, so BufferedByteLimit would be a lie:
// the actual memory consumed would be higher.
return &Topic{
s: s,
name: name,
PublishSettings: DefaultPublishSettings,
bundlec: make(chan []*bundledMessage),
}
}
// Topics returns an iterator which returns all of the topics for the client's project.
func (c *Client) Topics(ctx context.Context) *TopicIterator {
return &TopicIterator{
s: c.s,
next: c.s.listProjectTopics(ctx, c.fullyQualifiedProjectName()),
}
}
// TopicIterator is an iterator that returns a series of topics.
type TopicIterator struct {
s service
next nextStringFunc
}
// Next returns the next topic. If there are no more topics, iterator.Done will be returned.
func (tps *TopicIterator) Next() (*Topic, error) {
topicName, err := tps.next()
if err != nil {
return nil, err
}
return newTopic(tps.s, topicName), nil
}
// ID returns the unique identifier of the topic within its project.
func (t *Topic) ID() string {
slash := strings.LastIndex(t.name, "/")
if slash == -1 {
// name is not a fully-qualified name.
panic("bad topic name")
}
return t.name[slash+1:]
}
// String returns the printable globally unique name for the topic.
func (t *Topic) String() string {
return t.name
}
// Delete deletes the topic.
func (t *Topic) Delete(ctx context.Context) error {
return t.s.deleteTopic(ctx, t.name)
}
// Exists reports whether the topic exists on the server.
func (t *Topic) Exists(ctx context.Context) (bool, error) {
if t.name == "_deleted-topic_" {
return false, nil
}
return t.s.topicExists(ctx, t.name)
}
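// IAM returns the topic's IAM handle.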
func (t *Topic) IAM() *iam.Handle {
return t.s.iamHandle(t.name)
}
// Subscriptions returns an iterator which returns the subscriptions for this topic.
func (t *Topic) Subscriptions(ctx context.Context) *SubscriptionIterator {
// NOTE: zero or more of the Subscriptions returned by this iterator may
// belong to a different project than t.
return &SubscriptionIterator{
s: t.s,
next: t.s.listTopicSubscriptions(ctx, t.name),
}
}
var errTopicStopped = errors.New("pubsub: Stop has been called for this topic")
// Publish publishes msg to the topic asynchronously. Messages are batched and
// sent according to the topic's PublishSettings. Publish never blocks.
//
// Publish returns a non-nil PublishResult which will be ready when the
// message has been sent (or has failed to be sent) to the server.
//
// Publish creates goroutines for batching and sending messages. These goroutines
// need to be stopped by calling t.Stop(). Once stopped, future calls to Publish
// will immediately return a PublishResult with an error.
func (t *Topic) Publish(ctx context.Context, msg *Message) *PublishResult {
// TODO(jba): if this turns out to take significant time, try to approximate it.
// Or, convert the messages to protos in Publish, instead of in the service.
msg.size = proto.Size(&pb.PubsubMessage{
Data: msg.Data,
Attributes: msg.Attributes,
})
r := &PublishResult{ready: make(chan struct{})}
t.initBundler()
t.mu.RLock()
defer t.mu.RUnlock()
// TODO(aboulhosn) [from bcmills] consider changing the semantics of bundler to perform this logic so we don't have to do it here
if t.stopped {
r.set("", errTopicStopped)
return r
}
// TODO(jba) [from bcmills] consider using a shared channel per bundle
// (requires Bundler API changes; would reduce allocations)
// The call to Add should never return an error because the bundler's
// BufferedByteLimit is set to maxInt; we do not perform any flow
// control in the client.
err := t.bundler.Add(&bundledMessage{msg, r}, msg.size)
if err != nil {
r.set("", err)
}
return r
}
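// publishAndWait is a hedged sketch of the Publish/Get flow: it publishes a
// single message, blocks until the server accepts or rejects it, and returns
// the server-assigned message ID. Stop is deferred so the batching goroutines
// created by Publish are cleaned up when the helper returns.
func publishAndWait(ctx context.Context, t *Topic, data []byte) (string, error) {
	defer t.Stop()
	r := t.Publish(ctx, &Message{Data: data})
	return r.Get(ctx)
}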
// Stop sends all remaining published messages and stops goroutines created
// for handling publishing. It returns once all outstanding messages have been
// sent or have failed to be sent.
func (t *Topic) Stop() {
t.mu.Lock()
noop := t.stopped || t.bundler == nil
t.stopped = true
t.mu.Unlock()
if noop {
return
}
t.bundler.Flush()
// At this point, all pending bundles have been published and the bundler's
// goroutines have exited, so it is OK for this goroutine to close bundlec.
close(t.bundlec)
t.wg.Wait()
}
// A PublishResult holds the result from a call to Publish.
type PublishResult struct {
ready chan struct{}
serverID string
err error
}
// Ready returns a channel that is closed when the result is ready.
// When the Ready channel is closed, Get is guaranteed not to block.
func (r *PublishResult) Ready() <-chan struct{} { return r.ready }
// Get returns the server-generated message ID and/or error result of a Publish call.
// Get blocks until the Publish call completes or the context is done.
func (r *PublishResult) Get(ctx context.Context) (serverID string, err error) {
// If the result is already ready, return it even if the context is done.
select {
case <-r.Ready():
return r.serverID, r.err
default:
}
select {
case <-ctx.Done():
return "", ctx.Err()
case <-r.Ready():
return r.serverID, r.err
}
}
func (r *PublishResult) set(sid string, err error) {
r.serverID = sid
r.err = err
close(r.ready)
}
type bundledMessage struct {
msg *Message
res *PublishResult
}
func (t *Topic) initBundler() {
t.mu.RLock()
noop := t.stopped || t.bundler != nil
t.mu.RUnlock()
if noop {
return
}
t.mu.Lock()
defer t.mu.Unlock()
// Must re-check, since we released the lock.
if t.stopped || t.bundler != nil {
return
}
// TODO(jba): use a context detached from the one passed to NewClient.
ctx := context.TODO()
// Unless overridden, run several goroutines per CPU to call the Publish RPC.
n := t.PublishSettings.NumGoroutines
if n <= 0 {
n = 25 * runtime.GOMAXPROCS(0)
}
timeout := t.PublishSettings.Timeout
t.wg.Add(n)
for i := 0; i < n; i++ {
go func() {
defer t.wg.Done()
for b := range t.bundlec {
bctx := ctx
cancel := func() {}
if timeout != 0 {
bctx, cancel = context.WithTimeout(ctx, timeout)
}
t.publishMessageBundle(bctx, b)
cancel()
}
}()
}
t.bundler = bundler.NewBundler(&bundledMessage{}, func(items interface{}) {
t.bundlec <- items.([]*bundledMessage)
})
t.bundler.DelayThreshold = t.PublishSettings.DelayThreshold
t.bundler.BundleCountThreshold = t.PublishSettings.CountThreshold
if t.bundler.BundleCountThreshold > MaxPublishRequestCount {
t.bundler.BundleCountThreshold = MaxPublishRequestCount
}
t.bundler.BundleByteThreshold = t.PublishSettings.ByteThreshold
t.bundler.BufferedByteLimit = maxInt
t.bundler.BundleByteLimit = MaxPublishRequestBytes
}
func (t *Topic) publishMessageBundle(ctx context.Context, bms []*bundledMessage) {
msgs := make([]*Message, len(bms))
for i, bm := range bms {
msgs[i], bm.msg = bm.msg, nil // release bm.msg for GC
}
ids, err := t.s.publishMessages(ctx, t.name, msgs)
for i, bm := range bms {
if err != nil {
bm.res.set("", err)
} else {
bm.res.set(ids[i], nil)
}
}
}

187
vendor/cloud.google.com/go/pubsub/topic_test.go generated vendored Normal file
View File

@@ -0,0 +1,187 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"net"
"reflect"
"testing"
"time"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
type topicListService struct {
service
topics []string
err error
t *testing.T // for error logging.
}
func (s *topicListService) newNextStringFunc() nextStringFunc {
return func() (string, error) {
if len(s.topics) == 0 {
return "", iterator.Done
}
tn := s.topics[0]
s.topics = s.topics[1:]
return tn, s.err
}
}
func (s *topicListService) listProjectTopics(ctx context.Context, projName string) nextStringFunc {
if projName != "projects/projid" {
s.t.Fatalf("unexpected call: projName: %q", projName)
return nil
}
return s.newNextStringFunc()
}
func checkTopicListing(t *testing.T, want []string) {
s := &topicListService{topics: want, t: t}
c := &Client{projectID: "projid", s: s}
topics, err := slurpTopics(c.Topics(context.Background()))
if err != nil {
t.Errorf("error listing topics: %v", err)
}
got := topicNames(topics)
if !reflect.DeepEqual(got, want) {
t.Errorf("topic list: got: %v, want: %v", got, want)
}
if len(s.topics) != 0 {
t.Errorf("outstanding topics: %v", s.topics)
}
}
// slurpTopics returns the remaining topics from the iterator.
func slurpTopics(it *TopicIterator) ([]*Topic, error) {
var topics []*Topic
for {
switch topic, err := it.Next(); err {
case nil:
topics = append(topics, topic)
case iterator.Done:
return topics, nil
default:
return nil, err
}
}
}
func TestTopicID(t *testing.T) {
const id = "id"
serv := &topicListService{
topics: []string{"projects/projid/topics/t1", "projects/projid/topics/t2"},
t: t,
}
c := &Client{projectID: "projid", s: serv}
s := c.Topic(id)
if got, want := s.ID(), id; got != want {
t.Errorf("Token.ID() = %q; want %q", got, want)
}
want := []string{"t1", "t2"}
topics, err := slurpTopics(c.Topics(context.Background()))
if err != nil {
t.Errorf("error listing topics: %v", err)
}
for i, topic := range topics {
if got, want := topic.ID(), want[i]; got != want {
t.Errorf("Token.ID() = %q; want %q", got, want)
}
}
}
func TestListTopics(t *testing.T) {
checkTopicListing(t, []string{
"projects/projid/topics/t1",
"projects/projid/topics/t2",
"projects/projid/topics/t3",
"projects/projid/topics/t4"})
}
func TestListCompletelyEmptyTopics(t *testing.T) {
var want []string
checkTopicListing(t, want)
}
func TestStopPublishOrder(t *testing.T) {
// Check that Stop doesn't panic if called before Publish.
// Also that Publish after Stop returns the right error.
ctx := context.Background()
c := &Client{projectID: "projid"}
topic := c.Topic("t")
topic.Stop()
r := topic.Publish(ctx, &Message{})
_, err := r.Get(ctx)
if err != errTopicStopped {
t.Errorf("got %v, want errTopicStopped", err)
}
}
func TestPublishTimeout(t *testing.T) {
ctx := context.Background()
serv := grpc.NewServer()
pubsubpb.RegisterPublisherServer(serv, &alwaysFailPublish{})
lis, err := net.Listen("tcp", "localhost:0")
if err != nil {
t.Fatal(err)
}
go serv.Serve(lis)
conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
if err != nil {
t.Fatal(err)
}
s, err := newPubSubService(context.Background(), []option.ClientOption{option.WithGRPCConn(conn)})
if err != nil {
t.Fatal(err)
}
c := &Client{s: s}
topic := c.Topic("t")
topic.PublishSettings.Timeout = 3 * time.Second
r := topic.Publish(ctx, &Message{})
defer topic.Stop()
select {
case <-r.Ready():
_, err = r.Get(ctx)
if err != context.DeadlineExceeded {
t.Fatalf("got %v, want context.DeadlineExceeded", err)
}
case <-time.After(2 * topic.PublishSettings.Timeout):
t.Fatal("timed out")
}
}
type alwaysFailPublish struct {
pubsubpb.PublisherServer
}
func (s *alwaysFailPublish) Publish(ctx context.Context, req *pubsubpb.PublishRequest) (*pubsubpb.PublishResponse, error) {
return nil, grpc.Errorf(codes.Unavailable, "try again")
}
func topicNames(topics []*Topic) []string {
var names []string
for _, topic := range topics {
names = append(names, topic.name)
}
return names
}

63
vendor/cloud.google.com/go/pubsub/utils_test.go generated vendored Normal file
View File

@@ -0,0 +1,63 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pubsub
import (
"time"
"golang.org/x/net/context"
)
type modDeadlineCall struct {
subName string
deadline time.Duration
ackIDs []string
}
type acknowledgeCall struct {
subName string
ackIDs []string
}
type testService struct {
service
// The arguments of each call to modifyAckDeadline are written to this channel.
modDeadlineCalled chan modDeadlineCall
// The arguments of each call to acknowledge are written to this channel.
acknowledgeCalled chan acknowledgeCall
}
func (s *testService) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error {
s.modDeadlineCalled <- modDeadlineCall{
subName: subName,
deadline: deadline,
ackIDs: ackIDs,
}
return nil
}
func (s *testService) acknowledge(ctx context.Context, subName string, ackIDs []string) error {
s.acknowledgeCalled <- acknowledgeCall{
subName: subName,
ackIDs: ackIDs,
}
return nil
}
func (s *testService) splitAckIDs(ids []string) ([]string, []string) {
return ids, nil
}
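// newRecordingService is a hedged sketch of how a test might construct this
// fixture; buffered channels let it record calls without blocking the code under test.
func newRecordingService() *testService {
	return &testService{
		modDeadlineCalled: make(chan modDeadlineCall, 10),
		acknowledgeCalled: make(chan acknowledgeCall, 10),
	}
}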