mirror of
https://github.com/restic/restic.git
synced 2025-08-24 16:07:28 +00:00
Vendor dependencies for GCS
This commit is contained in:
252
vendor/cloud.google.com/go/storage/acl.go
generated
vendored
Normal file
252
vendor/cloud.google.com/go/storage/acl.go
generated
vendored
Normal file
@@ -0,0 +1,252 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/googleapi"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// ACLRole is the level of access to grant.
type ACLRole string

const (
	// RoleOwner grants full control over the bucket or object.
	RoleOwner ACLRole = "OWNER"
	// RoleReader grants read-only access.
	RoleReader ACLRole = "READER"
	// RoleWriter grants read and write access (buckets only).
	RoleWriter ACLRole = "WRITER"
)
|
||||
|
||||
// ACLEntity refers to a user or group.
// They are sometimes referred to as grantees.
//
// It could be in the form of:
// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
// "domain-<domain>" and "project-team-<projectId>".
//
// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
type ACLEntity string

const (
	// AllUsers matches any user, whether authenticated or anonymous.
	AllUsers ACLEntity = "allUsers"
	// AllAuthenticatedUsers matches any user authenticated with a Google account.
	AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
)
|
||||
|
||||
// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket.
type ACLRule struct {
	// Entity is the user, group, domain or team the rule applies to.
	Entity ACLEntity
	// Role is the access level granted to Entity.
	Role ACLRole
}
|
||||
|
||||
// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
type ACLHandle struct {
	c      *Client
	bucket string
	// object is empty when the handle refers to a bucket ACL.
	object string
	// isDefault selects the bucket's default object ACL rather than
	// the bucket ACL itself.
	isDefault   bool
	userProject string // for requester-pays buckets
}
|
||||
|
||||
// Delete permanently deletes the ACL entry for the given entity.
|
||||
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error {
|
||||
if a.object != "" {
|
||||
return a.objectDelete(ctx, entity)
|
||||
}
|
||||
if a.isDefault {
|
||||
return a.bucketDefaultDelete(ctx, entity)
|
||||
}
|
||||
return a.bucketDelete(ctx, entity)
|
||||
}
|
||||
|
||||
// Set sets the permission level for the given entity.
|
||||
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error {
|
||||
if a.object != "" {
|
||||
return a.objectSet(ctx, entity, role, false)
|
||||
}
|
||||
if a.isDefault {
|
||||
return a.objectSet(ctx, entity, role, true)
|
||||
}
|
||||
return a.bucketSet(ctx, entity, role)
|
||||
}
|
||||
|
||||
// List retrieves ACL entries.
|
||||
func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) {
|
||||
if a.object != "" {
|
||||
return a.objectList(ctx)
|
||||
}
|
||||
if a.isDefault {
|
||||
return a.bucketDefaultList(ctx)
|
||||
}
|
||||
return a.bucketList(ctx)
|
||||
}
|
||||
|
||||
// bucketDefaultList lists the bucket's default object ACL, retrying
// transient failures via runWithRetry.
func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
	var acls *raw.ObjectAccessControls
	var err error
	err = runWithRetry(ctx, func() error {
		req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
		a.configureCall(req, ctx)
		// acls is captured so the result survives the retry closure.
		acls, err = req.Do()
		return err
	})
	if err != nil {
		return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", a.bucket, err)
	}
	return toACLRules(acls.Items), nil
}
|
||||
|
||||
// bucketDefaultDelete removes entity from the bucket's default object ACL,
// retrying transient failures via runWithRetry.
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
	err := runWithRetry(ctx, func() error {
		req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
		a.configureCall(req, ctx)
		return req.Do()
	})
	if err != nil {
		return fmt.Errorf("storage: error deleting default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
	}
	return nil
}
|
||||
|
||||
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
|
||||
var acls *raw.BucketAccessControls
|
||||
var err error
|
||||
err = runWithRetry(ctx, func() error {
|
||||
req := a.c.raw.BucketAccessControls.List(a.bucket)
|
||||
a.configureCall(req, ctx)
|
||||
acls, err = req.Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", a.bucket, err)
|
||||
}
|
||||
r := make([]ACLRule, len(acls.Items))
|
||||
for i, v := range acls.Items {
|
||||
r[i].Entity = ACLEntity(v.Entity)
|
||||
r[i].Role = ACLRole(v.Role)
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// bucketSet creates or updates the bucket ACL entry for entity with the
// given role, retrying transient failures via runWithRetry.
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
	acl := &raw.BucketAccessControl{
		Bucket: a.bucket,
		Entity: string(entity),
		Role:   string(role),
	}
	err := runWithRetry(ctx, func() error {
		req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl)
		a.configureCall(req, ctx)
		_, err := req.Do()
		return err
	})
	if err != nil {
		return fmt.Errorf("storage: error updating bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
	}
	return nil
}
|
||||
|
||||
// bucketDelete removes entity from the bucket ACL, retrying transient
// failures via runWithRetry.
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
	err := runWithRetry(ctx, func() error {
		req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
		a.configureCall(req, ctx)
		return req.Do()
	})
	if err != nil {
		return fmt.Errorf("storage: error deleting bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
	}
	return nil
}
|
||||
|
||||
// objectList lists the object's ACL, retrying transient failures via
// runWithRetry.
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
	var acls *raw.ObjectAccessControls
	var err error
	err = runWithRetry(ctx, func() error {
		req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
		a.configureCall(req, ctx)
		// acls is captured so the result survives the retry closure.
		acls, err = req.Do()
		return err
	})
	if err != nil {
		return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", a.bucket, a.object, err)
	}
	return toACLRules(acls.Items), nil
}
|
||||
|
||||
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
|
||||
type setRequest interface {
|
||||
Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error)
|
||||
Header() http.Header
|
||||
}
|
||||
|
||||
acl := &raw.ObjectAccessControl{
|
||||
Bucket: a.bucket,
|
||||
Entity: string(entity),
|
||||
Role: string(role),
|
||||
}
|
||||
var req setRequest
|
||||
if isBucketDefault {
|
||||
req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl)
|
||||
} else {
|
||||
req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
|
||||
}
|
||||
a.configureCall(req, ctx)
|
||||
err := runWithRetry(ctx, func() error {
|
||||
_, err := req.Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
if isBucketDefault {
|
||||
return fmt.Errorf("storage: error updating default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
|
||||
} else {
|
||||
return fmt.Errorf("storage: error updating object ACL entry for bucket %q, object %q, entity %q: %v", a.bucket, a.object, entity, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// objectDelete removes entity from the object's ACL, retrying transient
// failures via runWithRetry.
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
	err := runWithRetry(ctx, func() error {
		req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
		a.configureCall(req, ctx)
		return req.Do()
	})
	if err != nil {
		return fmt.Errorf("storage: error deleting object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err)
	}
	return nil
}
|
||||
|
||||
// configureCall sets the context, optional user project, and client header
// on a raw API call. The generated call types share no common interface
// exposing Context/UserProject, so reflection is used to invoke those
// methods by name on whatever concrete call type was passed in.
func (a *ACLHandle) configureCall(call interface {
	Header() http.Header
}, ctx context.Context) {
	vc := reflect.ValueOf(call)
	vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
	if a.userProject != "" {
		// Billed project for requester-pays buckets.
		vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)})
	}
	setClientHeader(call.Header())
}
|
||||
|
||||
func toACLRules(items []*raw.ObjectAccessControl) []ACLRule {
|
||||
r := make([]ACLRule, 0, len(items))
|
||||
for _, item := range items {
|
||||
r = append(r, ACLRule{Entity: ACLEntity(item.Entity), Role: ACLRole(item.Role)})
|
||||
}
|
||||
return r
|
||||
}
|
590
vendor/cloud.google.com/go/storage/bucket.go
generated
vendored
Normal file
590
vendor/cloud.google.com/go/storage/bucket.go
generated
vendored
Normal file
@@ -0,0 +1,590 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/optional"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/api/iterator"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// BucketHandle provides operations on a Google Cloud Storage bucket.
// Use Client.Bucket to get a handle.
type BucketHandle struct {
	c    *Client
	name string
	// acl is the bucket's own ACL handle.
	acl ACLHandle
	// defaultObjectACL is the handle for the bucket's default object ACL.
	defaultObjectACL ACLHandle
	// conds holds optional metageneration preconditions set via If.
	conds       *BucketConditions
	userProject string // project for requester-pays buckets
}
|
||||
|
||||
// Bucket returns a BucketHandle, which provides operations on the named bucket.
// This call does not perform any network operations.
//
// The supplied name must contain only lowercase letters, numbers, dashes,
// underscores, and dots. The full specification for valid bucket names can be
// found at:
//   https://cloud.google.com/storage/docs/bucket-naming
func (c *Client) Bucket(name string) *BucketHandle {
	return &BucketHandle{
		c:    c,
		name: name,
		acl: ACLHandle{
			c:      c,
			bucket: name,
		},
		defaultObjectACL: ACLHandle{
			c:      c,
			bucket: name,
			// isDefault distinguishes the default object ACL from the
			// bucket ACL above.
			isDefault: true,
		},
	}
}
|
||||
|
||||
// Create creates the Bucket in the project.
// If attrs is nil the API defaults will be used.
func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) error {
	var bkt *raw.Bucket
	if attrs != nil {
		bkt = attrs.toRawBucket()
	} else {
		bkt = &raw.Bucket{}
	}
	// The handle's name overrides any name in attrs.
	bkt.Name = b.name
	req := b.c.raw.Buckets.Insert(projectID, bkt)
	setClientHeader(req.Header())
	return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err })
}
|
||||
|
||||
// Delete deletes the Bucket.
func (b *BucketHandle) Delete(ctx context.Context) error {
	req, err := b.newDeleteCall()
	if err != nil {
		return err
	}
	return runWithRetry(ctx, func() error { return req.Context(ctx).Do() })
}
|
||||
|
||||
// newDeleteCall builds the raw Buckets.Delete call, applying any
// preconditions from b.conds and the user project for requester-pays.
func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) {
	req := b.c.raw.Buckets.Delete(b.name)
	setClientHeader(req.Header())
	if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil {
		return nil, err
	}
	if b.userProject != "" {
		req.UserProject(b.userProject)
	}
	return req, nil
}
|
||||
|
||||
// ACL returns an ACLHandle, which provides access to the bucket's access control list.
// This controls who can list, create or overwrite the objects in a bucket.
// This call does not perform any network operations.
func (b *BucketHandle) ACL() *ACLHandle {
	return &b.acl
}
|
||||
|
||||
// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs.
// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL.
// This call does not perform any network operations.
func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
	return &b.defaultObjectACL
}
|
||||
|
||||
// Object returns an ObjectHandle, which provides operations on the named object.
// This call does not perform any network operations.
//
// name must consist entirely of valid UTF-8-encoded runes. The full specification
// for valid object names can be found at:
//   https://cloud.google.com/storage/docs/bucket-naming
func (b *BucketHandle) Object(name string) *ObjectHandle {
	return &ObjectHandle{
		c:      b.c,
		bucket: b.name,
		object: name,
		acl: ACLHandle{
			c:      b.c,
			bucket: b.name,
			object: name,
			// Propagate the requester-pays project to ACL operations.
			userProject: b.userProject,
		},
		// gen == -1 means "latest generation".
		gen:         -1,
		userProject: b.userProject,
	}
}
|
||||
|
||||
// Attrs returns the metadata for the bucket.
// It returns ErrBucketNotExist if the bucket does not exist.
func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) {
	req, err := b.newGetCall()
	if err != nil {
		return nil, err
	}
	var resp *raw.Bucket
	err = runWithRetry(ctx, func() error {
		resp, err = req.Context(ctx).Do()
		return err
	})
	// Map a 404 from the service onto the package's sentinel error.
	if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
		return nil, ErrBucketNotExist
	}
	if err != nil {
		return nil, err
	}
	return newBucket(resp), nil
}
|
||||
|
||||
// newGetCall builds the raw Buckets.Get call with full projection,
// applying any preconditions from b.conds and the user project for
// requester-pays buckets.
func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) {
	req := b.c.raw.Buckets.Get(b.name).Projection("full")
	setClientHeader(req.Header())
	if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil {
		return nil, err
	}
	if b.userProject != "" {
		req.UserProject(b.userProject)
	}
	return req, nil
}
|
||||
|
||||
// Update patches the bucket's metadata with uattrs and returns the
// updated attributes.
func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (*BucketAttrs, error) {
	req, err := b.newPatchCall(&uattrs)
	if err != nil {
		return nil, err
	}
	// TODO(jba): retry iff metagen is set?
	rb, err := req.Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return newBucket(rb), nil
}
|
||||
|
||||
// newPatchCall builds the raw Buckets.Patch call from uattrs with full
// projection, applying any preconditions from b.conds and the user
// project for requester-pays buckets.
func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) {
	rb := uattrs.toRawBucket()
	req := b.c.raw.Buckets.Patch(b.name, rb).Projection("full")
	setClientHeader(req.Header())
	if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil {
		return nil, err
	}
	if b.userProject != "" {
		req.UserProject(b.userProject)
	}
	return req, nil
}
|
||||
|
||||
// BucketAttrs represents the metadata for a Google Cloud Storage bucket.
type BucketAttrs struct {
	// Name is the name of the bucket.
	Name string

	// ACL is the list of access control rules on the bucket.
	ACL []ACLRule

	// DefaultObjectACL is the list of access controls to
	// apply to new objects when no object ACL is provided.
	DefaultObjectACL []ACLRule

	// Location is the location of the bucket. It defaults to "US".
	Location string

	// MetaGeneration is the metadata generation of the bucket.
	MetaGeneration int64

	// StorageClass is the default storage class of the bucket. This defines
	// how objects in the bucket are stored and determines the SLA
	// and the cost of storage. Typical values are "MULTI_REGIONAL",
	// "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and
	// "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD", which
	// is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on
	// the bucket's location settings.
	StorageClass string

	// Created is the creation time of the bucket.
	Created time.Time

	// VersioningEnabled reports whether this bucket has versioning enabled.
	// This field is read-only.
	VersioningEnabled bool

	// Labels are the bucket's labels.
	Labels map[string]string

	// RequesterPays reports whether the bucket is a Requester Pays bucket.
	RequesterPays bool
}
|
||||
|
||||
func newBucket(b *raw.Bucket) *BucketAttrs {
|
||||
if b == nil {
|
||||
return nil
|
||||
}
|
||||
bucket := &BucketAttrs{
|
||||
Name: b.Name,
|
||||
Location: b.Location,
|
||||
MetaGeneration: b.Metageneration,
|
||||
StorageClass: b.StorageClass,
|
||||
Created: convertTime(b.TimeCreated),
|
||||
VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled,
|
||||
Labels: b.Labels,
|
||||
RequesterPays: b.Billing != nil && b.Billing.RequesterPays,
|
||||
}
|
||||
acl := make([]ACLRule, len(b.Acl))
|
||||
for i, rule := range b.Acl {
|
||||
acl[i] = ACLRule{
|
||||
Entity: ACLEntity(rule.Entity),
|
||||
Role: ACLRole(rule.Role),
|
||||
}
|
||||
}
|
||||
bucket.ACL = acl
|
||||
objACL := make([]ACLRule, len(b.DefaultObjectAcl))
|
||||
for i, rule := range b.DefaultObjectAcl {
|
||||
objACL[i] = ACLRule{
|
||||
Entity: ACLEntity(rule.Entity),
|
||||
Role: ACLRole(rule.Role),
|
||||
}
|
||||
}
|
||||
bucket.DefaultObjectACL = objACL
|
||||
return bucket
|
||||
}
|
||||
|
||||
// toRawBucket copies the editable attribute from b to the raw library's Bucket type.
func (b *BucketAttrs) toRawBucket() *raw.Bucket {
	var acl []*raw.BucketAccessControl
	if len(b.ACL) > 0 {
		acl = make([]*raw.BucketAccessControl, len(b.ACL))
		for i, rule := range b.ACL {
			acl[i] = &raw.BucketAccessControl{
				Entity: string(rule.Entity),
				Role:   string(rule.Role),
			}
		}
	}
	dACL := toRawObjectACL(b.DefaultObjectACL)
	// Copy label map.
	var labels map[string]string
	if len(b.Labels) > 0 {
		labels = make(map[string]string, len(b.Labels))
		for k, v := range b.Labels {
			labels[k] = v
		}
	}
	// Ignore VersioningEnabled if it is false. This is OK because
	// we only call this method when creating a bucket, and by default
	// new buckets have versioning off.
	var v *raw.BucketVersioning
	if b.VersioningEnabled {
		v = &raw.BucketVersioning{Enabled: true}
	}
	// Likewise, only send Billing when RequesterPays is set.
	var bb *raw.BucketBilling
	if b.RequesterPays {
		bb = &raw.BucketBilling{RequesterPays: true}
	}
	// Read-only fields (MetaGeneration, Created) are intentionally omitted.
	return &raw.Bucket{
		Name:             b.Name,
		DefaultObjectAcl: dACL,
		Location:         b.Location,
		StorageClass:     b.StorageClass,
		Acl:              acl,
		Versioning:       v,
		Labels:           labels,
		Billing:          bb,
	}
}
|
||||
|
||||
// BucketAttrsToUpdate describes the attributes to change in a call to
// BucketHandle.Update. Unset (nil) fields are left unchanged.
type BucketAttrsToUpdate struct {
	// VersioningEnabled, if set, updates whether the bucket uses versioning.
	VersioningEnabled optional.Bool

	// RequesterPays, if set, updates whether the bucket is a Requester Pays bucket.
	RequesterPays optional.Bool

	// setLabels holds labels to add or modify; populated via SetLabel.
	setLabels map[string]string
	// deleteLabels holds label names to remove; populated via DeleteLabel.
	deleteLabels map[string]bool
}
|
||||
|
||||
// SetLabel causes a label to be added or modified when ua is used
|
||||
// in a call to Bucket.Update.
|
||||
func (ua *BucketAttrsToUpdate) SetLabel(name, value string) {
|
||||
if ua.setLabels == nil {
|
||||
ua.setLabels = map[string]string{}
|
||||
}
|
||||
ua.setLabels[name] = value
|
||||
}
|
||||
|
||||
// DeleteLabel causes a label to be deleted when ua is used in a
|
||||
// call to Bucket.Update.
|
||||
func (ua *BucketAttrsToUpdate) DeleteLabel(name string) {
|
||||
if ua.deleteLabels == nil {
|
||||
ua.deleteLabels = map[string]bool{}
|
||||
}
|
||||
ua.deleteLabels[name] = true
|
||||
}
|
||||
|
||||
// toRawBucket builds the raw patch body for a bucket update. It uses
// ForceSendFields/NullFields so that explicit false values and label
// deletions survive JSON encoding.
func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
	rb := &raw.Bucket{}
	if ua.VersioningEnabled != nil {
		rb.Versioning = &raw.BucketVersioning{
			Enabled: optional.ToBool(ua.VersioningEnabled),
			// Force-send so Enabled=false is still serialized.
			ForceSendFields: []string{"Enabled"},
		}
	}
	if ua.RequesterPays != nil {
		rb.Billing = &raw.BucketBilling{
			RequesterPays: optional.ToBool(ua.RequesterPays),
			// Force-send so RequesterPays=false is still serialized.
			ForceSendFields: []string{"RequesterPays"},
		}
	}
	if ua.setLabels != nil || ua.deleteLabels != nil {
		rb.Labels = map[string]string{}
		for k, v := range ua.setLabels {
			rb.Labels[k] = v
		}
		// If only deletions were requested, force-send the empty map so
		// the Labels field appears in the patch at all.
		if len(rb.Labels) == 0 && len(ua.deleteLabels) > 0 {
			rb.ForceSendFields = append(rb.ForceSendFields, "Labels")
		}
		for l := range ua.deleteLabels {
			// A null entry deletes the label server-side.
			rb.NullFields = append(rb.NullFields, "Labels."+l)
		}
	}
	return rb
}
|
||||
|
||||
// If returns a new BucketHandle that applies a set of preconditions.
// Preconditions already set on the BucketHandle are ignored.
// Operations on the new handle will only occur if the preconditions are
// satisfied. The only valid preconditions for buckets are MetagenerationMatch
// and MetagenerationNotMatch.
func (b *BucketHandle) If(conds BucketConditions) *BucketHandle {
	// Copy the handle so the original is left unchanged.
	b2 := *b
	b2.conds = &conds
	return &b2
}
|
||||
|
||||
// BucketConditions constrain bucket methods to act on specific metagenerations.
//
// The zero value is an empty set of constraints.
type BucketConditions struct {
	// MetagenerationMatch specifies that the bucket must have the given
	// metageneration for the operation to occur.
	// If MetagenerationMatch is zero, it has no effect.
	MetagenerationMatch int64

	// MetagenerationNotMatch specifies that the bucket must not have the given
	// metageneration for the operation to occur.
	// If MetagenerationNotMatch is zero, it has no effect.
	MetagenerationNotMatch int64
}
|
||||
|
||||
// validate reports an error if the conditions are empty or mutually
// exclusive; method names the caller for the error message.
func (c *BucketConditions) validate(method string) error {
	if *c == (BucketConditions{}) {
		return fmt.Errorf("storage: %s: empty conditions", method)
	}
	// Match and NotMatch cannot both be set.
	if c.MetagenerationMatch != 0 && c.MetagenerationNotMatch != 0 {
		return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method)
	}
	return nil
}
|
||||
|
||||
// UserProject returns a new BucketHandle that passes the project ID as the user
// project for all subsequent calls. A user project is required for all operations
// on requester-pays buckets.
func (b *BucketHandle) UserProject(projectID string) *BucketHandle {
	// Copy the handle; also propagate the project to both ACL handles so
	// ACL operations are billed correctly.
	b2 := *b
	b2.userProject = projectID
	b2.acl.userProject = projectID
	b2.defaultObjectACL.userProject = projectID
	return &b2
}
|
||||
|
||||
// applyBucketConds modifies the provided call using the conditions in conds.
// call is something that quacks like a *raw.WhateverCall.
func applyBucketConds(method string, conds *BucketConditions, call interface{}) error {
	if conds == nil {
		return nil
	}
	if err := conds.validate(method); err != nil {
		return err
	}
	// The raw call types share no interface, so the precondition setters
	// are invoked via reflection (see setConditionField).
	cval := reflect.ValueOf(call)
	switch {
	case conds.MetagenerationMatch != 0:
		if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) {
			return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
		}
	case conds.MetagenerationNotMatch != 0:
		if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) {
			return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
		}
	}
	return nil
}
|
||||
|
||||
// Objects returns an iterator over the objects in the bucket that match the Query q.
// If q is nil, no filtering is done.
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
	it := &ObjectIterator{
		ctx:    ctx,
		bucket: b,
	}
	// Wire the iterator into the standard pagination machinery.
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		it.fetch,
		func() int { return len(it.items) },
		func() interface{} { b := it.items; it.items = nil; return b })
	if q != nil {
		it.query = *q
	}
	return it
}
|
||||
|
||||
// An ObjectIterator is an iterator over ObjectAttrs.
type ObjectIterator struct {
	ctx    context.Context
	bucket *BucketHandle
	// query filters the listing; the zero value matches everything.
	query    Query
	pageInfo *iterator.PageInfo
	nextFunc func() error
	// items buffers the current page of results.
	items []*ObjectAttrs
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if
|
||||
// there are no more results. Once Next returns iterator.Done, all subsequent
|
||||
// calls will return iterator.Done.
|
||||
//
|
||||
// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will
|
||||
// have a non-empty Prefix field, and a zero value for all other fields. These
|
||||
// represent prefixes.
|
||||
func (it *ObjectIterator) Next() (*ObjectAttrs, error) {
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
item := it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
// fetch retrieves one page of object listings into it.items and returns
// the next page token. It is invoked by the iterator's pagination
// machinery (see Objects).
func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) {
	req := it.bucket.c.raw.Objects.List(it.bucket.name)
	setClientHeader(req.Header())
	req.Projection("full")
	req.Delimiter(it.query.Delimiter)
	req.Prefix(it.query.Prefix)
	req.Versions(it.query.Versions)
	req.PageToken(pageToken)
	if it.bucket.userProject != "" {
		req.UserProject(it.bucket.userProject)
	}
	if pageSize > 0 {
		req.MaxResults(int64(pageSize))
	}
	var resp *raw.Objects
	var err error
	err = runWithRetry(it.ctx, func() error {
		resp, err = req.Context(it.ctx).Do()
		return err
	})
	if err != nil {
		// Map a 404 from the service onto the package's sentinel error.
		if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
			err = ErrBucketNotExist
		}
		return "", err
	}
	for _, item := range resp.Items {
		it.items = append(it.items, newObject(item))
	}
	// Delimiter queries also return prefixes; represent each as an
	// ObjectAttrs with only Prefix set.
	for _, prefix := range resp.Prefixes {
		it.items = append(it.items, &ObjectAttrs{Prefix: prefix})
	}
	return resp.NextPageToken, nil
}
|
||||
|
||||
// TODO(jbd): Add storage.buckets.update.
|
||||
|
||||
// Buckets returns an iterator over the buckets in the project. You may
// optionally set the iterator's Prefix field to restrict the list to buckets
// whose names begin with the prefix. By default, all buckets in the project
// are returned.
func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator {
	it := &BucketIterator{
		ctx:       ctx,
		client:    c,
		projectID: projectID,
	}
	// Wire the iterator into the standard pagination machinery.
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		it.fetch,
		func() int { return len(it.buckets) },
		func() interface{} { b := it.buckets; it.buckets = nil; return b })
	return it
}
|
||||
|
||||
// A BucketIterator is an iterator over BucketAttrs.
type BucketIterator struct {
	// Prefix restricts the iterator to buckets whose names begin with it.
	Prefix string

	ctx       context.Context
	client    *Client
	projectID string
	// buckets buffers the current page of results.
	buckets  []*BucketAttrs
	pageInfo *iterator.PageInfo
	nextFunc func() error
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if
|
||||
// there are no more results. Once Next returns iterator.Done, all subsequent
|
||||
// calls will return iterator.Done.
|
||||
func (it *BucketIterator) Next() (*BucketAttrs, error) {
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b := it.buckets[0]
|
||||
it.buckets = it.buckets[1:]
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
|
||||
|
||||
// fetch retrieves one page of bucket listings into it.buckets and returns
// the next page token. It is invoked by the iterator's pagination
// machinery (see Buckets).
func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error) {
	req := it.client.raw.Buckets.List(it.projectID)
	setClientHeader(req.Header())
	req.Projection("full")
	req.Prefix(it.Prefix)
	req.PageToken(pageToken)
	if pageSize > 0 {
		req.MaxResults(int64(pageSize))
	}
	var resp *raw.Buckets
	var err error
	err = runWithRetry(it.ctx, func() error {
		resp, err = req.Context(it.ctx).Do()
		return err
	})
	if err != nil {
		return "", err
	}
	for _, item := range resp.Items {
		it.buckets = append(it.buckets, newBucket(item))
	}
	return resp.NextPageToken, nil
}
|
211
vendor/cloud.google.com/go/storage/bucket_test.go
generated
vendored
Normal file
211
vendor/cloud.google.com/go/storage/bucket_test.go
generated
vendored
Normal file
@@ -0,0 +1,211 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
|
||||
"cloud.google.com/go/internal/pretty"
|
||||
"cloud.google.com/go/internal/testutil"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// TestBucketAttrsToRawBucket verifies that BucketAttrs.toRawBucket copies
// only the editable fields, drops read-only ones, and emits Versioning and
// Billing only when their flags are true.
func TestBucketAttrsToRawBucket(t *testing.T) {
	t.Parallel()
	attrs := &BucketAttrs{
		Name:              "name",
		ACL:               []ACLRule{{Entity: "bob@example.com", Role: RoleOwner}},
		DefaultObjectACL:  []ACLRule{{Entity: AllUsers, Role: RoleReader}},
		Location:          "loc",
		StorageClass:      "class",
		VersioningEnabled: false,
		// should be ignored:
		MetaGeneration: 39,
		Created:        time.Now(),
		Labels:         map[string]string{"label": "value"},
	}
	got := attrs.toRawBucket()
	want := &raw.Bucket{
		Name: "name",
		Acl: []*raw.BucketAccessControl{
			{Entity: "bob@example.com", Role: "OWNER"},
		},
		DefaultObjectAcl: []*raw.ObjectAccessControl{
			{Entity: "allUsers", Role: "READER"},
		},
		Location:     "loc",
		StorageClass: "class",
		Versioning:   nil, // ignore VersioningEnabled if false
		Labels:       map[string]string{"label": "value"},
	}
	msg, ok, err := pretty.Diff(want, got)
	if err != nil {
		t.Fatal(err)
	}
	if !ok {
		t.Error(msg)
	}

	// With the flags set, Versioning and Billing must be populated.
	attrs.VersioningEnabled = true
	attrs.RequesterPays = true
	got = attrs.toRawBucket()
	want.Versioning = &raw.BucketVersioning{Enabled: true}
	want.Billing = &raw.BucketBilling{RequesterPays: true}
	msg, ok, err = pretty.Diff(want, got)
	if err != nil {
		t.Fatal(err)
	}
	if !ok {
		t.Error(msg)
	}
}
|
||||
|
||||
func TestBucketAttrsToUpdateToRawBucket(t *testing.T) {
|
||||
t.Parallel()
|
||||
au := &BucketAttrsToUpdate{
|
||||
VersioningEnabled: false,
|
||||
RequesterPays: false,
|
||||
}
|
||||
au.SetLabel("a", "foo")
|
||||
au.DeleteLabel("b")
|
||||
au.SetLabel("c", "")
|
||||
got := au.toRawBucket()
|
||||
want := &raw.Bucket{
|
||||
Versioning: &raw.BucketVersioning{
|
||||
Enabled: false,
|
||||
ForceSendFields: []string{"Enabled"},
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"a": "foo",
|
||||
"c": "",
|
||||
},
|
||||
Billing: &raw.BucketBilling{
|
||||
RequesterPays: false,
|
||||
ForceSendFields: []string{"RequesterPays"},
|
||||
},
|
||||
NullFields: []string{"Labels.b"},
|
||||
}
|
||||
msg, ok, err := pretty.Diff(want, got)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !ok {
|
||||
t.Error(msg)
|
||||
}
|
||||
|
||||
var au2 BucketAttrsToUpdate
|
||||
au2.DeleteLabel("b")
|
||||
got = au2.toRawBucket()
|
||||
want = &raw.Bucket{
|
||||
Labels: map[string]string{},
|
||||
ForceSendFields: []string{"Labels"},
|
||||
NullFields: []string{"Labels.b"},
|
||||
}
|
||||
msg, ok, err = pretty.Diff(want, got)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !ok {
|
||||
t.Error(msg)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestCallBuilders(t *testing.T) {
|
||||
rc, err := raw.New(&http.Client{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
c := &Client{raw: rc}
|
||||
const metagen = 17
|
||||
|
||||
b := c.Bucket("name")
|
||||
bm := b.If(BucketConditions{MetagenerationMatch: metagen}).UserProject("p")
|
||||
|
||||
equal := func(x, y interface{}) bool {
|
||||
return testutil.Equal(x, y,
|
||||
cmp.AllowUnexported(
|
||||
raw.BucketsGetCall{},
|
||||
raw.BucketsDeleteCall{},
|
||||
raw.BucketsPatchCall{},
|
||||
),
|
||||
cmp.FilterPath(func(p cmp.Path) bool {
|
||||
return p[len(p)-1].Type() == reflect.TypeOf(&raw.Service{})
|
||||
}, cmp.Ignore()),
|
||||
)
|
||||
}
|
||||
|
||||
for i, test := range []struct {
|
||||
callFunc func(*BucketHandle) (interface{}, error)
|
||||
want interface {
|
||||
Header() http.Header
|
||||
}
|
||||
metagenFunc func(interface{})
|
||||
}{
|
||||
{
|
||||
func(b *BucketHandle) (interface{}, error) { return b.newGetCall() },
|
||||
rc.Buckets.Get("name").Projection("full"),
|
||||
func(req interface{}) { req.(*raw.BucketsGetCall).IfMetagenerationMatch(metagen).UserProject("p") },
|
||||
},
|
||||
{
|
||||
func(b *BucketHandle) (interface{}, error) { return b.newDeleteCall() },
|
||||
rc.Buckets.Delete("name"),
|
||||
func(req interface{}) { req.(*raw.BucketsDeleteCall).IfMetagenerationMatch(metagen).UserProject("p") },
|
||||
},
|
||||
{
|
||||
func(b *BucketHandle) (interface{}, error) {
|
||||
return b.newPatchCall(&BucketAttrsToUpdate{VersioningEnabled: false})
|
||||
},
|
||||
rc.Buckets.Patch("name", &raw.Bucket{
|
||||
Versioning: &raw.BucketVersioning{Enabled: false, ForceSendFields: []string{"Enabled"}},
|
||||
}).Projection("full"),
|
||||
func(req interface{}) { req.(*raw.BucketsPatchCall).IfMetagenerationMatch(metagen).UserProject("p") },
|
||||
},
|
||||
} {
|
||||
got, err := test.callFunc(b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
setClientHeader(test.want.Header())
|
||||
if !equal(got, test.want) {
|
||||
t.Errorf("#%d: got %#v, want %#v", i, got, test.want)
|
||||
}
|
||||
got, err = test.callFunc(bm)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
test.metagenFunc(test.want)
|
||||
if !equal(got, test.want) {
|
||||
t.Errorf("#%d:\ngot %#v\nwant %#v", i, got, test.want)
|
||||
}
|
||||
}
|
||||
|
||||
// Error.
|
||||
bm = b.If(BucketConditions{MetagenerationMatch: 1, MetagenerationNotMatch: 2})
|
||||
if _, err := bm.newGetCall(); err == nil {
|
||||
t.Errorf("got nil, want error")
|
||||
}
|
||||
if _, err := bm.newDeleteCall(); err == nil {
|
||||
t.Errorf("got nil, want error")
|
||||
}
|
||||
if _, err := bm.newPatchCall(&BucketAttrsToUpdate{}); err == nil {
|
||||
t.Errorf("got nil, want error")
|
||||
}
|
||||
}
|
201
vendor/cloud.google.com/go/storage/copy.go
generated
vendored
Normal file
201
vendor/cloud.google.com/go/storage/copy.go
generated
vendored
Normal file
@@ -0,0 +1,201 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// CopierFrom creates a Copier that can copy src to dst.
|
||||
// You can immediately call Run on the returned Copier, or
|
||||
// you can configure it first.
|
||||
//
|
||||
// For Requester Pays buckets, the user project of dst is billed, unless it is empty,
|
||||
// in which case the user project of src is billed.
|
||||
func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier {
|
||||
return &Copier{dst: dst, src: src}
|
||||
}
|
||||
|
||||
// A Copier copies a source object to a destination.
|
||||
type Copier struct {
|
||||
// ObjectAttrs are optional attributes to set on the destination object.
|
||||
// Any attributes must be initialized before any calls on the Copier. Nil
|
||||
// or zero-valued attributes are ignored.
|
||||
ObjectAttrs
|
||||
|
||||
// RewriteToken can be set before calling Run to resume a copy
|
||||
// operation. After Run returns a non-nil error, RewriteToken will
|
||||
// have been updated to contain the value needed to resume the copy.
|
||||
RewriteToken string
|
||||
|
||||
// ProgressFunc can be used to monitor the progress of a multi-RPC copy
|
||||
// operation. If ProgressFunc is not nil and copying requires multiple
|
||||
// calls to the underlying service (see
|
||||
// https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then
|
||||
// ProgressFunc will be invoked after each call with the number of bytes of
|
||||
// content copied so far and the total size in bytes of the source object.
|
||||
//
|
||||
// ProgressFunc is intended to make upload progress available to the
|
||||
// application. For example, the implementation of ProgressFunc may update
|
||||
// a progress bar in the application's UI, or log the result of
|
||||
// float64(copiedBytes)/float64(totalBytes).
|
||||
//
|
||||
// ProgressFunc should return quickly without blocking.
|
||||
ProgressFunc func(copiedBytes, totalBytes uint64)
|
||||
|
||||
dst, src *ObjectHandle
|
||||
}
|
||||
|
||||
// Run performs the copy.
|
||||
func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error) {
|
||||
if err := c.src.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := c.dst.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Convert destination attributes to raw form, omitting the bucket.
|
||||
// If the bucket is included but name or content-type aren't, the service
|
||||
// returns a 400 with "Required" as the only message. Omitting the bucket
|
||||
// does not cause any problems.
|
||||
rawObject := c.ObjectAttrs.toRawObject("")
|
||||
for {
|
||||
res, err := c.callRewrite(ctx, rawObject)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if c.ProgressFunc != nil {
|
||||
c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize))
|
||||
}
|
||||
if res.Done { // Finished successfully.
|
||||
return newObject(res.Resource), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) {
|
||||
call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj)
|
||||
|
||||
call.Context(ctx).Projection("full")
|
||||
if c.RewriteToken != "" {
|
||||
call.RewriteToken(c.RewriteToken)
|
||||
}
|
||||
if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if c.dst.userProject != "" {
|
||||
call.UserProject(c.dst.userProject)
|
||||
} else if c.src.userProject != "" {
|
||||
call.UserProject(c.src.userProject)
|
||||
}
|
||||
if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var res *raw.RewriteResponse
|
||||
var err error
|
||||
setClientHeader(call.Header())
|
||||
err = runWithRetry(ctx, func() error { res, err = call.Do(); return err })
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.RewriteToken = res.RewriteToken
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// ComposerFrom creates a Composer that can compose srcs into dst.
|
||||
// You can immediately call Run on the returned Composer, or you can
|
||||
// configure it first.
|
||||
//
|
||||
// The encryption key for the destination object will be used to decrypt all
|
||||
// source objects and encrypt the destination object. It is an error
|
||||
// to specify an encryption key for any of the source objects.
|
||||
func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer {
|
||||
return &Composer{dst: dst, srcs: srcs}
|
||||
}
|
||||
|
||||
// A Composer composes source objects into a destination object.
|
||||
//
|
||||
// For Requester Pays buckets, the user project of dst is billed.
|
||||
type Composer struct {
|
||||
// ObjectAttrs are optional attributes to set on the destination object.
|
||||
// Any attributes must be initialized before any calls on the Composer. Nil
|
||||
// or zero-valued attributes are ignored.
|
||||
ObjectAttrs
|
||||
|
||||
dst *ObjectHandle
|
||||
srcs []*ObjectHandle
|
||||
}
|
||||
|
||||
// Run performs the compose operation.
|
||||
func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) {
|
||||
if err := c.dst.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(c.srcs) == 0 {
|
||||
return nil, errors.New("storage: at least one source object must be specified")
|
||||
}
|
||||
|
||||
req := &raw.ComposeRequest{}
|
||||
// Compose requires a non-empty Destination, so we always set it,
|
||||
// even if the caller-provided ObjectAttrs is the zero value.
|
||||
req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket)
|
||||
for _, src := range c.srcs {
|
||||
if err := src.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if src.bucket != c.dst.bucket {
|
||||
return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket)
|
||||
}
|
||||
if src.encryptionKey != nil {
|
||||
return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object)
|
||||
}
|
||||
srcObj := &raw.ComposeRequestSourceObjects{
|
||||
Name: src.object,
|
||||
}
|
||||
if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.SourceObjects = append(req.SourceObjects, srcObj)
|
||||
}
|
||||
|
||||
call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx)
|
||||
if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if c.dst.userProject != "" {
|
||||
call.UserProject(c.dst.userProject)
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var obj *raw.Object
|
||||
var err error
|
||||
setClientHeader(call.Header())
|
||||
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newObject(obj), nil
|
||||
}
|
161
vendor/cloud.google.com/go/storage/doc.go
generated
vendored
Normal file
161
vendor/cloud.google.com/go/storage/doc.go
generated
vendored
Normal file
@@ -0,0 +1,161 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package storage provides an easy way to work with Google Cloud Storage.
|
||||
Google Cloud Storage stores data in named objects, which are grouped into buckets.
|
||||
|
||||
More information about Google Cloud Storage is available at
|
||||
https://cloud.google.com/storage/docs.
|
||||
|
||||
All of the methods of this package use exponential backoff to retry calls
|
||||
that fail with certain errors, as described in
|
||||
https://cloud.google.com/storage/docs/exponential-backoff.
|
||||
|
||||
Note: This package is in beta. Some backwards-incompatible changes may occur.
|
||||
|
||||
|
||||
Creating a Client
|
||||
|
||||
To start working with this package, create a client:
|
||||
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
Buckets
|
||||
|
||||
A Google Cloud Storage bucket is a collection of objects. To work with a
|
||||
bucket, make a bucket handle:
|
||||
|
||||
bkt := client.Bucket(bucketName)
|
||||
|
||||
A handle is a reference to a bucket. You can have a handle even if the
|
||||
bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
|
||||
call Create on the handle:
|
||||
|
||||
if err := bkt.Create(ctx, projectID, nil); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
Note that although buckets are associated with projects, bucket names are
|
||||
global across all projects.
|
||||
|
||||
Each bucket has associated metadata, represented in this package by
|
||||
BucketAttrs. The third argument to BucketHandle.Create allows you to set
|
||||
the intial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
|
||||
Attrs:
|
||||
|
||||
attrs, err := bkt.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
|
||||
attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
|
||||
|
||||
Objects
|
||||
|
||||
An object holds arbitrary data as a sequence of bytes, like a file. You
|
||||
refer to objects using a handle, just as with buckets. You can use the
|
||||
standard Go io.Reader and io.Writer interfaces to read and write
|
||||
object data:
|
||||
|
||||
obj := bkt.Object("data")
|
||||
// Write something to obj.
|
||||
// w implements io.Writer.
|
||||
w := obj.NewWriter(ctx)
|
||||
// Write some text to obj. This will overwrite whatever is there.
|
||||
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Close, just like writing a file.
|
||||
if err := w.Close(); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
// Read it back.
|
||||
r, err := obj.NewReader(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer r.Close()
|
||||
if _, err := io.Copy(os.Stdout, r); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Prints "This object contains text."
|
||||
|
||||
Objects also have attributes, which you can fetch with Attrs:
|
||||
|
||||
objAttrs, err := obj.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("object %s has size %d and can be read using %s\n",
|
||||
objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
|
||||
|
||||
ACLs
|
||||
|
||||
Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
|
||||
ACLRules, each of which specifies the role of a user, group or project. ACLs
|
||||
are suitable for fine-grained control, but you may prefer using IAM to control
|
||||
access at the project level (see
|
||||
https://cloud.google.com/storage/docs/access-control/iam).
|
||||
|
||||
To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method:
|
||||
|
||||
acls, err := obj.ACL().List(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
for _, rule := range acls {
|
||||
fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
|
||||
}
|
||||
|
||||
You can also set and delete ACLs.
|
||||
|
||||
Conditions
|
||||
|
||||
Every object has a generation and a metageneration. The generation changes
|
||||
whenever the content changes, and the metageneration changes whenever the
|
||||
metadata changes. Conditions let you check these values before an operation;
|
||||
the operation only executes if the conditions match. You can use conditions to
|
||||
prevent race conditions in read-modify-write operations.
|
||||
|
||||
For example, say you've read an object's metadata into objAttrs. Now
|
||||
you want to write to that object, but only if its contents haven't changed
|
||||
since you read it. Here is how to express that:
|
||||
|
||||
w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
|
||||
// Proceed with writing as above.
|
||||
|
||||
Signed URLs
|
||||
|
||||
You can obtain a URL that lets anyone read or write an object for a limited time.
|
||||
You don't need to create a client to do this. See the documentation of
|
||||
SignedURL for details.
|
||||
|
||||
url, err := storage.SignedURL(bucketName, "shared-object", opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(url)
|
||||
|
||||
Authentication
|
||||
|
||||
See examples of authorization and authentication at
|
||||
https://godoc.org/cloud.google.com/go#pkg-examples.
|
||||
*/
|
||||
package storage // import "cloud.google.com/go/storage"
|
543
vendor/cloud.google.com/go/storage/example_test.go
generated
vendored
Normal file
543
vendor/cloud.google.com/go/storage/example_test.go
generated
vendored
Normal file
@@ -0,0 +1,543 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/storage"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
)
|
||||
|
||||
func ExampleNewClient() {
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
// Use the client.
|
||||
|
||||
// Close the client when finished.
|
||||
if err := client.Close(); err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleNewClient_auth() {
|
||||
ctx := context.Background()
|
||||
// Use Google Application Default Credentials to authorize and authenticate the client.
|
||||
// More information about Application Default Credentials and how to enable is at
|
||||
// https://developers.google.com/identity/protocols/application-default-credentials.
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Use the client.
|
||||
|
||||
// Close the client when finished.
|
||||
if err := client.Close(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleBucketHandle_Create() {
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
if err := client.Bucket("my-bucket").Create(ctx, "my-project", nil); err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleBucketHandle_Delete() {
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
if err := client.Bucket("my-bucket").Delete(ctx); err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleBucketHandle_Attrs() {
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
attrs, err := client.Bucket("my-bucket").Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
fmt.Println(attrs)
|
||||
}
|
||||
|
||||
func ExampleBucketHandle_Update() {
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
// Enable versioning in the bucket, regardless of its previous value.
|
||||
attrs, err := client.Bucket("my-bucket").Update(ctx,
|
||||
storage.BucketAttrsToUpdate{VersioningEnabled: true})
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
fmt.Println(attrs)
|
||||
}
|
||||
|
||||
// If your update is based on the bucket's previous attributes, match the
|
||||
// metageneration number to make sure the bucket hasn't changed since you read it.
|
||||
func ExampleBucketHandle_Update_readModifyWrite() {
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
b := client.Bucket("my-bucket")
|
||||
attrs, err := b.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
var au storage.BucketAttrsToUpdate
|
||||
au.SetLabel("lab", attrs.Labels["lab"]+"-more")
|
||||
if attrs.Labels["delete-me"] == "yes" {
|
||||
au.DeleteLabel("delete-me")
|
||||
}
|
||||
attrs, err = b.
|
||||
If(storage.BucketConditions{MetagenerationMatch: attrs.MetaGeneration}).
|
||||
Update(ctx, au)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
fmt.Println(attrs)
|
||||
}
|
||||
|
||||
func ExampleClient_Buckets() {
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
it := client.Bucket("my-bucket")
|
||||
_ = it // TODO: iterate using Next or iterator.Pager.
|
||||
}
|
||||
|
||||
func ExampleBucketIterator_Next() {
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
it := client.Buckets(ctx, "my-project")
|
||||
for {
|
||||
bucketAttrs, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(bucketAttrs)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleBucketHandle_Objects() {
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
it := client.Bucket("my-bucket").Objects(ctx, nil)
|
||||
_ = it // TODO: iterate using Next or iterator.Pager.
|
||||
}
|
||||
|
||||
func ExampleObjectIterator_Next() {
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
it := client.Bucket("my-bucket").Objects(ctx, nil)
|
||||
for {
|
||||
objAttrs, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(objAttrs)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleSignedURL() {
|
||||
pkey, err := ioutil.ReadFile("my-private-key.pem")
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
url, err := storage.SignedURL("my-bucket", "my-object", &storage.SignedURLOptions{
|
||||
GoogleAccessID: "xxx@developer.gserviceaccount.com",
|
||||
PrivateKey: pkey,
|
||||
Method: "GET",
|
||||
Expires: time.Now().Add(48 * time.Hour),
|
||||
})
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
fmt.Println(url)
|
||||
}
|
||||
|
||||
func ExampleObjectHandle_Attrs() {
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
objAttrs, err := client.Bucket("my-bucket").Object("my-object").Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
fmt.Println(objAttrs)
|
||||
}
|
||||
|
||||
func ExampleObjectHandle_Attrs_withConditions() {
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
obj := client.Bucket("my-bucket").Object("my-object")
|
||||
// Read the object.
|
||||
objAttrs1, err := obj.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
// Do something else for a while.
|
||||
time.Sleep(5 * time.Minute)
|
||||
// Now read the same contents, even if the object has been written since the last read.
|
||||
objAttrs2, err := obj.Generation(objAttrs1.Generation).Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
fmt.Println(objAttrs1, objAttrs2)
|
||||
}
|
||||
|
||||
func ExampleObjectHandle_Update() {
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
// Change only the content type of the object.
|
||||
objAttrs, err := client.Bucket("my-bucket").Object("my-object").Update(ctx, storage.ObjectAttrsToUpdate{
|
||||
ContentType: "text/html",
|
||||
ContentDisposition: "", // delete ContentDisposition
|
||||
})
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
fmt.Println(objAttrs)
|
||||
}
|
||||
|
||||
func ExampleObjectHandle_NewReader() {
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
rc, err := client.Bucket("my-bucket").Object("my-object").NewReader(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
slurp, err := ioutil.ReadAll(rc)
|
||||
rc.Close()
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
fmt.Println("file contents:", slurp)
|
||||
}
|
||||
|
||||
func ExampleObjectHandle_NewRangeReader() {
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
// Read only the first 64K.
|
||||
rc, err := client.Bucket("bucketname").Object("filename1").NewRangeReader(ctx, 0, 64*1024)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
slurp, err := ioutil.ReadAll(rc)
|
||||
rc.Close()
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
fmt.Println("first 64K of file contents:", slurp)
|
||||
}
|
||||
|
||||
func ExampleObjectHandle_NewWriter() {
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx)
|
||||
_ = wc // TODO: Use the Writer.
|
||||
}
|
||||
|
||||
func ExampleWriter_Write() {
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx)
|
||||
wc.ContentType = "text/plain"
|
||||
wc.ACL = []storage.ACLRule{{storage.AllUsers, storage.RoleReader}}
|
||||
if _, err := wc.Write([]byte("hello world")); err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
if err := wc.Close(); err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
fmt.Println("updated object:", wc.Attrs())
|
||||
}
|
||||
|
||||
func ExampleObjectHandle_Delete() {
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
// To delete multiple objects in a bucket, list them with an
|
||||
// ObjectIterator, then Delete them.
|
||||
|
||||
// If you are using this package on the App Engine Flex runtime,
|
||||
// you can init a bucket client with your app's default bucket name.
|
||||
// See http://godoc.org/google.golang.org/appengine/file#DefaultBucketName.
|
||||
bucket := client.Bucket("my-bucket")
|
||||
it := bucket.Objects(ctx, nil)
|
||||
for {
|
||||
objAttrs, err := it.Next()
|
||||
if err != nil && err != iterator.Done {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err := bucket.Object(objAttrs.Name).Delete(ctx); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
}
|
||||
fmt.Println("deleted all object items in the bucket specified.")
|
||||
}
|
||||
|
||||
func ExampleACLHandle_Delete() {
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
// No longer grant access to the bucket to everyone on the Internet.
|
||||
if err := client.Bucket("my-bucket").ACL().Delete(ctx, storage.AllUsers); err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
}
|
||||
|
||||
// ExampleACLHandle_Set demonstrates adding an entry to an object's access
// control list: granting read access to all authenticated users.
func ExampleACLHandle_Set() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	// Let any authenticated user read my-bucket/my-object.
	obj := client.Bucket("my-bucket").Object("my-object")
	if err := obj.ACL().Set(ctx, storage.AllAuthenticatedUsers, storage.RoleReader); err != nil {
		// TODO: handle error.
	}
}
|
||||
|
||||
// ExampleACLHandle_List demonstrates listing the default object ACL rules
// of a bucket, which are applied to newly created objects.
func ExampleACLHandle_List() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	// List the default object ACLs for my-bucket.
	aclRules, err := client.Bucket("my-bucket").DefaultObjectACL().List(ctx)
	if err != nil {
		// TODO: handle error.
	}
	fmt.Println(aclRules)
}
|
||||
|
||||
// ExampleCopier_Run demonstrates server-side copies between buckets,
// both with metadata rewriting and as a plain content copy.
func ExampleCopier_Run() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	src := client.Bucket("bucketname").Object("file1")
	dst := client.Bucket("another-bucketname").Object("file2")

	// Copy content and modify metadata.
	copier := dst.CopierFrom(src)
	copier.ContentType = "text/plain"
	attrs, err := copier.Run(ctx)
	if err != nil {
		// TODO: Handle error, possibly resuming with copier.RewriteToken.
	}
	fmt.Println(attrs)

	// Just copy content.
	// Without a retained Copier there is no RewriteToken to resume from.
	attrs, err = dst.CopierFrom(src).Run(ctx)
	if err != nil {
		// TODO: Handle error. No way to resume.
	}
	fmt.Println(attrs)
}
|
||||
|
||||
// ExampleCopier_Run_progress demonstrates observing the progress of a
// long-running copy via the Copier's ProgressFunc callback.
func ExampleCopier_Run_progress() {
	// Display progress across multiple rewrite RPCs.
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	src := client.Bucket("bucketname").Object("file1")
	dst := client.Bucket("another-bucketname").Object("file2")

	copier := dst.CopierFrom(src)
	// Invoked after each rewrite RPC with cumulative byte counts.
	copier.ProgressFunc = func(copiedBytes, totalBytes uint64) {
		log.Printf("copy %.1f%% done", float64(copiedBytes)/float64(totalBytes)*100)
	}
	if _, err := copier.Run(ctx); err != nil {
		// TODO: handle error.
	}
}
|
||||
|
||||
// key1 and key2 stand in for customer-supplied encryption keys used by
// the example below.
var key1, key2 []byte

// ExampleObjectHandle_CopierFrom_rotateEncryptionKeys shows how to change
// the customer-supplied encryption key on an object: copy the object onto
// itself, reading with the old key and writing with the new one.
func ExampleObjectHandle_CopierFrom_rotateEncryptionKeys() {
	// To rotate the encryption key on an object, copy it onto itself.
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	obj := client.Bucket("bucketname").Object("obj")
	// Assume obj is encrypted with key1, and we want to change to key2.
	_, err = obj.Key(key2).CopierFrom(obj.Key(key1)).Run(ctx)
	if err != nil {
		// TODO: handle error.
	}
}
|
||||
|
||||
// ExampleComposer_Run demonstrates concatenating source objects into a
// composite destination object, with and without metadata changes.
func ExampleComposer_Run() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	bkt := client.Bucket("bucketname")
	src1 := bkt.Object("o1")
	src2 := bkt.Object("o2")
	dst := bkt.Object("o3")
	// Compose and modify metadata.
	c := dst.ComposerFrom(src1, src2)
	c.ContentType = "text/plain"
	attrs, err := c.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(attrs)
	// Just compose.
	attrs, err = dst.ComposerFrom(src1, src2).Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(attrs)
}
|
||||
|
||||
// gen stands in for a specific object generation number used by the
// example below.
var gen int64

// ExampleObjectHandle_Generation demonstrates reading a specific
// generation of an object, independent of its current generation.
func ExampleObjectHandle_Generation() {
	// Read an object's contents from generation gen, regardless of the
	// current generation of the object.
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	obj := client.Bucket("my-bucket").Object("my-object")
	rc, err := obj.Generation(gen).NewReader(ctx)
	if err != nil {
		// TODO: handle error.
	}
	defer rc.Close()
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		// TODO: handle error.
	}
}
|
||||
|
||||
// ExampleObjectHandle_If demonstrates a conditional read: the request
// succeeds only if the object's current generation matches gen.
func ExampleObjectHandle_If() {
	// Read from an object only if the current generation is gen.
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	obj := client.Bucket("my-bucket").Object("my-object")
	rc, err := obj.If(storage.Conditions{GenerationMatch: gen}).NewReader(ctx)
	if err != nil {
		// TODO: handle error.
	}
	defer rc.Close()
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		// TODO: handle error.
	}
}
|
||||
|
||||
// secretKey stands in for a customer-supplied encryption key used by the
// example below.
var secretKey []byte

// ExampleObjectHandle_Key demonstrates writing an object encrypted with a
// customer-supplied key; the same key is needed to read it back.
func ExampleObjectHandle_Key() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: handle error.
	}
	obj := client.Bucket("my-bucket").Object("my-object")
	// Encrypt the object's contents.
	w := obj.Key(secretKey).NewWriter(ctx)
	if _, err := w.Write([]byte("top secret")); err != nil {
		// TODO: handle error.
	}
	// Close flushes the upload; its error must be checked.
	if err := w.Close(); err != nil {
		// TODO: handle error.
	}
}
|
26
vendor/cloud.google.com/go/storage/go17.go
generated
vendored
Normal file
26
vendor/cloud.google.com/go/storage/go17.go
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build go1.7
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func withContext(r *http.Request, ctx context.Context) *http.Request {
|
||||
return r.WithContext(ctx)
|
||||
}
|
108
vendor/cloud.google.com/go/storage/iam.go
generated
vendored
Normal file
108
vendor/cloud.google.com/go/storage/iam.go
generated
vendored
Normal file
@@ -0,0 +1,108 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"cloud.google.com/go/iam"
|
||||
"golang.org/x/net/context"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
iampb "google.golang.org/genproto/googleapis/iam/v1"
|
||||
)
|
||||
|
||||
// IAM provides access to IAM access control for the bucket.
// The returned handle delegates to iamClient, which translates between
// the iam package's protobuf policies and the raw JSON-API forms.
func (b *BucketHandle) IAM() *iam.Handle {
	return iam.InternalNewHandleClient(&iamClient{raw: b.c.raw}, b.name)
}
|
||||
|
||||
// iamClient implements the iam.client interface.
// It adapts the raw Cloud Storage JSON-API service to the generic IAM
// operations (Get, Set, Test) used by the iam package.
type iamClient struct {
	raw *raw.Service // underlying JSON-API client shared with the storage Client
}
|
||||
|
||||
func (c *iamClient) Get(ctx context.Context, resource string) (*iampb.Policy, error) {
|
||||
req := c.raw.Buckets.GetIamPolicy(resource)
|
||||
setClientHeader(req.Header())
|
||||
var rp *raw.Policy
|
||||
var err error
|
||||
err = runWithRetry(ctx, func() error {
|
||||
rp, err = req.Context(ctx).Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return iamFromStoragePolicy(rp), nil
|
||||
}
|
||||
|
||||
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) error {
|
||||
rp := iamToStoragePolicy(p)
|
||||
req := c.raw.Buckets.SetIamPolicy(resource, rp)
|
||||
setClientHeader(req.Header())
|
||||
return runWithRetry(ctx, func() error {
|
||||
_, err := req.Context(ctx).Do()
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
|
||||
req := c.raw.Buckets.TestIamPermissions(resource, perms)
|
||||
setClientHeader(req.Header())
|
||||
var res *raw.TestIamPermissionsResponse
|
||||
var err error
|
||||
err = runWithRetry(ctx, func() error {
|
||||
res, err = req.Context(ctx).Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.Permissions, nil
|
||||
}
|
||||
|
||||
// iamToStoragePolicy converts an iam protobuf policy to the raw
// JSON-API representation. The protobuf Etag bytes become a string.
func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy {
	return &raw.Policy{
		Bindings: iamToStorageBindings(ip.Bindings),
		Etag:     string(ip.Etag),
	}
}
|
||||
|
||||
func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings {
|
||||
var rbs []*raw.PolicyBindings
|
||||
for _, ib := range ibs {
|
||||
rbs = append(rbs, &raw.PolicyBindings{
|
||||
Role: ib.Role,
|
||||
Members: ib.Members,
|
||||
})
|
||||
}
|
||||
return rbs
|
||||
}
|
||||
|
||||
// iamFromStoragePolicy converts a raw JSON-API policy to the iam
// protobuf representation. The Etag string becomes protobuf bytes.
func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy {
	return &iampb.Policy{
		Bindings: iamFromStorageBindings(rp.Bindings),
		Etag:     []byte(rp.Etag),
	}
}
|
||||
|
||||
func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding {
|
||||
var ibs []*iampb.Binding
|
||||
for _, rb := range rbs {
|
||||
ibs = append(ibs, &iampb.Binding{
|
||||
Role: rb.Role,
|
||||
Members: rb.Members,
|
||||
})
|
||||
}
|
||||
return ibs
|
||||
}
|
1518
vendor/cloud.google.com/go/storage/integration_test.go
generated
vendored
Normal file
1518
vendor/cloud.google.com/go/storage/integration_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
43
vendor/cloud.google.com/go/storage/invoke.go
generated
vendored
Normal file
43
vendor/cloud.google.com/go/storage/invoke.go
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"cloud.google.com/go/internal"
|
||||
gax "github.com/googleapis/gax-go"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
// runWithRetry calls the function until it returns nil or a non-retryable error, or
|
||||
// the context is done.
|
||||
func runWithRetry(ctx context.Context, call func() error) error {
|
||||
return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
|
||||
err = call()
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
e, ok := err.(*googleapi.Error)
|
||||
if !ok {
|
||||
return true, err
|
||||
}
|
||||
// Retry on 429 and 5xx, according to
|
||||
// https://cloud.google.com/storage/docs/exponential-backoff.
|
||||
if e.Code == 429 || (e.Code >= 500 && e.Code < 600) {
|
||||
return false, nil
|
||||
}
|
||||
return true, err
|
||||
})
|
||||
}
|
56
vendor/cloud.google.com/go/storage/invoke_test.go
generated
vendored
Normal file
56
vendor/cloud.google.com/go/storage/invoke_test.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
// TestInvoke verifies that runWithRetry retries googleapi errors with
// retryable codes (429, 5xx) and ultimately surfaces the final error.
func TestInvoke(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	// Time-based tests are flaky. We just make sure that invoke eventually
	// returns with the right error.

	for _, test := range []struct {
		count     int   // number of times to return retryable error
		retryCode int   // error code for retryable error
		err       error // error to return after count returns of retryCode
	}{
		{0, 0, nil},
		{0, 0, errors.New("foo")},
		{1, 429, nil},
		{1, 429, errors.New("bar")},
		{2, 518, nil},
		{2, 599, &googleapi.Error{Code: 428}},
	} {
		counter := 0
		// call fails test.count times with a retryable code, then
		// returns test.err (which may be nil or non-retryable).
		call := func() error {
			counter++
			if counter <= test.count {
				return &googleapi.Error{Code: test.retryCode}
			}
			return test.err
		}
		got := runWithRetry(ctx, call)
		if got != test.err {
			t.Errorf("%v: got %v, want %v", test, got, test.err)
		}
	}
}
|
26
vendor/cloud.google.com/go/storage/not_go17.go
generated
vendored
Normal file
26
vendor/cloud.google.com/go/storage/not_go17.go
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !go1.7
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// withContext is the pre-Go 1.7 fallback: *http.Request has no Context
// method there, so the supplied context is discarded and the request is
// returned unchanged.
func withContext(r *http.Request, _ interface{}) *http.Request {
	// In Go 1.6 and below, ignore the context.
	return r
}
|
74
vendor/cloud.google.com/go/storage/reader.go
generated
vendored
Normal file
74
vendor/cloud.google.com/go/storage/reader.go
generated
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
)
|
||||
|
||||
// crc32cTable is the Castagnoli-polynomial table used to compute the
// running CRC32c checksum of object content during reads.
var crc32cTable = crc32.MakeTable(crc32.Castagnoli)

// Reader reads a Cloud Storage object.
// It implements io.Reader.
type Reader struct {
	body         io.ReadCloser
	remain, size int64 // remain counts down as bytes are read; -1 when unknown (see Remain)
	contentType  string
	checkCRC     bool   // should we check the CRC?
	wantCRC      uint32 // the CRC32c value the server sent in the header
	gotCRC       uint32 // running crc
}
|
||||
|
||||
// Close closes the Reader. It must be called when done reading.
// It simply closes the underlying body.
func (r *Reader) Close() error {
	return r.body.Close()
}
|
||||
|
||||
func (r *Reader) Read(p []byte) (int, error) {
|
||||
n, err := r.body.Read(p)
|
||||
if r.remain != -1 {
|
||||
r.remain -= int64(n)
|
||||
}
|
||||
if r.checkCRC {
|
||||
r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n])
|
||||
// Check CRC here. It would be natural to check it in Close, but
|
||||
// everybody defers Close on the assumption that it doesn't return
|
||||
// anything worth looking at.
|
||||
if r.remain == 0 && r.gotCRC != r.wantCRC {
|
||||
return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
|
||||
r.gotCRC, r.wantCRC)
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Size returns the size of the object in bytes.
// The returned value is always the same and is not affected by
// calls to Read or Close.
func (r *Reader) Size() int64 {
	return r.size
}

// Remain returns the number of bytes left to read, or -1 if unknown.
func (r *Reader) Remain() int64 {
	return r.remain
}

// ContentType returns the content type of the object.
func (r *Reader) ContentType() string {
	return r.contentType
}
|
1118
vendor/cloud.google.com/go/storage/storage.go
generated
vendored
Normal file
1118
vendor/cloud.google.com/go/storage/storage.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
702
vendor/cloud.google.com/go/storage/storage_test.go
generated
vendored
Normal file
702
vendor/cloud.google.com/go/storage/storage_test.go
generated
vendored
Normal file
@@ -0,0 +1,702 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/testutil"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// TestSignedURL verifies that SignedURL produces the expected URL when
// signing with an RSA private key loaded from testdata.
func TestSignedURL(t *testing.T) {
	t.Parallel()
	expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00")
	url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{
		GoogleAccessID: "xxx@clientid",
		PrivateKey:     dummyKey("rsa"),
		Method:         "GET",
		MD5:            "ICy5YqxZB1uWSwcVLSNLcA==",
		Expires:        expires,
		ContentType:    "application/json",
		Headers:        []string{"x-header1", "x-header2"},
	})
	if err != nil {
		t.Error(err)
	}
	// Golden URL: signature precomputed with the same dummy key and inputs.
	want := "https://storage.googleapis.com/bucket-name/object-name?" +
		"Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" +
		"ZMw18bZVhySNYAMEX87RMyuZCUMtGLVi%2B2zU2ByiQ0Rxgij%2BhFZ5LsT" +
		"5ZPIH5h3QXB%2BiSb1URJnZo3aF0exVP%2FYR1hpg2e65w9HHt7yYjIqcg" +
		"%2FfAOIyxriFtgRYk3oAv%2FFLF62fI8iF%2BCp0fWSm%2FHggz22blVnQz" +
		"EtSP%2BuRhFle4172L%2B710sfMDtyQLKTz6W4TmRjC9ymTi8mVj95dZgyF" +
		"RXbibTdtw0JzndE0Ig4c6pU4xDPPiyaziUSVDMIpzZDJH1GYOGHxbFasba4" +
		"1rRoWWkdBnsMtHm2ck%2FsFD2leL6u8q0OpVAc4ZdxseucL4OpCy%2BCLhQ" +
		"JFQT5bqSljP0g%3D%3D"
	if url != want {
		t.Fatalf("Unexpected signed URL; found %v", url)
	}
}
|
||||
|
||||
// TestSignedURL_PEMPrivateKey verifies that SignedURL accepts a
// PEM-encoded private key, not just the raw RSA form.
func TestSignedURL_PEMPrivateKey(t *testing.T) {
	t.Parallel()
	expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00")
	url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{
		GoogleAccessID: "xxx@clientid",
		PrivateKey:     dummyKey("pem"),
		Method:         "GET",
		MD5:            "ICy5YqxZB1uWSwcVLSNLcA==",
		Expires:        expires,
		ContentType:    "application/json",
		Headers:        []string{"x-header1", "x-header2"},
	})
	if err != nil {
		t.Error(err)
	}
	// Golden URL: signature precomputed with the PEM dummy key.
	want := "https://storage.googleapis.com/bucket-name/object-name?" +
		"Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" +
		"gHlh63sOxJnNj22X%2B%2F4kwOSNMeqwXWr4udEfrzJPQcq1xzxA8ovMM5SOrOc%" +
		"2FuE%2Ftc9%2Bq7a42CDBwZff1PsvuJMBDaPbluU257h%2Bvxx8lHMnb%2Bg1wD1" +
		"99FiCE014MRH9TlIg%2FdXRkErosVWTy4GqAgZemmKHo0HwDGT6IovB9mdg%3D"
	if url != want {
		t.Fatalf("Unexpected signed URL; found %v", url)
	}
}
|
||||
|
||||
// TestSignedURL_SignBytes verifies that a caller-supplied SignBytes
// function is used in place of a private key, and that its output is
// base64-encoded into the Signature parameter.
func TestSignedURL_SignBytes(t *testing.T) {
	t.Parallel()
	expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00")
	url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{
		GoogleAccessID: "xxx@clientid",
		SignBytes: func(b []byte) ([]byte, error) {
			return []byte("signed"), nil
		},
		Method:      "GET",
		MD5:         "ICy5YqxZB1uWSwcVLSNLcA==",
		Expires:     expires,
		ContentType: "application/json",
		Headers:     []string{"x-header1", "x-header2"},
	})
	if err != nil {
		t.Error(err)
	}
	want := "https://storage.googleapis.com/bucket-name/object-name?" +
		"Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" +
		"c2lnbmVk" // base64('signed') == 'c2lnbmVk'
	if url != want {
		t.Fatalf("Unexpected signed URL\ngot: %q\nwant: %q", url, want)
	}
}
|
||||
|
||||
// TestSignedURL_URLUnsafeObjectName verifies that object names containing
// spaces and non-ASCII runes are percent-encoded in the signed URL.
func TestSignedURL_URLUnsafeObjectName(t *testing.T) {
	t.Parallel()
	expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00")
	url, err := SignedURL("bucket-name", "object name界", &SignedURLOptions{
		GoogleAccessID: "xxx@clientid",
		PrivateKey:     dummyKey("pem"),
		Method:         "GET",
		MD5:            "ICy5YqxZB1uWSwcVLSNLcA==",
		Expires:        expires,
		ContentType:    "application/json",
		Headers:        []string{"x-header1", "x-header2"},
	})
	if err != nil {
		t.Error(err)
	}
	// Note the escaped space (%20) and UTF-8 bytes of 界 (%E7%95%8C).
	want := "https://storage.googleapis.com/bucket-name/object%20name%E7%95%8C?" +
		"Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" +
		"LSxs1YwXNKOa7mQv1ZAI2ao0Fuv6yXLLU7%2BQ97z2B7hYZ57OiFwQ72EdGXSiIM" +
		"JwLisEKkwoSlYCMm3uuTdgJtXXVi7SYXMfdeKaonyQwMv531KETCBTSewt8CW%2B" +
		"FaUJ5SEYG44SeJCiqeIr3GF7t90UNWs6TdFXDaKShpQzBGg%3D"
	if url != want {
		t.Fatalf("Unexpected signed URL; found %v", url)
	}
}
|
||||
|
||||
// TestSignedURL_MissingOptions verifies that SignedURL rejects option
// structs missing required fields (or with conflicting ones), checking
// that each error message names the offending field.
func TestSignedURL_MissingOptions(t *testing.T) {
	t.Parallel()
	pk := dummyKey("rsa")
	expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00")
	var tests = []struct {
		opts   *SignedURLOptions
		errMsg string // substring expected in the returned error
	}{
		{
			&SignedURLOptions{},
			"missing required GoogleAccessID",
		},
		{
			&SignedURLOptions{GoogleAccessID: "access_id"},
			"exactly one of PrivateKey or SignedBytes must be set",
		},
		{
			// Supplying both signing mechanisms is also an error.
			&SignedURLOptions{
				GoogleAccessID: "access_id",
				SignBytes:      func(b []byte) ([]byte, error) { return b, nil },
				PrivateKey:     pk,
			},
			"exactly one of PrivateKey or SignedBytes must be set",
		},
		{
			&SignedURLOptions{
				GoogleAccessID: "access_id",
				PrivateKey:     pk,
			},
			"missing required method",
		},
		{
			&SignedURLOptions{
				GoogleAccessID: "access_id",
				SignBytes:      func(b []byte) ([]byte, error) { return b, nil },
			},
			"missing required method",
		},
		{
			&SignedURLOptions{
				GoogleAccessID: "access_id",
				PrivateKey:     pk,
				Method:         "PUT",
			},
			"missing required expires",
		},
		{
			&SignedURLOptions{
				GoogleAccessID: "access_id",
				PrivateKey:     pk,
				Method:         "PUT",
				Expires:        expires,
				MD5:            "invalid",
			},
			"invalid MD5 checksum",
		},
	}
	for _, test := range tests {
		_, err := SignedURL("bucket", "name", test.opts)
		if !strings.Contains(err.Error(), test.errMsg) {
			t.Errorf("expected err: %v, found: %v", test.errMsg, err)
		}
	}
}
|
||||
|
||||
// dummyKey loads the throwaway signing key of the given kind ("rsa" or
// "pem") from the testdata directory. A missing fixture aborts the test
// binary, since every signing test depends on it.
func dummyKey(kind string) []byte {
	path := fmt.Sprintf("./testdata/dummy_%s", kind)
	key, err := ioutil.ReadFile(path)
	if err != nil {
		log.Fatal(err)
	}
	return key
}
|
||||
|
||||
// TestCopyToMissingFields verifies that Copier.Run rejects copies where
// either the source or destination bucket/object name is empty, without
// issuing a real request (the client uses fakeTransport).
func TestCopyToMissingFields(t *testing.T) {
	t.Parallel()
	var tests = []struct {
		srcBucket, srcName, destBucket, destName string
		errMsg                                   string // substring expected in the returned error
	}{
		{
			"mybucket", "", "mybucket", "destname",
			"name is empty",
		},
		{
			"mybucket", "srcname", "mybucket", "",
			"name is empty",
		},
		{
			"", "srcfile", "mybucket", "destname",
			"name is empty",
		},
		{
			"mybucket", "srcfile", "", "destname",
			"name is empty",
		},
	}
	ctx := context.Background()
	// fakeTransport ensures no network traffic; validation must fail first.
	client, err := NewClient(ctx, option.WithHTTPClient(&http.Client{Transport: &fakeTransport{}}))
	if err != nil {
		panic(err)
	}
	for i, test := range tests {
		src := client.Bucket(test.srcBucket).Object(test.srcName)
		dst := client.Bucket(test.destBucket).Object(test.destName)
		_, err := dst.CopierFrom(src).Run(ctx)
		if !strings.Contains(err.Error(), test.errMsg) {
			t.Errorf("CopyTo test #%v:\ngot err %q\nwant err %q", i, err, test.errMsg)
		}
	}
}
|
||||
|
||||
// TestObjectNames verifies that SignedURL percent-encodes a wide range of
// legal-but-awkward object names (URL metacharacters, non-Latin scripts,
// control characters, Unicode normalization forms) into the URL path.
func TestObjectNames(t *testing.T) {
	t.Parallel()
	// Naming requirements: https://cloud.google.com/storage/docs/bucket-naming
	const maxLegalLength = 1024

	type testT struct {
		name, want string // raw object name and its expected escaped form
	}
	tests := []testT{
		// Embedded characters important in URLs.
		{"foo % bar", "foo%20%25%20bar"},
		{"foo ? bar", "foo%20%3F%20bar"},
		{"foo / bar", "foo%20/%20bar"},
		{"foo %?/ bar", "foo%20%25%3F/%20bar"},

		// Non-Roman scripts
		{"타코", "%ED%83%80%EC%BD%94"},
		{"世界", "%E4%B8%96%E7%95%8C"},

		// Longest legal name
		{strings.Repeat("a", maxLegalLength), strings.Repeat("a", maxLegalLength)},

		// Line terminators besides CR and LF: https://en.wikipedia.org/wiki/Newline#Unicode
		{"foo \u000b bar", "foo%20%0B%20bar"},
		{"foo \u000c bar", "foo%20%0C%20bar"},
		{"foo \u0085 bar", "foo%20%C2%85%20bar"},
		{"foo \u2028 bar", "foo%20%E2%80%A8%20bar"},
		{"foo \u2029 bar", "foo%20%E2%80%A9%20bar"},

		// Null byte.
		{"foo \u0000 bar", "foo%20%00%20bar"},

		// Non-control characters that are discouraged, but not forbidden, according to the documentation.
		{"foo # bar", "foo%20%23%20bar"},
		{"foo []*? bar", "foo%20%5B%5D%2A%3F%20bar"},

		// Angstrom symbol singleton and normalized forms: http://unicode.org/reports/tr15/
		{"foo \u212b bar", "foo%20%E2%84%AB%20bar"},
		{"foo \u0041\u030a bar", "foo%20A%CC%8A%20bar"},
		{"foo \u00c5 bar", "foo%20%C3%85%20bar"},

		// Hangul separating jamo: http://www.unicode.org/versions/Unicode7.0.0/ch18.pdf (Table 18-10)
		{"foo \u3131\u314f bar", "foo%20%E3%84%B1%E3%85%8F%20bar"},
		{"foo \u1100\u1161 bar", "foo%20%E1%84%80%E1%85%A1%20bar"},
		{"foo \uac00 bar", "foo%20%EA%B0%80%20bar"},
	}

	// C0 control characters not forbidden by the docs.
	var runes []rune
	for r := rune(0x01); r <= rune(0x1f); r++ {
		if r != '\u000a' && r != '\u000d' {
			runes = append(runes, r)
		}
	}
	tests = append(tests, testT{fmt.Sprintf("foo %s bar", string(runes)), "foo%20%01%02%03%04%05%06%07%08%09%0B%0C%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%20bar"})

	// C1 control characters, plus DEL.
	runes = nil
	for r := rune(0x7f); r <= rune(0x9f); r++ {
		runes = append(runes, r)
	}
	tests = append(tests, testT{fmt.Sprintf("foo %s bar", string(runes)), "foo%20%7F%C2%80%C2%81%C2%82%C2%83%C2%84%C2%85%C2%86%C2%87%C2%88%C2%89%C2%8A%C2%8B%C2%8C%C2%8D%C2%8E%C2%8F%C2%90%C2%91%C2%92%C2%93%C2%94%C2%95%C2%96%C2%97%C2%98%C2%99%C2%9A%C2%9B%C2%9C%C2%9D%C2%9E%C2%9F%20bar"})

	// The signing options are fixed; only the object name varies per case.
	opts := &SignedURLOptions{
		GoogleAccessID: "xxx@clientid",
		PrivateKey:     dummyKey("rsa"),
		Method:         "GET",
		MD5:            "ICy5YqxZB1uWSwcVLSNLcA==",
		Expires:        time.Date(2002, time.October, 2, 10, 0, 0, 0, time.UTC),
		ContentType:    "application/json",
		Headers:        []string{"x-header1", "x-header2"},
	}

	for _, test := range tests {
		g, err := SignedURL("bucket-name", test.name, opts)
		if err != nil {
			t.Errorf("SignedURL(%q) err=%v, want nil", test.name, err)
		}
		// Only the path segment is checked; the signature varies per name.
		if w := "/bucket-name/" + test.want; !strings.Contains(g, w) {
			t.Errorf("SignedURL(%q)=%q, want substring %q", test.name, g, w)
		}
	}
}
|
||||
|
||||
func TestCondition(t *testing.T) {
|
||||
t.Parallel()
|
||||
gotReq := make(chan *http.Request, 1)
|
||||
hc, close := newTestServer(func(w http.ResponseWriter, r *http.Request) {
|
||||
io.Copy(ioutil.Discard, r.Body)
|
||||
gotReq <- r
|
||||
w.WriteHeader(200)
|
||||
})
|
||||
defer close()
|
||||
ctx := context.Background()
|
||||
c, err := NewClient(ctx, option.WithHTTPClient(hc))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
obj := c.Bucket("buck").Object("obj")
|
||||
dst := c.Bucket("dstbuck").Object("dst")
|
||||
tests := []struct {
|
||||
fn func()
|
||||
want string
|
||||
}{
|
||||
{
|
||||
func() { obj.Generation(1234).NewReader(ctx) },
|
||||
"GET /buck/obj?generation=1234",
|
||||
},
|
||||
{
|
||||
func() { obj.If(Conditions{GenerationMatch: 1234}).NewReader(ctx) },
|
||||
"GET /buck/obj?ifGenerationMatch=1234",
|
||||
},
|
||||
{
|
||||
func() { obj.If(Conditions{GenerationNotMatch: 1234}).NewReader(ctx) },
|
||||
"GET /buck/obj?ifGenerationNotMatch=1234",
|
||||
},
|
||||
{
|
||||
func() { obj.If(Conditions{MetagenerationMatch: 1234}).NewReader(ctx) },
|
||||
"GET /buck/obj?ifMetagenerationMatch=1234",
|
||||
},
|
||||
{
|
||||
func() { obj.If(Conditions{MetagenerationNotMatch: 1234}).NewReader(ctx) },
|
||||
"GET /buck/obj?ifMetagenerationNotMatch=1234",
|
||||
},
|
||||
{
|
||||
func() { obj.If(Conditions{MetagenerationNotMatch: 1234}).Attrs(ctx) },
|
||||
"GET /storage/v1/b/buck/o/obj?alt=json&ifMetagenerationNotMatch=1234&projection=full",
|
||||
},
|
||||
|
||||
{
|
||||
func() { obj.If(Conditions{MetagenerationMatch: 1234}).Update(ctx, ObjectAttrsToUpdate{}) },
|
||||
"PATCH /storage/v1/b/buck/o/obj?alt=json&ifMetagenerationMatch=1234&projection=full",
|
||||
},
|
||||
{
|
||||
func() { obj.Generation(1234).Delete(ctx) },
|
||||
"DELETE /storage/v1/b/buck/o/obj?alt=json&generation=1234",
|
||||
},
|
||||
{
|
||||
func() {
|
||||
w := obj.If(Conditions{GenerationMatch: 1234}).NewWriter(ctx)
|
||||
w.ContentType = "text/plain"
|
||||
w.Close()
|
||||
},
|
||||
"POST /upload/storage/v1/b/buck/o?alt=json&ifGenerationMatch=1234&projection=full&uploadType=multipart",
|
||||
},
|
||||
{
|
||||
func() {
|
||||
w := obj.If(Conditions{DoesNotExist: true}).NewWriter(ctx)
|
||||
w.ContentType = "text/plain"
|
||||
w.Close()
|
||||
},
|
||||
"POST /upload/storage/v1/b/buck/o?alt=json&ifGenerationMatch=0&projection=full&uploadType=multipart",
|
||||
},
|
||||
{
|
||||
func() {
|
||||
dst.If(Conditions{MetagenerationMatch: 5678}).CopierFrom(obj.If(Conditions{GenerationMatch: 1234})).Run(ctx)
|
||||
},
|
||||
"POST /storage/v1/b/buck/o/obj/rewriteTo/b/dstbuck/o/dst?alt=json&ifMetagenerationMatch=5678&ifSourceGenerationMatch=1234&projection=full",
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
tt.fn()
|
||||
select {
|
||||
case r := <-gotReq:
|
||||
got := r.Method + " " + r.RequestURI
|
||||
if got != tt.want {
|
||||
t.Errorf("%d. RequestURI = %q; want %q", i, got, tt.want)
|
||||
}
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatalf("%d. timeout", i)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Test an error, too:
|
||||
err = obj.Generation(1234).NewWriter(ctx).Close()
|
||||
if err == nil || !strings.Contains(err.Error(), "NewWriter: generation not supported") {
|
||||
t.Errorf("want error about unsupported generation; got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConditionErrors(t *testing.T) {
|
||||
t.Parallel()
|
||||
for _, conds := range []Conditions{
|
||||
{GenerationMatch: 0},
|
||||
{DoesNotExist: false}, // same as above, actually
|
||||
{GenerationMatch: 1, GenerationNotMatch: 2},
|
||||
{GenerationNotMatch: 2, DoesNotExist: true},
|
||||
{MetagenerationMatch: 1, MetagenerationNotMatch: 2},
|
||||
} {
|
||||
if err := conds.validate(""); err == nil {
|
||||
t.Errorf("%+v: got nil, want error", conds)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test object compose.
//
// Each case drives ComposerFrom/Run against a fake server and checks both
// the request URL (including precondition query parameters) and the JSON
// ComposeRequest body; error cases check only that Run fails.
func TestObjectCompose(t *testing.T) {
	t.Parallel()
	gotURL := make(chan string, 1)
	gotBody := make(chan []byte, 1)
	hc, close := newTestServer(func(w http.ResponseWriter, r *http.Request) {
		body, _ := ioutil.ReadAll(r.Body)
		gotURL <- r.URL.String()
		gotBody <- body
		w.Write([]byte("{}"))
	})
	defer close()
	ctx := context.Background()
	c, err := NewClient(ctx, option.WithHTTPClient(hc))
	if err != nil {
		t.Fatal(err)
	}

	testCases := []struct {
		desc    string
		dst     *ObjectHandle
		srcs    []*ObjectHandle
		attrs   *ObjectAttrs // optional attributes copied onto the composer
		wantReq raw.ComposeRequest
		wantURL string
		wantErr bool
	}{
		{
			desc: "basic case",
			dst:  c.Bucket("foo").Object("bar"),
			srcs: []*ObjectHandle{
				c.Bucket("foo").Object("baz"),
				c.Bucket("foo").Object("quux"),
			},
			wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json",
			wantReq: raw.ComposeRequest{
				Destination: &raw.Object{Bucket: "foo"},
				SourceObjects: []*raw.ComposeRequestSourceObjects{
					{Name: "baz"},
					{Name: "quux"},
				},
			},
		},
		{
			desc: "with object attrs",
			dst:  c.Bucket("foo").Object("bar"),
			srcs: []*ObjectHandle{
				c.Bucket("foo").Object("baz"),
				c.Bucket("foo").Object("quux"),
			},
			attrs: &ObjectAttrs{
				Name:        "not-bar",
				ContentType: "application/json",
			},
			wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json",
			wantReq: raw.ComposeRequest{
				Destination: &raw.Object{
					Bucket:      "foo",
					Name:        "not-bar",
					ContentType: "application/json",
				},
				SourceObjects: []*raw.ComposeRequestSourceObjects{
					{Name: "baz"},
					{Name: "quux"},
				},
			},
		},
		{
			// Destination conditions become query parameters; per-source
			// generations/conditions appear inside the request body.
			desc: "with conditions",
			dst: c.Bucket("foo").Object("bar").If(Conditions{
				GenerationMatch:     12,
				MetagenerationMatch: 34,
			}),
			srcs: []*ObjectHandle{
				c.Bucket("foo").Object("baz").Generation(56),
				c.Bucket("foo").Object("quux").If(Conditions{GenerationMatch: 78}),
			},
			wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json&ifGenerationMatch=12&ifMetagenerationMatch=34",
			wantReq: raw.ComposeRequest{
				Destination: &raw.Object{Bucket: "foo"},
				SourceObjects: []*raw.ComposeRequestSourceObjects{
					{
						Name:       "baz",
						Generation: 56,
					},
					{
						Name: "quux",
						ObjectPreconditions: &raw.ComposeRequestSourceObjectsObjectPreconditions{
							IfGenerationMatch: 78,
						},
					},
				},
			},
		},
		{
			desc:    "no sources",
			dst:     c.Bucket("foo").Object("bar"),
			wantErr: true,
		},
		{
			desc: "destination, no bucket",
			dst:  c.Bucket("").Object("bar"),
			srcs: []*ObjectHandle{
				c.Bucket("foo").Object("baz"),
			},
			wantErr: true,
		},
		{
			desc: "destination, no object",
			dst:  c.Bucket("foo").Object(""),
			srcs: []*ObjectHandle{
				c.Bucket("foo").Object("baz"),
			},
			wantErr: true,
		},
		{
			desc: "source, different bucket",
			dst:  c.Bucket("foo").Object("bar"),
			srcs: []*ObjectHandle{
				c.Bucket("otherbucket").Object("baz"),
			},
			wantErr: true,
		},
		{
			desc: "source, no object",
			dst:  c.Bucket("foo").Object("bar"),
			srcs: []*ObjectHandle{
				c.Bucket("foo").Object(""),
			},
			wantErr: true,
		},
		{
			desc: "destination, bad condition",
			dst:  c.Bucket("foo").Object("bar").Generation(12),
			srcs: []*ObjectHandle{
				c.Bucket("foo").Object("baz"),
			},
			wantErr: true,
		},
		{
			desc: "source, bad condition",
			dst:  c.Bucket("foo").Object("bar"),
			srcs: []*ObjectHandle{
				c.Bucket("foo").Object("baz").If(Conditions{MetagenerationMatch: 12}),
			},
			wantErr: true,
		},
	}

	for _, tt := range testCases {
		composer := tt.dst.ComposerFrom(tt.srcs...)
		if tt.attrs != nil {
			composer.ObjectAttrs = *tt.attrs
		}
		_, err := composer.Run(ctx)
		if gotErr := err != nil; gotErr != tt.wantErr {
			t.Errorf("%s: got error %v; want err %t", tt.desc, err, tt.wantErr)
			continue
		}
		if tt.wantErr {
			continue
		}
		// Success path: the fake server recorded exactly one URL and body.
		url, body := <-gotURL, <-gotBody
		if url != tt.wantURL {
			t.Errorf("%s: request URL\ngot %q\nwant %q", tt.desc, url, tt.wantURL)
		}
		var req raw.ComposeRequest
		if err := json.Unmarshal(body, &req); err != nil {
			t.Errorf("%s: json.Unmarshal %v (body %s)", tt.desc, err, body)
		}
		if !testutil.Equal(req, tt.wantReq) {
			// Print to JSON.
			wantReq, _ := json.Marshal(tt.wantReq)
			t.Errorf("%s: request body\ngot %s\nwant %s", tt.desc, body, wantReq)
		}
	}
}
|
||||
|
||||
// Test that ObjectIterator's Next and NextPage methods correctly terminate
|
||||
// if there is nothing to iterate over.
|
||||
func TestEmptyObjectIterator(t *testing.T) {
|
||||
t.Parallel()
|
||||
hClient, close := newTestServer(func(w http.ResponseWriter, r *http.Request) {
|
||||
io.Copy(ioutil.Discard, r.Body)
|
||||
fmt.Fprintf(w, "{}")
|
||||
})
|
||||
defer close()
|
||||
ctx := context.Background()
|
||||
client, err := NewClient(ctx, option.WithHTTPClient(hClient))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
it := client.Bucket("b").Objects(ctx, nil)
|
||||
_, err = it.Next()
|
||||
if err != iterator.Done {
|
||||
t.Errorf("got %v, want Done", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Test that BucketIterator's Next method correctly terminates if there is
|
||||
// nothing to iterate over.
|
||||
func TestEmptyBucketIterator(t *testing.T) {
|
||||
t.Parallel()
|
||||
hClient, close := newTestServer(func(w http.ResponseWriter, r *http.Request) {
|
||||
io.Copy(ioutil.Discard, r.Body)
|
||||
fmt.Fprintf(w, "{}")
|
||||
})
|
||||
defer close()
|
||||
ctx := context.Background()
|
||||
client, err := NewClient(ctx, option.WithHTTPClient(hClient))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
it := client.Buckets(ctx, "project")
|
||||
_, err = it.Next()
|
||||
if err != iterator.Done {
|
||||
t.Errorf("got %v, want Done", err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestCodecUint32(t *testing.T) {
|
||||
t.Parallel()
|
||||
for _, u := range []uint32{0, 1, 256, 0xFFFFFFFF} {
|
||||
s := encodeUint32(u)
|
||||
d, err := decodeUint32(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if d != u {
|
||||
t.Errorf("got %d, want input %d", d, u)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func newTestServer(handler func(w http.ResponseWriter, r *http.Request)) (*http.Client, func()) {
|
||||
ts := httptest.NewTLSServer(http.HandlerFunc(handler))
|
||||
tlsConf := &tls.Config{InsecureSkipVerify: true}
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: tlsConf,
|
||||
DialTLS: func(netw, addr string) (net.Conn, error) {
|
||||
return tls.Dial("tcp", ts.Listener.Addr().String(), tlsConf)
|
||||
},
|
||||
}
|
||||
return &http.Client{Transport: tr}, func() {
|
||||
tr.CloseIdleConnections()
|
||||
ts.Close()
|
||||
}
|
||||
}
|
39
vendor/cloud.google.com/go/storage/testdata/dummy_pem
generated
vendored
Normal file
39
vendor/cloud.google.com/go/storage/testdata/dummy_pem
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
Bag Attributes
|
||||
friendlyName: privatekey
|
||||
localKeyID: 54 69 6D 65 20 31 34 31 36 38 35 32 30 30 34 37 37 32
|
||||
Key Attributes: <No Attributes>
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIICXQIBAAKBgQCtCWMoJ2Bok2QoGFyU7A6IlGprO9QfUTT0jNrLkIbM5OWNIuDx
|
||||
64+PEaTS5g5m+2Hz/lmd5jJKanAH4dY9LZzsaYAPq1K17Gcmg1hEisYeKsgOcjYY
|
||||
kwRkV+natCTsC+tfWmS0voRh0jA1rI1J4MikceoHtgWdEuoHrrptRVpWKwIDAQAB
|
||||
AoGAKp3uQvx3vSnX+BwP6Um+RpsvHpwMoW3xue1bEdnVqW8SrlERz+NxZw40ZxDs
|
||||
KSbuuBZD4iTI7BUM5JQVnNm4FQY1YrPlWZLyI73Bj8RKTXrPdJheM/0r7xjiIXbQ
|
||||
7w4cUSM9rVugnI/rxF2kPIQTGYI+EG/6+P+k6VvgPmC0T/ECQQDUPskiS18WaY+i
|
||||
Koalbrb3GakaBoHrC1b4ln4CAv7fq7H4WvFvqi/2rxLhHYq31iwxYy8s7J7Sba1+
|
||||
5vwJ2TxZAkEA0LVfs3Q2VWZ+cM3bv0aYTalMXg6wT+LoNvk9HnOb0zQYajF3qm4G
|
||||
ZFdfEqvOkje0zQ4fcihARKyda/VY84UGIwJBAIZa0FvjNmgrnn7bSKzEbxHwrnkJ
|
||||
EYjGfuGR8mY3mzvfpiM+/oLfSslvfhX+62cALq18yco4ZzlxsFgaxAU//NECQDcS
|
||||
NN94YcHlGqYPW9W7/gI4EwOaoqFhwV6II71+SfbP/0U+KlJZV+xwNZEKrqZcdqPI
|
||||
/zkzL8ovNha/laokRrsCQQCyoPHGcBWj+VFbNoyQnX4tghc6rOY7n4pmpgQvU825
|
||||
TAM9vnYtSkKK/V56kEDNBO5LwiRsir95IUNclqqMKR1C
|
||||
-----END RSA PRIVATE KEY-----
|
||||
Bag Attributes
|
||||
friendlyName: privatekey
|
||||
localKeyID: 54 69 6D 65 20 31 34 31 36 38 35 32 30 30 34 37 37 32
|
||||
subject=/CN=1079432350659-nvog0vmn9s6pqr3kr4v2avbc7nkhoa11.apps.googleusercontent.com
|
||||
issuer=/CN=1079432350659-nvog0vmn9s6pqr3kr4v2avbc7nkhoa11.apps.googleusercontent.com
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIICXTCCAcagAwIBAgIIHxTMQUVJRZ0wDQYJKoZIhvcNAQEFBQAwVDFSMFAGA1UE
|
||||
AxNJMTA3OTQzMjM1MDY1OS1udm9nMHZtbjlzNnBxcjNrcjR2MmF2YmM3bmtob2Ex
|
||||
MS5hcHBzLmdvb2dsZXVzZXJjb250ZW50LmNvbTAeFw0xNDExMjQxODAwMDRaFw0y
|
||||
NDExMjExODAwMDRaMFQxUjBQBgNVBAMTSTEwNzk0MzIzNTA2NTktbnZvZzB2bW45
|
||||
czZwcXIza3I0djJhdmJjN25raG9hMTEuYXBwcy5nb29nbGV1c2VyY29udGVudC5j
|
||||
b20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAK0JYygnYGiTZCgYXJTsDoiU
|
||||
ams71B9RNPSM2suQhszk5Y0i4PHrj48RpNLmDmb7YfP+WZ3mMkpqcAfh1j0tnOxp
|
||||
gA+rUrXsZyaDWESKxh4qyA5yNhiTBGRX6dq0JOwL619aZLS+hGHSMDWsjUngyKRx
|
||||
6ge2BZ0S6geuum1FWlYrAgMBAAGjODA2MAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/
|
||||
BAQDAgeAMBYGA1UdJQEB/wQMMAoGCCsGAQUFBwMCMA0GCSqGSIb3DQEBBQUAA4GB
|
||||
ACVvKkZkomHq3uffOQwdZ4VJYuxrvDGnZu/ExW9WngO2teEsjxABL41TNnRYHN5T
|
||||
lMC19poFA2tR/DySDLJ2XNs/hSvyQUL6HHCncVdR4Srpie88j48peY1MZSMP51Jv
|
||||
qagbbP5K5DSEu02/zZaV0kaCvLEN0KAtj/noDuOOnQU2
|
||||
-----END CERTIFICATE-----
|
27
vendor/cloud.google.com/go/storage/testdata/dummy_rsa
generated
vendored
Normal file
27
vendor/cloud.google.com/go/storage/testdata/dummy_rsa
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEpAIBAAKCAQEAx4fm7dngEmOULNmAs1IGZ9Apfzh+BkaQ1dzkmbUgpcoghucE
|
||||
DZRnAGd2aPyB6skGMXUytWQvNYav0WTR00wFtX1ohWTfv68HGXJ8QXCpyoSKSSFY
|
||||
fuP9X36wBSkSX9J5DVgiuzD5VBdzUISSmapjKm+DcbRALjz6OUIPEWi1Tjl6p5RK
|
||||
1w41qdbmt7E5/kGhKLDuT7+M83g4VWhgIvaAXtnhklDAggilPPa8ZJ1IFe31lNlr
|
||||
k4DRk38nc6sEutdf3RL7QoH7FBusI7uXV03DC6dwN1kP4GE7bjJhcRb/7jYt7CQ9
|
||||
/E9Exz3c0yAp0yrTg0Fwh+qxfH9dKwN52S7SBwIDAQABAoIBAQCaCs26K07WY5Jt
|
||||
3a2Cw3y2gPrIgTCqX6hJs7O5ByEhXZ8nBwsWANBUe4vrGaajQHdLj5OKfsIDrOvn
|
||||
2NI1MqflqeAbu/kR32q3tq8/Rl+PPiwUsW3E6Pcf1orGMSNCXxeducF2iySySzh3
|
||||
nSIhCG5uwJDWI7a4+9KiieFgK1pt/Iv30q1SQS8IEntTfXYwANQrfKUVMmVF9aIK
|
||||
6/WZE2yd5+q3wVVIJ6jsmTzoDCX6QQkkJICIYwCkglmVy5AeTckOVwcXL0jqw5Kf
|
||||
5/soZJQwLEyBoQq7Kbpa26QHq+CJONetPP8Ssy8MJJXBT+u/bSseMb3Zsr5cr43e
|
||||
DJOhwsThAoGBAPY6rPKl2NT/K7XfRCGm1sbWjUQyDShscwuWJ5+kD0yudnT/ZEJ1
|
||||
M3+KS/iOOAoHDdEDi9crRvMl0UfNa8MAcDKHflzxg2jg/QI+fTBjPP5GOX0lkZ9g
|
||||
z6VePoVoQw2gpPFVNPPTxKfk27tEzbaffvOLGBEih0Kb7HTINkW8rIlzAoGBAM9y
|
||||
1yr+jvfS1cGFtNU+Gotoihw2eMKtIqR03Yn3n0PK1nVCDKqwdUqCypz4+ml6cxRK
|
||||
J8+Pfdh7D+ZJd4LEG6Y4QRDLuv5OA700tUoSHxMSNn3q9As4+T3MUyYxWKvTeu3U
|
||||
f2NWP9ePU0lV8ttk7YlpVRaPQmc1qwooBA/z/8AdAoGAW9x0HWqmRICWTBnpjyxx
|
||||
QGlW9rQ9mHEtUotIaRSJ6K/F3cxSGUEkX1a3FRnp6kPLcckC6NlqdNgNBd6rb2rA
|
||||
cPl/uSkZP42Als+9YMoFPU/xrrDPbUhu72EDrj3Bllnyb168jKLa4VBOccUvggxr
|
||||
Dm08I1hgYgdN5huzs7y6GeUCgYEAj+AZJSOJ6o1aXS6rfV3mMRve9bQ9yt8jcKXw
|
||||
5HhOCEmMtaSKfnOF1Ziih34Sxsb7O2428DiX0mV/YHtBnPsAJidL0SdLWIapBzeg
|
||||
KHArByIRkwE6IvJvwpGMdaex1PIGhx5i/3VZL9qiq/ElT05PhIb+UXgoWMabCp84
|
||||
OgxDK20CgYAeaFo8BdQ7FmVX2+EEejF+8xSge6WVLtkaon8bqcn6P0O8lLypoOhd
|
||||
mJAYH8WU+UAy9pecUnDZj14LAGNVmYcse8HFX71MoshnvCTFEPVo4rZxIAGwMpeJ
|
||||
5jgQ3slYLpqrGlcbLgUXBUgzEO684Wk/UV9DFPlHALVqCfXQ9dpJPg==
|
||||
-----END RSA PRIVATE KEY-----
|
192
vendor/cloud.google.com/go/storage/writer.go
generated
vendored
Normal file
192
vendor/cloud.google.com/go/storage/writer.go
generated
vendored
Normal file
@@ -0,0 +1,192 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/googleapi"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// A Writer writes a Cloud Storage object.
type Writer struct {
	// ObjectAttrs are optional attributes to set on the object. Any attributes
	// must be initialized before the first Write call. Nil or zero-valued
	// attributes are ignored.
	ObjectAttrs

	// SendCRC32C specifies whether to transmit a CRC32C field. It should be set
	// to true in addition to setting the Writer's CRC32C field, because zero
	// is a valid CRC and normally a zero would not be transmitted.
	SendCRC32C bool

	// ChunkSize controls the maximum number of bytes of the object that the
	// Writer will attempt to send to the server in a single request. Objects
	// smaller than the size will be sent in a single request, while larger
	// objects will be split over multiple requests. The size will be rounded up
	// to the nearest multiple of 256K. If zero, chunking will be disabled and
	// the object will be uploaded in a single request.
	//
	// ChunkSize will default to a reasonable value. Any custom configuration
	// must be done before the first Write call.
	ChunkSize int

	// ProgressFunc can be used to monitor the progress of a large write
	// operation. If ProgressFunc is not nil and writing requires multiple
	// calls to the underlying service (see
	// https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload),
	// then ProgressFunc will be invoked after each call with the number of bytes of
	// content copied so far.
	//
	// ProgressFunc should return quickly without blocking.
	ProgressFunc func(int64)

	ctx context.Context // context governing the upload; set at construction
	o   *ObjectHandle   // handle of the object being written

	opened bool           // whether open has run and the upload goroutine started
	pw     *io.PipeWriter // Write feeds this pipe; the upload goroutine reads the other end

	donec chan struct{} // closed after err and obj are set.
	err   error         // final result of the upload; read only after donec is closed
	obj   *ObjectAttrs  // attributes of the written object; read only after donec is closed
}
||||
|
||||
func (w *Writer) open() error {
|
||||
attrs := w.ObjectAttrs
|
||||
// Check the developer didn't change the object Name (this is unfortunate, but
|
||||
// we don't want to store an object under the wrong name).
|
||||
if attrs.Name != w.o.object {
|
||||
return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object)
|
||||
}
|
||||
if !utf8.ValidString(attrs.Name) {
|
||||
return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
|
||||
}
|
||||
pr, pw := io.Pipe()
|
||||
w.pw = pw
|
||||
w.opened = true
|
||||
|
||||
if w.ChunkSize < 0 {
|
||||
return errors.New("storage: Writer.ChunkSize must non-negative")
|
||||
}
|
||||
mediaOpts := []googleapi.MediaOption{
|
||||
googleapi.ChunkSize(w.ChunkSize),
|
||||
}
|
||||
if c := attrs.ContentType; c != "" {
|
||||
mediaOpts = append(mediaOpts, googleapi.ContentType(c))
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer close(w.donec)
|
||||
|
||||
rawObj := attrs.toRawObject(w.o.bucket)
|
||||
if w.SendCRC32C {
|
||||
rawObj.Crc32c = encodeUint32(attrs.CRC32C)
|
||||
}
|
||||
if w.MD5 != nil {
|
||||
rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5)
|
||||
}
|
||||
call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj).
|
||||
Media(pr, mediaOpts...).
|
||||
Projection("full").
|
||||
Context(w.ctx)
|
||||
if w.ProgressFunc != nil {
|
||||
call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
|
||||
w.err = err
|
||||
pr.CloseWithError(w.err)
|
||||
return
|
||||
}
|
||||
var resp *raw.Object
|
||||
err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
|
||||
if err == nil {
|
||||
if w.o.userProject != "" {
|
||||
call.UserProject(w.o.userProject)
|
||||
}
|
||||
setClientHeader(call.Header())
|
||||
// We will only retry here if the initial POST, which obtains a URI for
|
||||
// the resumable upload, fails with a retryable error. The upload itself
|
||||
// has its own retry logic.
|
||||
err = runWithRetry(w.ctx, func() error {
|
||||
var err2 error
|
||||
resp, err2 = call.Do()
|
||||
return err2
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
w.err = err
|
||||
pr.CloseWithError(w.err)
|
||||
return
|
||||
}
|
||||
w.obj = newObject(resp)
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write appends to w. It implements the io.Writer interface.
|
||||
//
|
||||
// Since writes happen asynchronously, Write may return a nil
|
||||
// error even though the write failed (or will fail). Always
|
||||
// use the error returned from Writer.Close to determine if
|
||||
// the upload was successful.
|
||||
func (w *Writer) Write(p []byte) (n int, err error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
if !w.opened {
|
||||
if err := w.open(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return w.pw.Write(p)
|
||||
}
|
||||
|
||||
// Close completes the write operation and flushes any buffered data.
|
||||
// If Close doesn't return an error, metadata about the written object
|
||||
// can be retrieved by calling Attrs.
|
||||
func (w *Writer) Close() error {
|
||||
if !w.opened {
|
||||
if err := w.open(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := w.pw.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
<-w.donec
|
||||
return w.err
|
||||
}
|
||||
|
||||
// CloseWithError aborts the write operation with the provided error.
// CloseWithError always returns nil.
func (w *Writer) CloseWithError(err error) error {
	// If open was never called, no upload is in flight and there is
	// nothing to abort.
	if !w.opened {
		return nil
	}
	return w.pw.CloseWithError(err)
}
|
||||
|
||||
// Attrs returns metadata about a successfully-written object.
// It's only valid to call it after Close returns nil.
func (w *Writer) Attrs() *ObjectAttrs {
	// obj is set by the upload goroutine before donec is closed, so it is
	// safe to read once Close has returned.
	return w.obj
}
|
132
vendor/cloud.google.com/go/storage/writer_test.go
generated
vendored
Normal file
132
vendor/cloud.google.com/go/storage/writer_test.go
generated
vendored
Normal file
@@ -0,0 +1,132 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"cloud.google.com/go/internal/testutil"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"google.golang.org/api/option"
|
||||
)
|
||||
|
||||
// fakeTransport is an http.RoundTripper test double: it records the most
// recent request passed to RoundTrip and replays canned responses queued
// via addResult.
type fakeTransport struct {
	gotReq  *http.Request     // last request seen by RoundTrip
	results []transportResult // queued responses, consumed front-to-back
}
|
||||
|
||||
// transportResult is one canned (response, error) pair for
// fakeTransport.RoundTrip to return.
type transportResult struct {
	res *http.Response // response to return
	err error          // error to return
}
|
||||
|
||||
func (t *fakeTransport) addResult(res *http.Response, err error) {
|
||||
t.results = append(t.results, transportResult{res, err})
|
||||
}
|
||||
|
||||
func (t *fakeTransport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
t.gotReq = req
|
||||
if len(t.results) == 0 {
|
||||
return nil, fmt.Errorf("error handling request")
|
||||
}
|
||||
result := t.results[0]
|
||||
t.results = t.results[1:]
|
||||
return result.res, result.err
|
||||
}
|
||||
|
||||
// TestErrorOnObjectsInsertCall checks Writer error propagation: Close must
// surface a transport failure, and the initial insert POST must be retried
// on a 5xx response.
func TestErrorOnObjectsInsertCall(t *testing.T) {
	t.Parallel()
	ctx := context.Background()

	// doWrite creates a client over hc and writes a small object,
	// returning the still-open Writer for the caller to Close.
	doWrite := func(hc *http.Client) *Writer {
		client, err := NewClient(ctx, option.WithHTTPClient(hc))
		if err != nil {
			t.Fatalf("error when creating client: %v", err)
		}
		wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx)
		wc.ContentType = "text/plain"

		// We can't check that the Write fails, since it depends on the write to the
		// underlying fakeTransport failing which is racy.
		wc.Write([]byte("hello world"))
		return wc
	}

	// An empty fakeTransport fails every request.
	wc := doWrite(&http.Client{Transport: &fakeTransport{}})
	// Close must always return an error though since it waits for the transport to
	// have closed.
	if err := wc.Close(); err == nil {
		t.Errorf("expected error on close, got nil")
	}

	// Retry on 5xx
	ft := &fakeTransport{}
	ft.addResult(&http.Response{
		StatusCode: 503,
		Body:       ioutil.NopCloser(&bytes.Buffer{}),
	}, nil)
	ft.addResult(&http.Response{
		StatusCode: 200,
		Body:       ioutil.NopCloser(strings.NewReader("{}")),
	}, nil)
	wc = doWrite(&http.Client{Transport: ft})
	if err := wc.Close(); err != nil {
		t.Errorf("got %v, want nil", err)
	}
}
|
||||
|
||||
// TestEncryption checks that writing through a handle configured with a
// customer-supplied encryption key sets the x-goog-encryption-* request
// headers: the AES256 algorithm, the base64-encoded key, and the
// base64-encoded SHA-256 of the key.
func TestEncryption(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	ft := &fakeTransport{}
	hc := &http.Client{Transport: ft}
	client, err := NewClient(ctx, option.WithHTTPClient(hc))
	if err != nil {
		t.Fatalf("error when creating client: %v", err)
	}
	obj := client.Bucket("bucketname").Object("filename1")
	key := []byte("secret-key-that-is-32-bytes-long")
	wc := obj.Key(key).NewWriter(ctx)
	// TODO(jba): use something other than fakeTransport, which always returns error.
	wc.Write([]byte("hello world"))
	wc.Close()
	if got, want := ft.gotReq.Header.Get("x-goog-encryption-algorithm"), "AES256"; got != want {
		t.Errorf("algorithm: got %q, want %q", got, want)
	}
	gotKey, err := base64.StdEncoding.DecodeString(ft.gotReq.Header.Get("x-goog-encryption-key"))
	if err != nil {
		t.Fatalf("decoding key: %v", err)
	}
	if !testutil.Equal(gotKey, key) {
		t.Errorf("key: got %v, want %v", gotKey, key)
	}
	wantHash := sha256.Sum256(key)
	gotHash, err := base64.StdEncoding.DecodeString(ft.gotReq.Header.Get("x-goog-encryption-key-sha256"))
	if err != nil {
		t.Fatalf("decoding hash: %v", err)
	}
	if !testutil.Equal(gotHash, wantHash[:]) { // wantHash is an array
		t.Errorf("hash: got\n%v, want\n%v", gotHash, wantHash)
	}
}
|
Reference in New Issue
Block a user