taildrop: merge taildrop and feature/taildrop packages together
Fixes #15812

Change-Id: I3bf0666bf9e7a9caea5f0f99fdb0eb2812157608
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
commit 5b597489bc
parent 068d5ab655
committed by Brad Fitzpatrick
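The shape of the change at call sites: code that previously imported
tailscale.com/taildrop and used its exported API now lives inside the
feature/taildrop package and uses the unexported names. A minimal sketch of
the before/after, using only identifiers that appear in this diff (the helper
function itself is hypothetical, not part of the commit):

	// Before the merge, from outside the package:
	//
	//	import "tailscale.com/taildrop"
	//	mgr := taildrop.ManagerOptions{Logf: logf, Dir: dir}.New()
	//	defer mgr.Shutdown()
	//
	// After the merge, inside feature/taildrop:
	func newManagerForDir(logf logger.Logf, dir string) *manager {
		return managerOptions{
			Logf: logf,
			Dir:  dir,
		}.New()
	}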
@@ -908,7 +908,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
 tailscale.com/sessionrecording from tailscale.com/k8s-operator/sessionrecording+
 tailscale.com/syncs from tailscale.com/control/controlknobs+
 tailscale.com/tailcfg from tailscale.com/client/local+
-tailscale.com/taildrop from tailscale.com/feature/taildrop
 tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal
 tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock
 tailscale.com/tempfork/httprec from tailscale.com/control/controlclient
@@ -359,7 +359,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
 LD 💣 tailscale.com/ssh/tailssh from tailscale.com/cmd/tailscaled
 tailscale.com/syncs from tailscale.com/cmd/tailscaled+
 tailscale.com/tailcfg from tailscale.com/client/local+
-tailscale.com/taildrop from tailscale.com/feature/taildrop
 tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal
 LD tailscale.com/tempfork/gliderlabs/ssh from tailscale.com/ssh/tailssh
 tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock
@@ -47,7 +47,7 @@ type deleteFile struct {
 	inserted time.Time
 }
 
-func (d *fileDeleter) Init(m *Manager, eventHook func(string)) {
+func (d *fileDeleter) Init(m *manager, eventHook func(string)) {
 	d.logf = m.opts.Logf
 	d.clock = m.opts.Clock
 	d.dir = m.opts.Dir
@@ -81,7 +81,7 @@ func (d *fileDeleter) Init(m *Manager, eventHook func(string)) {
 			// Only enqueue the file for deletion if there is no active put.
 			nameID := strings.TrimSuffix(de.Name(), partialSuffix)
 			if i := strings.LastIndexByte(nameID, '.'); i > 0 {
-				key := incomingFileKey{ClientID(nameID[i+len("."):]), nameID[:i]}
+				key := incomingFileKey{clientID(nameID[i+len("."):]), nameID[:i]}
 				m.incomingFiles.LoadFunc(key, func(_ *incomingFile, loaded bool) {
 					if !loaded {
 						d.Insert(de.Name())
@@ -69,7 +69,7 @@ func TestDeleter(t *testing.T) {
 	}
 	eventHook := func(event string) { eventsChan <- event }
 
-	var m Manager
+	var m manager
 	var fd fileDeleter
 	m.opts.Logf = t.Logf
 	m.opts.Clock = tstime.DefaultClock{Clock: clock}
@@ -142,7 +142,7 @@ func TestDeleter(t *testing.T) {
 // Test that the asynchronous full scan of the taildrop directory does not occur
 // on a cold start if taildrop has never received any files.
 func TestDeleterInitWithoutTaildrop(t *testing.T) {
-	var m Manager
+	var m manager
 	var fd fileDeleter
 	m.opts.Logf = t.Logf
 	m.opts.Dir = t.TempDir()
@@ -22,7 +22,6 @@ import (
 	"tailscale.com/ipn/ipnext"
 	"tailscale.com/ipn/ipnstate"
 	"tailscale.com/tailcfg"
-	"tailscale.com/taildrop"
 	"tailscale.com/tstime"
 	"tailscale.com/types/empty"
 	"tailscale.com/types/logger"
@@ -72,7 +71,7 @@ type Extension struct {
 	selfUID        tailcfg.UserID
 	capFileSharing bool
 	fileWaiters    set.HandleSet[context.CancelFunc] // of wake-up funcs
-	mgr            atomic.Pointer[taildrop.Manager] // mutex held to write; safe to read without lock;
+	mgr            atomic.Pointer[manager] // mutex held to write; safe to read without lock;
 	// outgoingFiles keeps track of Taildrop outgoing files keyed to their OutgoingFile.ID
 	outgoingFiles map[string]*ipn.OutgoingFile
 }
@@ -113,7 +112,7 @@ func (e *Extension) onSelfChange(self tailcfg.NodeView) {
 	osshare.SetFileSharingEnabled(e.capFileSharing, e.logf)
 }
 
-func (e *Extension) setMgrLocked(mgr *taildrop.Manager) {
+func (e *Extension) setMgrLocked(mgr *manager) {
 	if old := e.mgr.Swap(mgr); old != nil {
 		old.Shutdown()
 	}
@@ -141,7 +140,7 @@ func (e *Extension) onChangeProfile(profile ipn.LoginProfileView, _ ipn.PrefsVie
 	if fileRoot == "" {
 		e.logf("no Taildrop directory configured")
 	}
-	e.setMgrLocked(taildrop.ManagerOptions{
+	e.setMgrLocked(managerOptions{
 		Logf:  e.logf,
 		Clock: tstime.DefaultClock{Clock: e.sb.Clock()},
 		State: e.stateStore,
@@ -191,10 +190,10 @@ func (e *Extension) hasCapFileSharing() bool {
 	return e.capFileSharing
 }
 
-// manager returns the active taildrop.Manager, or nil.
+// manager returns the active Manager, or nil.
 //
 // Methods on a nil Manager are safe to call.
-func (e *Extension) manager() *taildrop.Manager {
+func (e *Extension) manager() *manager {
 	return e.mgr.Load()
 }
 
@@ -24,7 +24,6 @@ import (
 	"tailscale.com/ipn/ipnlocal"
 	"tailscale.com/ipn/localapi"
 	"tailscale.com/tailcfg"
-	"tailscale.com/taildrop"
 	"tailscale.com/util/clientmetric"
 	"tailscale.com/util/httphdr"
 	"tailscale.com/util/mak"
@@ -320,7 +319,7 @@ func singleFilePut(
 	default:
 		resumeStart := time.Now()
 		dec := json.NewDecoder(resp.Body)
-		offset, remainingBody, err = taildrop.ResumeReader(body, func() (out taildrop.BlockChecksum, err error) {
+		offset, remainingBody, err = resumeReader(body, func() (out blockChecksum, err error) {
 			err = dec.Decode(&out)
 			return out, err
 		})
@@ -14,7 +14,6 @@ import (
 
 	"tailscale.com/ipn/ipnlocal"
 	"tailscale.com/tailcfg"
-	"tailscale.com/taildrop"
 	"tailscale.com/tstime"
 	"tailscale.com/util/clientmetric"
 	"tailscale.com/util/httphdr"
@@ -49,7 +48,7 @@ func handlePeerPut(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Req
 // extensionForPut is the subset of taildrop extension that taildrop
 // file put needs. This is pulled out for testability.
 type extensionForPut interface {
-	manager() *taildrop.Manager
+	manager() *manager
 	hasCapFileSharing() bool
 	Clock() tstime.Clock
 }
@@ -67,11 +66,11 @@ func handlePeerPutWithBackend(h ipnlocal.PeerAPIHandler, ext extensionForPut, w
 	}
 
 	if !canPutFile(h) {
-		http.Error(w, taildrop.ErrNoTaildrop.Error(), http.StatusForbidden)
+		http.Error(w, ErrNoTaildrop.Error(), http.StatusForbidden)
 		return
 	}
 	if !ext.hasCapFileSharing() {
-		http.Error(w, taildrop.ErrNoTaildrop.Error(), http.StatusForbidden)
+		http.Error(w, ErrNoTaildrop.Error(), http.StatusForbidden)
 		return
 	}
 	rawPath := r.URL.EscapedPath()
@@ -82,13 +81,13 @@ func handlePeerPutWithBackend(h ipnlocal.PeerAPIHandler, ext extensionForPut, w
 	}
 	baseName, err := url.PathUnescape(prefix)
 	if err != nil {
-		http.Error(w, taildrop.ErrInvalidFileName.Error(), http.StatusBadRequest)
+		http.Error(w, ErrInvalidFileName.Error(), http.StatusBadRequest)
 		return
 	}
 	enc := json.NewEncoder(w)
 	switch r.Method {
 	case "GET":
-		id := taildrop.ClientID(h.Peer().StableID())
+		id := clientID(h.Peer().StableID())
 		if prefix == "" {
 			// List all the partial files.
 			files, err := taildropMgr.PartialFiles(id)
@@ -128,7 +127,7 @@ func handlePeerPutWithBackend(h ipnlocal.PeerAPIHandler, ext extensionForPut, w
 		}
 	case "PUT":
 		t0 := ext.Clock().Now()
-		id := taildrop.ClientID(h.Peer().StableID())
+		id := clientID(h.Peer().StableID())
 
 		var offset int64
 		if rangeHdr := r.Header.Get("Range"); rangeHdr != "" {
@@ -139,17 +138,17 @@ func handlePeerPutWithBackend(h ipnlocal.PeerAPIHandler, ext extensionForPut, w
 			}
 			offset = ranges[0].Start
 		}
-		n, err := taildropMgr.PutFile(taildrop.ClientID(fmt.Sprint(id)), baseName, r.Body, offset, r.ContentLength)
+		n, err := taildropMgr.PutFile(clientID(fmt.Sprint(id)), baseName, r.Body, offset, r.ContentLength)
 		switch err {
 		case nil:
 			d := ext.Clock().Since(t0).Round(time.Second / 10)
 			h.Logf("got put of %s in %v from %v/%v", approxSize(n), d, h.RemoteAddr().Addr(), h.Peer().ComputedName)
 			io.WriteString(w, "{}\n")
-		case taildrop.ErrNoTaildrop:
+		case ErrNoTaildrop:
 			http.Error(w, err.Error(), http.StatusForbidden)
-		case taildrop.ErrInvalidFileName:
+		case ErrInvalidFileName:
 			http.Error(w, err.Error(), http.StatusBadRequest)
-		case taildrop.ErrFileExists:
+		case ErrFileExists:
 			http.Error(w, err.Error(), http.StatusConflict)
 		default:
 			http.Error(w, err.Error(), http.StatusInternalServerError)
@@ -21,7 +21,6 @@ import (
 	"tailscale.com/client/tailscale/apitype"
 	"tailscale.com/ipn/ipnlocal"
 	"tailscale.com/tailcfg"
-	"tailscale.com/taildrop"
 	"tailscale.com/tstest"
 	"tailscale.com/tstime"
 	"tailscale.com/types/logger"
@@ -54,10 +53,10 @@ type fakeExtension struct {
 	logf           logger.Logf
 	capFileSharing bool
 	clock          tstime.Clock
-	taildrop       *taildrop.Manager
+	taildrop       *manager
 }
 
-func (lb *fakeExtension) manager() *taildrop.Manager {
+func (lb *fakeExtension) manager() *manager {
 	return lb.taildrop
 }
 func (lb *fakeExtension) Clock() tstime.Clock { return lb.clock }
@@ -66,7 +65,7 @@ func (lb *fakeExtension) hasCapFileSharing() bool {
 }
 
 type peerAPITestEnv struct {
-	taildrop *taildrop.Manager
+	taildrop *manager
 	ph       *peerAPIHandler
 	rr       *httptest.ResponseRecorder
 	logBuf   tstest.MemLogger
@@ -477,7 +476,7 @@ func TestHandlePeerAPI(t *testing.T) {
 			}
 
 			var e peerAPITestEnv
-			e.taildrop = taildrop.ManagerOptions{
+			e.taildrop = managerOptions{
 				Logf: e.logBuf.Logf,
 				Dir:  rootDir,
 			}.New()
@@ -526,7 +525,7 @@
 // a bit. So test that we work around that sufficiently.
 func TestFileDeleteRace(t *testing.T) {
 	dir := t.TempDir()
-	taildropMgr := taildrop.ManagerOptions{
+	taildropMgr := managerOptions{
 		Logf: t.Logf,
 		Dir:  dir,
 	}.New()
@@ -19,29 +19,29 @@ var (
 	hashAlgorithm = "sha256"
 )
 
-// BlockChecksum represents the checksum for a single block.
-type BlockChecksum struct {
-	Checksum  Checksum `json:"checksum"`
+// blockChecksum represents the checksum for a single block.
+type blockChecksum struct {
+	Checksum  checksum `json:"checksum"`
 	Algorithm string   `json:"algo"` // always "sha256" for now
 	Size      int64    `json:"size"` // always (64<<10) for now
 }
 
-// Checksum is an opaque checksum that is comparable.
-type Checksum struct{ cs [sha256.Size]byte }
+// checksum is an opaque checksum that is comparable.
+type checksum struct{ cs [sha256.Size]byte }
 
-func hash(b []byte) Checksum {
-	return Checksum{sha256.Sum256(b)}
+func hash(b []byte) checksum {
+	return checksum{sha256.Sum256(b)}
 }
-func (cs Checksum) String() string {
+func (cs checksum) String() string {
 	return hex.EncodeToString(cs.cs[:])
 }
-func (cs Checksum) AppendText(b []byte) ([]byte, error) {
+func (cs checksum) AppendText(b []byte) ([]byte, error) {
 	return hex.AppendEncode(b, cs.cs[:]), nil
 }
-func (cs Checksum) MarshalText() ([]byte, error) {
+func (cs checksum) MarshalText() ([]byte, error) {
 	return hex.AppendEncode(nil, cs.cs[:]), nil
 }
-func (cs *Checksum) UnmarshalText(b []byte) error {
+func (cs *checksum) UnmarshalText(b []byte) error {
 	if len(b) != 2*len(cs.cs) {
 		return fmt.Errorf("invalid hex length: %d", len(b))
 	}
@@ -51,7 +51,7 @@ func (cs *Checksum) UnmarshalText(b []byte) error {
 
 // PartialFiles returns a list of partial files in [Handler.Dir]
 // that were sent (or is actively being sent) by the provided id.
-func (m *Manager) PartialFiles(id ClientID) (ret []string, err error) {
+func (m *manager) PartialFiles(id clientID) (ret []string, err error) {
 	if m == nil || m.opts.Dir == "" {
 		return nil, ErrNoTaildrop
 	}
@@ -72,11 +72,11 @@ func (m *Manager) PartialFiles(id ClientID) (ret []string, err error) {
 // starting from the beginning of the file.
 // It returns (BlockChecksum{}, io.EOF) when the stream is complete.
 // It is the caller's responsibility to call close.
-func (m *Manager) HashPartialFile(id ClientID, baseName string) (next func() (BlockChecksum, error), close func() error, err error) {
+func (m *manager) HashPartialFile(id clientID, baseName string) (next func() (blockChecksum, error), close func() error, err error) {
 	if m == nil || m.opts.Dir == "" {
 		return nil, nil, ErrNoTaildrop
 	}
-	noopNext := func() (BlockChecksum, error) { return BlockChecksum{}, io.EOF }
+	noopNext := func() (blockChecksum, error) { return blockChecksum{}, io.EOF }
 	noopClose := func() error { return nil }
 
 	dstFile, err := joinDir(m.opts.Dir, baseName)
@@ -92,25 +92,25 @@ func (m *Manager) HashPartialFile(id ClientID, baseName string) (next func() (Bl
 	}
 
 	b := make([]byte, blockSize) // TODO: Pool this?
-	next = func() (BlockChecksum, error) {
+	next = func() (blockChecksum, error) {
 		switch n, err := io.ReadFull(f, b); {
 		case err != nil && err != io.EOF && err != io.ErrUnexpectedEOF:
-			return BlockChecksum{}, redactError(err)
+			return blockChecksum{}, redactError(err)
 		case n == 0:
-			return BlockChecksum{}, io.EOF
+			return blockChecksum{}, io.EOF
 		default:
-			return BlockChecksum{hash(b[:n]), hashAlgorithm, int64(n)}, nil
+			return blockChecksum{hash(b[:n]), hashAlgorithm, int64(n)}, nil
 		}
 	}
 	close = f.Close
 	return next, close, nil
 }
 
-// ResumeReader reads and discards the leading content of r
+// resumeReader reads and discards the leading content of r
 // that matches the content based on the checksums that exist.
 // It returns the number of bytes consumed,
 // and returns an [io.Reader] representing the remaining content.
-func ResumeReader(r io.Reader, hashNext func() (BlockChecksum, error)) (int64, io.Reader, error) {
+func resumeReader(r io.Reader, hashNext func() (blockChecksum, error)) (int64, io.Reader, error) {
 	if hashNext == nil {
 		return 0, r, nil
 	}
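For context on how these renamed helpers cooperate (this is the flow that
TestResume below drives in one process, and that singleFilePut above performs
across the peer API): the receiving side produces per-block checksums of the
partial file it already holds (HashPartialFile), and the sending side uses
them (resumeReader) to discard the already-transferred prefix before
continuing the upload (PutFile). A rough in-package sketch built only from
identifiers in this diff; the helper is hypothetical and error handling is
minimal:

	// resumeTransfer resumes an interrupted transfer of baseName from client id.
	// r is positioned at the start of the file content being re-sent.
	func resumeTransfer(m *manager, id clientID, baseName string, r io.Reader) error {
		next, closeHash, err := m.HashPartialFile(id, baseName)
		if err != nil {
			return err
		}
		// Skip the prefix of r whose block checksums match what is already on disk.
		offset, remaining, err := resumeReader(r, next)
		closeHash() // the partial file handle must be closed before it can be renamed
		if err != nil {
			return err
		}
		// Resume writing at offset; a negative length means it is unknown.
		_, err = m.PutFile(id, baseName, remaining, offset, -1)
		return err
	}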
@@ -19,7 +19,7 @@ func TestResume(t *testing.T) {
 	defer func() { blockSize = oldBlockSize }()
 	blockSize = 256
 
-	m := ManagerOptions{Logf: t.Logf, Dir: t.TempDir()}.New()
+	m := managerOptions{Logf: t.Logf, Dir: t.TempDir()}.New()
 	defer m.Shutdown()
 
 	rn := rand.New(rand.NewSource(0))
@@ -32,7 +32,7 @@ func TestResume(t *testing.T) {
 		next, close, err := m.HashPartialFile("", "foo")
 		must.Do(err)
 		defer close()
-		offset, r, err := ResumeReader(r, next)
+		offset, r, err := resumeReader(r, next)
 		must.Do(err)
 		must.Do(close()) // Windows wants the file handle to be closed to rename it.
 
@@ -51,7 +51,7 @@ func TestResume(t *testing.T) {
 		next, close, err := m.HashPartialFile("", "bar")
 		must.Do(err)
 		defer close()
-		offset, r, err := ResumeReader(r, next)
+		offset, r, err := resumeReader(r, next)
 		must.Do(err)
 		must.Do(close()) // Windows wants the file handle to be closed to rename it.
 
@@ -20,7 +20,7 @@ import (
 
 // HasFilesWaiting reports whether any files are buffered in [Handler.Dir].
 // This always returns false when [Handler.DirectFileMode] is false.
-func (m *Manager) HasFilesWaiting() (has bool) {
+func (m *manager) HasFilesWaiting() (has bool) {
 	if m == nil || m.opts.Dir == "" || m.opts.DirectFileMode {
 		return false
 	}
@@ -61,7 +61,7 @@ func (m *Manager) HasFilesWaiting() (has bool) {
 // WaitingFiles returns the list of files that have been sent by a
 // peer that are waiting in [Handler.Dir].
 // This always returns nil when [Handler.DirectFileMode] is false.
-func (m *Manager) WaitingFiles() (ret []apitype.WaitingFile, err error) {
+func (m *manager) WaitingFiles() (ret []apitype.WaitingFile, err error) {
 	if m == nil || m.opts.Dir == "" {
 		return nil, ErrNoTaildrop
 	}
@@ -94,7 +94,7 @@ func (m *Manager) WaitingFiles() (ret []apitype.WaitingFile, err error) {
 
 // DeleteFile deletes a file of the given baseName from [Handler.Dir].
 // This method is only allowed when [Handler.DirectFileMode] is false.
-func (m *Manager) DeleteFile(baseName string) error {
+func (m *manager) DeleteFile(baseName string) error {
 	if m == nil || m.opts.Dir == "" {
 		return ErrNoTaildrop
 	}
@@ -151,7 +151,7 @@ func touchFile(path string) error {
 
 // OpenFile opens a file of the given baseName from [Handler.Dir].
 // This method is only allowed when [Handler.DirectFileMode] is false.
-func (m *Manager) OpenFile(baseName string) (rc io.ReadCloser, size int64, err error) {
+func (m *manager) OpenFile(baseName string) (rc io.ReadCloser, size int64, err error) {
 	if m == nil || m.opts.Dir == "" {
 		return nil, 0, ErrNoTaildrop
 	}
@@ -19,7 +19,7 @@ import (
 )
 
 type incomingFileKey struct {
-	id   ClientID
+	id   clientID
 	name string // e.g., "foo.jpeg"
 }
 
@@ -61,19 +61,19 @@ func (f *incomingFile) Write(p []byte) (n int, err error) {
 	return n, err
 }
 
-// PutFile stores a file into [Manager.Dir] from a given client id.
+// PutFile stores a file into [manager.Dir] from a given client id.
 // The baseName must be a base filename without any slashes.
 // The length is the expected length of content to read from r,
 // it may be negative to indicate that it is unknown.
 // It returns the length of the entire file.
 //
 // If there is a failure reading from r, then the partial file is not deleted
-// for some period of time. The [Manager.PartialFiles] and [Manager.HashPartialFile]
+// for some period of time. The [manager.PartialFiles] and [manager.HashPartialFile]
 // methods may be used to list all partial files and to compute the hash for a
 // specific partial file. This allows the client to determine whether to resume
 // a partial file. While resuming, PutFile may be called again with a non-zero
 // offset to specify where to resume receiving data at.
-func (m *Manager) PutFile(id ClientID, baseName string, r io.Reader, offset, length int64) (int64, error) {
+func (m *manager) PutFile(id clientID, baseName string, r io.Reader, offset, length int64) (int64, error) {
 	switch {
 	case m == nil || m.opts.Dir == "":
 		return 0, ErrNoTaildrop
@@ -227,7 +227,7 @@ func (m *Manager) PutFile(id ClientID, baseName string, r io.Reader, offset, len
 			}
 
 			// Choose a new destination filename and try again.
-			dstPath = NextFilename(dstPath)
+			dstPath = nextFilename(dstPath)
 			inFile.finalPath = dstPath
 		}
 		if maxRetries <= 0 {
@@ -54,20 +54,20 @@ const (
 	deletedSuffix = ".deleted"
 )
 
-// ClientID is an opaque identifier for file resumption.
+// clientID is an opaque identifier for file resumption.
 // A client can only list and resume partial files for its own ID.
 // It must contain any filesystem specific characters (e.g., slashes).
-type ClientID string // e.g., "n12345CNTRL"
+type clientID string // e.g., "n12345CNTRL"
 
-func (id ClientID) partialSuffix() string {
+func (id clientID) partialSuffix() string {
 	if id == "" {
 		return partialSuffix
 	}
 	return "." + string(id) + partialSuffix // e.g., ".n12345CNTRL.partial"
 }
 
-// ManagerOptions are options to configure the [Manager].
-type ManagerOptions struct {
+// managerOptions are options to configure the [manager].
+type managerOptions struct {
 	Logf   logger.Logf         // may be nil
 	Clock  tstime.DefaultClock // may be nil
 	State  ipn.StateStore      // may be nil
@@ -98,9 +98,9 @@ type ManagerOptions struct {
 	SendFileNotify func()
 }
 
-// Manager manages the state for receiving and managing taildropped files.
-type Manager struct {
-	opts ManagerOptions
+// manager manages the state for receiving and managing taildropped files.
+type manager struct {
+	opts managerOptions
 
 	// incomingFiles is a map of files actively being received.
 	incomingFiles syncs.Map[incomingFileKey, *incomingFile]
@@ -120,27 +120,27 @@ type Manager struct {
 // New initializes a new taildrop manager.
 // It may spawn asynchronous goroutines to delete files,
 // so the Shutdown method must be called for resource cleanup.
-func (opts ManagerOptions) New() *Manager {
+func (opts managerOptions) New() *manager {
 	if opts.Logf == nil {
 		opts.Logf = logger.Discard
 	}
 	if opts.SendFileNotify == nil {
 		opts.SendFileNotify = func() {}
 	}
-	m := &Manager{opts: opts}
+	m := &manager{opts: opts}
 	m.deleter.Init(m, func(string) {})
 	m.emptySince.Store(-1) // invalidate this cache
 	return m
 }
 
 // Dir returns the directory.
-func (m *Manager) Dir() string {
+func (m *manager) Dir() string {
 	return m.opts.Dir
 }
 
 // Shutdown shuts down the Manager.
 // It blocks until all spawned goroutines have stopped running.
-func (m *Manager) Shutdown() {
+func (m *manager) Shutdown() {
 	if m != nil {
 		m.deleter.shutdown()
 		m.deleter.group.Wait()
@@ -222,7 +222,7 @@ func rangeDir(dir string, fn func(fs.DirEntry) bool) error {
 }
 
 // IncomingFiles returns a list of active incoming files.
-func (m *Manager) IncomingFiles() []ipn.PartialFile {
+func (m *manager) IncomingFiles() []ipn.PartialFile {
 	// Make sure we always set n.IncomingFiles non-nil so it gets encoded
 	// in JSON to clients. They distinguish between empty and non-nil
 	// to know whether a Notify should be able about files.
@@ -318,12 +318,12 @@ var (
 	rxNumberSuffix = regexp.MustCompile(` \([0-9]+\)`)
 )
 
-// NextFilename returns the next filename in a sequence.
+// nextFilename returns the next filename in a sequence.
 // It is used for construction a new filename if there is a conflict.
 //
 // For example, "Foo.jpg" becomes "Foo (1).jpg" and
 // "Foo (1).jpg" becomes "Foo (2).jpg".
-func NextFilename(name string) string {
+func nextFilename(name string) string {
 	ext := rxExtensionSuffix.FindString(strings.TrimPrefix(name, "."))
 	name = strings.TrimSuffix(name, ext)
 	var n uint64
@@ -59,10 +59,10 @@ func TestNextFilename(t *testing.T) {
 	}
 
 	for _, tt := range tests {
-		if got := NextFilename(tt.in); got != tt.want {
+		if got := nextFilename(tt.in); got != tt.want {
 			t.Errorf("NextFilename(%q) = %q, want %q", tt.in, got, tt.want)
 		}
-		if got2 := NextFilename(tt.want); got2 != tt.want2 {
+		if got2 := nextFilename(tt.want); got2 != tt.want2 {
 			t.Errorf("NextFilename(%q) = %q, want %q", tt.want, got2, tt.want2)
 		}
 	}