// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

// Package logtail sends logs to log.tailscale.io.
package logtail

import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"io"
	"log"
	mrand "math/rand/v2"
	"net/http"
	"net/netip"
	"os"
	"regexp"
	"runtime"
	"slices"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"github.com/go-json-experiment/json/jsontext"

	"tailscale.com/envknob"
	"tailscale.com/net/netmon"
	"tailscale.com/net/sockstats"
	"tailscale.com/net/tsaddr"
	"tailscale.com/tstime"
	tslogger "tailscale.com/types/logger"
	"tailscale.com/types/logid"
	"tailscale.com/util/set"
	"tailscale.com/util/truncate"
	"tailscale.com/util/zstdframe"
)

// maxSize is the maximum size that a single log entry can be.
// It is also the maximum body size that may be uploaded at a time.
const maxSize = 256 << 10

// maxTextSize is the maximum size for a text log message.
// Note that JSON log messages can be as large as maxSize.
const maxTextSize = 16 << 10

// lowMemRatio reduces maxSize and maxTextSize by this ratio in lowMem mode.
const lowMemRatio = 4
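
// For reference, a rough sketch of the effective limits when Config.LowMemory
// is set (illustrative arithmetic only; the enforcement lives in drainPending
// and the write paths):
//
//	maxSize / lowMemRatio     = (256 << 10) / 4 = 64 KiB per upload batch
//	maxTextSize / lowMemRatio = (16 << 10) / 4  =  4 KiB per text message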

// bufferSize is the typical buffer size to retain.
// It is large enough to handle most log messages,
// but not too large to be a notable waste of memory if retained forever.
const bufferSize = 4 << 10

// DefaultHost is the default host name to upload logs to when
// Config.BaseURL isn't provided.
const DefaultHost = "log.tailscale.io"

const defaultFlushDelay = 2 * time.Second

const (
	// CollectionNode is the name of a logtail Config.Collection
	// for tailscaled (or equivalent: IPNExtension, Android app).
	CollectionNode = "tailnode.log.tailscale.io"
)

type Config struct {
	Collection     string          // collection name, a domain name
	PrivateID      logid.PrivateID // private ID for the primary log stream
	CopyPrivateID  logid.PrivateID // private ID for a log stream that is a superset of this log stream
	BaseURL        string          // if empty defaults to "https://log.tailscale.io"
	HTTPC          *http.Client    // if empty defaults to http.DefaultClient
	SkipClientTime bool            // if true, client_time is not written to logs
	LowMemory      bool            // if true, logtail minimizes memory use
	Clock          tstime.Clock    // if set, Clock.Now substitutes uses of time.Now
	Stderr         io.Writer       // if set, logs are sent here instead of os.Stderr
	StderrLevel    int             // max verbosity level to write to stderr; 0 means the non-verbose messages only
	Buffer         Buffer          // temp storage, if nil a MemoryBuffer
	CompressLogs   bool            // whether to compress the log uploads

	// MetricsDelta, if non-nil, is a func that returns an encoding
	// delta in clientmetrics to upload alongside existing logs.
	// It can return either an empty string (for nothing) or a string
	// that's safe to embed in a JSON string literal without further escaping.
	MetricsDelta func() string

	// FlushDelayFn, if non-nil, is a func that returns how long to wait to
	// accumulate logs before uploading them. 0 or negative means to upload
	// immediately.
	//
	// If nil, a default value is used (currently 2 seconds).
	FlushDelayFn func() time.Duration

	// IncludeProcID, if true, results in an ephemeral process identifier being
	// included in logs. The ID is random and not guaranteed to be globally
	// unique, but it can be used to distinguish between different instances
	// running with the same PrivateID.
	IncludeProcID bool

	// IncludeProcSequence, if true, results in an ephemeral sequence number
	// being included in the logs. The sequence number is incremented for each
	// log message sent, but is not persisted across process restarts.
	IncludeProcSequence bool
}

// NewLogger creates a Logger configured by cfg, starts its background
// uploading goroutine, and returns it.
func NewLogger(cfg Config, logf tslogger.Logf) *Logger {
	if cfg.BaseURL == "" {
		cfg.BaseURL = "https://" + DefaultHost
	}
	if cfg.HTTPC == nil {
		cfg.HTTPC = http.DefaultClient
	}
	if cfg.Clock == nil {
		cfg.Clock = tstime.StdClock{}
	}
	if cfg.Stderr == nil {
		cfg.Stderr = os.Stderr
	}
	if cfg.Buffer == nil {
		pendingSize := 256
		if cfg.LowMemory {
			pendingSize = 64
		}
		cfg.Buffer = NewMemoryBuffer(pendingSize)
	}
	var procID uint32
	if cfg.IncludeProcID {
		keyBytes := make([]byte, 4)
		rand.Read(keyBytes)
		procID = binary.LittleEndian.Uint32(keyBytes)
		if procID == 0 {
			// 0 is the empty/off value, assign a different (non-zero) value to
			// make sure we still include an ID (actual value does not matter).
			procID = 7
		}
	}
	if s := envknob.String("TS_DEBUG_LOGTAIL_FLUSHDELAY"); s != "" {
		if delay, err := time.ParseDuration(s); err == nil {
			cfg.FlushDelayFn = func() time.Duration { return delay }
		} else {
			log.Fatalf("invalid TS_DEBUG_LOGTAIL_FLUSHDELAY: %v", err)
		}
	} else if cfg.FlushDelayFn == nil && envknob.Bool("IN_TS_TEST") {
		cfg.FlushDelayFn = func() time.Duration { return 0 }
	}

	var urlSuffix string
	if !cfg.CopyPrivateID.IsZero() {
		urlSuffix = "?copyId=" + cfg.CopyPrivateID.String()
	}
	l := &Logger{
		privateID:      cfg.PrivateID,
		stderr:         cfg.Stderr,
		stderrLevel:    int64(cfg.StderrLevel),
		httpc:          cfg.HTTPC,
		url:            cfg.BaseURL + "/c/" + cfg.Collection + "/" + cfg.PrivateID.String() + urlSuffix,
		lowMem:         cfg.LowMemory,
		buffer:         cfg.Buffer,
		skipClientTime: cfg.SkipClientTime,
		drainWake:      make(chan struct{}, 1),
		sentinel:       make(chan int32, 16),
		flushDelayFn:   cfg.FlushDelayFn,
		clock:          cfg.Clock,
		metricsDelta:   cfg.MetricsDelta,

		procID:              procID,
		includeProcSequence: cfg.IncludeProcSequence,

		shutdownStart: make(chan struct{}),
		shutdownDone:  make(chan struct{}),
	}
	l.SetSockstatsLabel(sockstats.LabelLogtailLogger)
	l.compressLogs = cfg.CompressLogs

	ctx, cancel := context.WithCancel(context.Background())
	l.uploadCancel = cancel

	go l.uploading(ctx)
	l.Write([]byte("logtail started"))
	return l
}
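
// A minimal usage sketch (illustrative only; a real caller generates a
// logid.PrivateID once, persists it, and passes it in rather than leaving it
// as a zero value as below):
//
//	var priv logid.PrivateID // normally generated once and persisted
//	lg := NewLogger(Config{
//		Collection:   CollectionNode,
//		PrivateID:    priv,
//		CompressLogs: true,
//	}, log.Printf)
//	defer lg.Shutdown(context.Background())
//	lg.Write([]byte("hello from logtail"))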

// Logger writes logs, splitting them as configured between local
// logging facilities and uploading to a log server.
type Logger struct {
	stderr         io.Writer
	stderrLevel    int64 // accessed atomically
	httpc          *http.Client
	url            string
	lowMem         bool
	skipClientTime bool
	netMonitor     *netmon.Monitor
	buffer         Buffer
	drainWake      chan struct{}        // signal to speed up drain
	drainBuf       []byte               // owned by drainPending for reuse
	flushDelayFn   func() time.Duration // negative or zero return value to upload aggressively, or >0 to batch at this delay
	flushPending   atomic.Bool
	sentinel       chan int32
	clock          tstime.Clock
	compressLogs   bool
	uploadCancel   func()
	explainedRaw   bool
	metricsDelta   func() string // or nil
	privateID      logid.PrivateID
	httpDoCalls    atomic.Int32
	sockstatsLabel atomicSocktatsLabel

	procID              uint32
	includeProcSequence bool

	writeLock    sync.Mutex // guards procSequence, flushTimer, buffer.Write calls
	procSequence uint64
	flushTimer   tstime.TimerController // used when flushDelay is >0
	writeBuf     [bufferSize]byte       // owned by Write for reuse
	jsonDec      jsontext.Decoder       // owned by appendTextOrJSONLocked for reuse

	shutdownStartMu sync.Mutex    // guards the closing of shutdownStart
	shutdownStart   chan struct{} // closed when shutdown begins
	shutdownDone    chan struct{} // closed when shutdown complete
}

type atomicSocktatsLabel struct{ p atomic.Uint32 }

func (p *atomicSocktatsLabel) Load() sockstats.Label       { return sockstats.Label(p.p.Load()) }
func (p *atomicSocktatsLabel) Store(label sockstats.Label) { p.p.Store(uint32(label)) }

// SetVerbosityLevel controls the verbosity level that should be
// written to stderr. 0 is the default (not verbose). Levels 1 or higher
// are increasingly verbose.
func (l *Logger) SetVerbosityLevel(level int) {
	atomic.StoreInt64(&l.stderrLevel, int64(level))
}

// SetNetMon sets the network monitor.
//
// It should not be changed concurrently with log writes and should
// only be set once.
func (l *Logger) SetNetMon(lm *netmon.Monitor) {
	l.netMonitor = lm
}

// SetSockstatsLabel sets the label used in sockstat logs to identify network traffic from this logger.
func (l *Logger) SetSockstatsLabel(label sockstats.Label) {
	l.sockstatsLabel.Store(label)
}

// PrivateID returns the logger's private log ID.
//
// It exists for internal use only.
func (l *Logger) PrivateID() logid.PrivateID { return l.privateID }

// Shutdown gracefully shuts down the logger while completing any
// remaining uploads.
//
// It blocks, continuing to try to upload, unless the passed
// context is done (canceled or timed out).
// If the shutdown is interrupted, an error is returned.
func (l *Logger) Shutdown(ctx context.Context) error {
	done := make(chan struct{})
	go func() {
		select {
		case <-ctx.Done():
			l.uploadCancel()
			<-l.shutdownDone
		case <-l.shutdownDone:
		}
		close(done)
	}()

	l.shutdownStartMu.Lock()
	select {
	case <-l.shutdownStart:
		l.shutdownStartMu.Unlock()
		return nil
	default:
	}
	close(l.shutdownStart)
	l.shutdownStartMu.Unlock()

	io.WriteString(l, "logger closing down\n")
	<-done

	return nil
}
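
// For callers that want shutdown to give up after a bounded wait, a sketch
// (the timeout value is arbitrary):
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	if err := lg.Shutdown(ctx); err != nil {
//		// some pending logs may not have been uploaded
//	}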

// Close shuts down this logger object, the background log uploader
// process, and any associated goroutines.
//
// Deprecated: use Shutdown
func (l *Logger) Close() {
	l.Shutdown(context.Background())
}

// drainBlock is called by drainPending when there are no logs to drain.
//
// In typical operation, every call to the Write method unblocks and triggers a
// buffer.TryReadLine, so logs are written with very low latency.
//
// If the caller specified FlushDelayFn, drainWake is only signaled
// periodically.
func (l *Logger) drainBlock() (shuttingDown bool) {
	select {
	case <-l.shutdownStart:
		return true
	case <-l.drainWake:
	}
	return false
}

// drainPending drains and encodes a batch of logs from the buffer for upload.
// If no logs are available, drainPending blocks until logs are available.
// The returned buffer is only valid until the next call to drainPending.
func (l *Logger) drainPending() (b []byte) {
	b = l.drainBuf[:0]
	b = append(b, '[')
	defer func() {
		b = bytes.TrimRight(b, ",")
		b = append(b, ']')
		l.drainBuf = b
		if len(b) <= len("[]") {
			b = nil
		}
	}()

	maxLen := maxSize
	if l.lowMem {
		// When operating in a low memory environment, it is better to upload
		// in multiple operations than it is to allocate a large body and OOM.
		// Even if maxLen is less than maxSize, we can still upload an entry
		// that is up to maxSize if we happen to encounter one.
		maxLen /= lowMemRatio
	}
	for len(b) < maxLen {
		line, err := l.buffer.TryReadLine()
		switch {
		case err == io.EOF:
			return b
		case err != nil:
			b = append(b, '{')
			b = l.appendMetadata(b, false, true, 0, 0, "reading ringbuffer: "+err.Error(), nil, 0)
			b = bytes.TrimRight(b, ",")
			b = append(b, '}')
			return b
		case line == nil:
			// If we read at least some log entries, return immediately.
			if len(b) > len("[") {
				return b
			}

			// We're about to block. If we're holding on to too much memory
			// in our buffer from a previous large write, let it go.
			if cap(b) > bufferSize {
				b = bytes.Clone(b)
				l.drainBuf = b
			}

			if shuttingDown := l.drainBlock(); shuttingDown {
				return b
			}
			continue
		}

		switch {
		case len(line) == 0:
			continue
		case line[0] == '{' && jsontext.Value(line).IsValid():
			// This is already a valid JSON object, so just append it.
			// This may exceed maxLen, but should be no larger than maxSize
			// so long as logic writing into the buffer enforces the limit.
			b = append(b, line...)
		default:
			// This is probably a log added to stderr by filch
			// outside of the logtail logger. Encode it.
			if !l.explainedRaw {
				fmt.Fprintf(l.stderr, "RAW-STDERR: ***\n")
				fmt.Fprintf(l.stderr, "RAW-STDERR: *** Lines prefixed with RAW-STDERR below bypassed logtail and probably come from a previous run of the program\n")
				fmt.Fprintf(l.stderr, "RAW-STDERR: ***\n")
				fmt.Fprintf(l.stderr, "RAW-STDERR:\n")
				l.explainedRaw = true
			}
			fmt.Fprintf(l.stderr, "RAW-STDERR: %s", b)
			// Do not add a client time, as it could be really old.
			// Do not include instance key or ID either,
			// since this came from a different instance.
			b = l.appendText(b, line, true, 0, 0, 0)
		}
		b = append(b, ',')
	}
	return b
}
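
// For illustration, a drained body is a JSON array of log objects, roughly of
// the shape below (field values are hypothetical; appendMetadata and the text
// encoding paths determine the members actually written):
//
//	[{"logtail":{"client_time":"2006-01-02T15:04:05Z"},"text":"logtail started"},
//	 {"logtail":{"client_time":"2006-01-02T15:04:05Z"},"text":"..."}]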

// This is the goroutine that repeatedly uploads logs in the background.
func (l *Logger) uploading(ctx context.Context) {
	defer close(l.shutdownDone)

	for {
		body := l.drainPending()
		origlen := -1 // sentinel value: uncompressed
		// Don't attempt to compress tiny bodies; not worth the CPU cycles.
		if l.compressLogs && len(body) > 256 {
			zbody := zstdframe.AppendEncode(nil, body,
				zstdframe.FastestCompression, zstdframe.LowMemory(true))

			// Only send it compressed if the bandwidth savings are sufficient.
			// Just the extra headers associated with enabling compression
			// are 50 bytes by themselves.
			if len(body)-len(zbody) > 64 {
				origlen = len(body)
				body = zbody
			}
		}

		var lastError string
		var numFailures int
		var firstFailure time.Time
		for len(body) > 0 && ctx.Err() == nil {
			retryAfter, err := l.upload(ctx, body, origlen)
			if err != nil {
				numFailures++
				firstFailure = l.clock.Now()

				if !l.internetUp() {
					fmt.Fprintf(l.stderr, "logtail: internet down; waiting\n")
					l.awaitInternetUp(ctx)
					continue
				}

				// Only print the same message once.
				if currError := err.Error(); lastError != currError {
					fmt.Fprintf(l.stderr, "logtail: upload: %v\n", err)
					lastError = currError
				}

				// Sleep for the specified retryAfter period; if the server did
				// not specify one, default to a random delay.
				if retryAfter <= 0 {
					retryAfter = mrand.N(30*time.Second) + 30*time.Second
				}
				tstime.Sleep(ctx, retryAfter)
			} else {
				// Only print a success message after recovery.
				if numFailures > 0 {
					fmt.Fprintf(l.stderr, "logtail: upload succeeded after %d failures and %s\n", numFailures, l.clock.Since(firstFailure).Round(time.Second))
				}
				break
			}
		}

		select {
		case <-l.shutdownStart:
			return
		default:
		}
	}
}

func (l *Logger) internetUp() bool {
	if l.netMonitor == nil {
		// No way to tell, so assume it is.
		return true
	}
	return l.netMonitor.InterfaceState().AnyInterfaceUp()
}

func (l *Logger) awaitInternetUp(ctx context.Context) {
	upc := make(chan bool, 1)
	defer l.netMonitor.RegisterChangeCallback(func(delta *netmon.ChangeDelta) {
		if delta.New.AnyInterfaceUp() {
			select {
			case upc <- true:
			default:
			}
		}
	})()
	if l.internetUp() {
		return
	}
	select {
	case <-upc:
		fmt.Fprintf(l.stderr, "logtail: internet back up\n")
	case <-ctx.Done():
	}
}

// upload uploads body to the log server.
// origlen indicates the pre-compression body length.
// origlen of -1 indicates that the body is not compressed.
func (l *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAfter time.Duration, err error) {
	const maxUploadTime = 45 * time.Second
	ctx = sockstats.WithSockStats(ctx, l.sockstatsLabel.Load(), l.Logf)
	ctx, cancel := context.WithTimeout(ctx, maxUploadTime)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, "POST", l.url, bytes.NewReader(body))
	if err != nil {
		// I know of no conditions under which this could fail.
		// Report it very loudly.
		// TODO record logs to disk
		panic("logtail: cannot build http request: " + err.Error())
	}
	if origlen != -1 {
		req.Header.Add("Content-Encoding", "zstd")
		req.Header.Add("Orig-Content-Length", strconv.Itoa(origlen))
	}
	if runtime.GOOS == "js" {
		// We once advertised we'd accept optional client certs (for internal use)
		// on log.tailscale.io but then Tailscale SSH js/wasm clients prompted
		// users (on some browsers?) to pick a client cert. We'll fix the server's
		// TLS ServerHello, but we can also fix it client side for good measure.
		//
		// Corp details: https://github.com/tailscale/corp/issues/18177#issuecomment-2026598715
		// and https://github.com/tailscale/corp/pull/18775#issuecomment-2027505036
		//
		// See https://github.com/golang/go/wiki/WebAssembly#configuring-fetch-options-while-using-nethttp
		// and https://developer.mozilla.org/en-US/docs/Web/API/fetch#credentials
		req.Header.Set("js.fetch:credentials", "omit")
	}
	req.Header["User-Agent"] = nil // not worth writing one; save some bytes

	compressedNote := "not-compressed"
	if origlen != -1 {
		compressedNote = "compressed"
	}

	l.httpDoCalls.Add(1)
	resp, err := l.httpc.Do(req)
	if err != nil {
		return 0, fmt.Errorf("log upload of %d bytes %s failed: %v", len(body), compressedNote, err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		n, _ := strconv.Atoi(resp.Header.Get("Retry-After"))
		b, _ := io.ReadAll(io.LimitReader(resp.Body, 1<<10))
		return time.Duration(n) * time.Second, fmt.Errorf("log upload of %d bytes %s failed %d: %s", len(body), compressedNote, resp.StatusCode, bytes.TrimSpace(b))
	}
	return 0, nil
}
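
// For illustration, a compressed upload is an HTTP POST roughly like the
// following (path components and lengths are hypothetical; see NewLogger for
// how l.url is built):
//
//	POST /c/<collection>/<privateID> HTTP/1.1
//	Content-Encoding: zstd
//	Orig-Content-Length: 12345
//
//	<zstd-compressed JSON array produced by drainPending>
//
// A non-200 response may carry a Retry-After header (in seconds), which the
// uploading loop honors before retrying.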

// Flush uploads all logs to the server. It blocks until complete or there is an
// unrecoverable error.
//
// TODO(bradfitz): this apparently just returns nil, as of tailscale/corp@9c2ec35.
// Finish cleaning this up.
func (l *Logger) Flush() error {
	return nil
}

// StartFlush starts a log upload, if anything is pending.
//
// If l is nil, StartFlush is a no-op.
func (l *Logger) StartFlush() {
	if l != nil {
		l.tryDrainWake()
	}
}

// logtailDisabled is whether logtail uploads to logcatcher are disabled.
var logtailDisabled atomic.Bool

// Disable disables logtail uploads for the lifetime of the process.
func Disable() {
	logtailDisabled.Store(true)
}

var debugWakesAndUploads = envknob.RegisterBool("TS_DEBUG_LOGTAIL_WAKES")

// tryDrainWake tries to send to l.drainWake, to cause an uploading wakeup.
// It does not block.
func (l *Logger) tryDrainWake() {
	l.flushPending.Store(false)
	if debugWakesAndUploads() {
		// Using println instead of log.Printf here to avoid recursing back into
		// ourselves.
		println("logtail: try drain wake, numHTTP:", l.httpDoCalls.Load())
	}
	select {
	case l.drainWake <- struct{}{}:
	default:
	}
}

func (l *Logger) sendLocked(jsonBlob []byte) (int, error) {
	tapSend(jsonBlob)
	if logtailDisabled.Load() {
		return len(jsonBlob), nil
	}

	n, err := l.buffer.Write(jsonBlob)

	flushDelay := defaultFlushDelay
	if l.flushDelayFn != nil {
		flushDelay = l.flushDelayFn()
	}
	if flushDelay > 0 {
		if l.flushPending.CompareAndSwap(false, true) {
			if l.flushTimer == nil {
				l.flushTimer = l.clock.AfterFunc(flushDelay, l.tryDrainWake)
			} else {
				l.flushTimer.Reset(flushDelay)
			}
		}
	} else {
		l.tryDrainWake()
	}
	return n, err
}
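
// The flush-delay logic above coalesces uploads: the first write after an idle
// period arms a timer and later writes within flushDelay ride along with it,
// so a burst of log lines results in a single uploader wakeup. The sketch
// below restates that pattern in isolation using only the standard library
// (atomic.Bool plus time.AfterFunc); the exampleCoalescer type and its names
// are illustrative and not part of this package's API.
type exampleCoalescer struct {
	pending atomic.Bool
	timer   *time.Timer
	wake    func() // e.g. signals an uploader goroutine
}

func (c *exampleCoalescer) noteWrite(delay time.Duration) {
	if delay <= 0 {
		c.wake() // coalescing disabled; wake the uploader immediately
		return
	}
	// Only the writer that flips pending from false to true (re)arms the
	// timer, so concurrent writers do not schedule duplicate wakeups.
	if c.pending.CompareAndSwap(false, true) {
		if c.timer == nil {
			c.timer = time.AfterFunc(delay, func() {
				c.pending.Store(false)
				c.wake()
			})
		} else {
			c.timer.Reset(delay)
		}
	}
}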

// appendMetadata appends optional "logtail", "metrics", and "v" JSON members.
// This assumes dst is already within a JSON object.
// Each member is comma-terminated.
func (l *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, procID uint32, procSequence uint64, errDetail string, errData jsontext.Value, level int) []byte {
	// Append optional logtail metadata.
	if !skipClientTime || procID != 0 || procSequence != 0 || errDetail != "" || errData != nil {
		dst = append(dst, `"logtail":{`...)
		if !skipClientTime {
			dst = append(dst, `"client_time":"`...)
			dst = l.clock.Now().UTC().AppendFormat(dst, time.RFC3339Nano)
			dst = append(dst, '"', ',')
		}
		if procID != 0 {
			dst = append(dst, `"proc_id":`...)
			dst = strconv.AppendUint(dst, uint64(procID), 10)
			dst = append(dst, ',')
		}
		if procSequence != 0 {
			dst = append(dst, `"proc_seq":`...)
			dst = strconv.AppendUint(dst, procSequence, 10)
			dst = append(dst, ',')
		}
		if errDetail != "" || errData != nil {
			dst = append(dst, `"error":{`...)
			if errDetail != "" {
				dst = append(dst, `"detail":`...)
				dst, _ = jsontext.AppendQuote(dst, errDetail)
				dst = append(dst, ',')
			}
			if errData != nil {
				dst = append(dst, `"bad_data":`...)
				dst = append(dst, errData...)
				dst = append(dst, ',')
			}
			dst = bytes.TrimRight(dst, ",")
			dst = append(dst, '}', ',')
		}
		dst = bytes.TrimRight(dst, ",")
		dst = append(dst, '}', ',')
	}

	// Append optional metrics metadata.
	if !skipMetrics && l.metricsDelta != nil {
		if d := l.metricsDelta(); d != "" {
			dst = append(dst, `"metrics":"`...)
			dst = append(dst, d...)
			dst = append(dst, '"', ',')
		}
	}

	// Add the optional log level, if non-zero.
	// Note that we only use log levels 1 and 2 currently.
	// It's unlikely we'll ever make it past 9.
	if level > 0 && level < 10 {
		dst = append(dst, `"v":`...)
		dst = append(dst, '0'+byte(level))
		dst = append(dst, ',')
	}

	return dst
}
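
// The members appended above are comma-terminated so the caller can either
// append further members or trim the trailing comma before closing the object.
// As a rough illustration (the helper below is hypothetical, not part of this
// package), a call with a proc ID, a proc sequence number, and level 1 yields
// something like:
//
//	{"logtail":{"client_time":"2006-01-02T15:04:05.999999999Z","proc_id":7,"proc_seq":42},"v":1,
//
// which appendText and appendTextOrJSONLocked then complete with the "text"
// member or the copied JSON members plus a closing brace.
func exampleAppendMetadata(l *Logger) []byte {
	dst := []byte("{")
	dst = l.appendMetadata(dst, false, true, 7, 42, "", nil, 1)
	dst = append(dst, `"text":"hello"`...)
	return append(dst, "}\n"...)
}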

// appendText appends a raw text message in the Tailscale JSON log entry format.
func (l *Logger) appendText(dst, src []byte, skipClientTime bool, procID uint32, procSequence uint64, level int) []byte {
	dst = slices.Grow(dst, len(src))
	dst = append(dst, '{')
	dst = l.appendMetadata(dst, skipClientTime, false, procID, procSequence, "", nil, level)
	if len(src) == 0 {
		dst = bytes.TrimRight(dst, ",")
		return append(dst, "}\n"...)
	}

	// Append the text string, which may be truncated.
	// Invalid UTF-8 will be mangled with the Unicode replacement character.
	max := maxTextSize
	if l.lowMem {
		max /= lowMemRatio
	}
	dst = append(dst, `"text":`...)
	dst = appendTruncatedString(dst, src, max)
	return append(dst, "}\n"...)
}

// appendTruncatedString appends a JSON string for src,
// truncating the src to be no larger than n.
func appendTruncatedString(dst, src []byte, n int) []byte {
	srcLen := len(src)
	src = truncate.String(src, n)
	dst, _ = jsontext.AppendQuote(dst, src) // ignore error; only occurs for invalid UTF-8
	if srcLen > len(src) {
		dst = dst[:len(dst)-len(`"`)] // trim off the closing double-quote
		dst = append(dst, "…+"...)
		dst = strconv.AppendInt(dst, int64(srcLen-len(src)), 10)
		dst = append(dst, '"') // re-append the closing double-quote
	}
	return dst
}
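
// The result stays a valid JSON string and records how many bytes were
// dropped. A minimal illustration (hypothetical helper, not part of this
// package's API): truncating "hello world" to 5 bytes yields the JSON string
// "hello…+6", where 6 is the number of bytes removed.
func exampleTruncation() []byte {
	// Produces `"hello…+6"`.
	return appendTruncatedString(nil, []byte("hello world"), 5)
}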

// AppendTextOrJSONLocked appends a raw text message or a raw JSON object to
// dst at the default (zero) log level, using the standard wall clock.
func (l *Logger) AppendTextOrJSONLocked(dst, src []byte) []byte {
	l.clock = tstime.StdClock{}
	return l.appendTextOrJSONLocked(dst, src, 0)
}

// appendTextOrJSONLocked appends a raw text message or a raw JSON object
// in the Tailscale JSON log format.
func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte {
	if l.includeProcSequence {
		l.procSequence++
	}
	if len(src) == 0 || src[0] != '{' {
		return l.appendText(dst, src, l.skipClientTime, l.procID, l.procSequence, level)
	}

	// Check whether the input is a valid JSON object and
	// whether it contains the reserved "logtail" name at the top-level.
	var logtailKeyOffset, logtailValOffset, logtailValLength int
	validJSON := func() bool {
		// TODO(dsnet): Avoid allocation of bytes.Buffer struct.
		dec := &l.jsonDec
		dec.Reset(bytes.NewBuffer(src))
		if tok, err := dec.ReadToken(); tok.Kind() != '{' || err != nil {
			return false
		}
		for dec.PeekKind() != '}' {
			keyOffset := dec.InputOffset()
			tok, err := dec.ReadToken()
			if err != nil {
				return false
			}
			isLogtail := tok.String() == "logtail"
			valOffset := dec.InputOffset()
			if dec.SkipValue() != nil {
				return false
			}
			if isLogtail {
				logtailKeyOffset = int(keyOffset)
				logtailValOffset = int(valOffset)
				logtailValLength = int(dec.InputOffset()) - logtailValOffset
			}
		}
		if tok, err := dec.ReadToken(); tok.Kind() != '}' || err != nil {
			return false
		}
		if _, err := dec.ReadToken(); err != io.EOF {
			return false // trailing junk after JSON object
		}
		return true
	}()

	// Treat invalid JSON as a raw text message.
	if !validJSON {
		return l.appendText(dst, src, l.skipClientTime, l.procID, l.procSequence, level)
	}

	// Check whether the JSON payload is too large.
	// Due to logtail metadata, the formatted log entry could exceed maxSize.
	// That's okay as the Tailscale log service limit is actually 2*maxSize.
	// However, so long as logging applications aim to target the maxSize limit,
	// there should be no trouble eventually uploading logs.
	if len(src) > maxSize {
		errDetail := fmt.Sprintf("entry too large: %d bytes", len(src))
		errData := appendTruncatedString(nil, src, maxSize/len(`\uffff`)) // escaping could increase size

		dst = append(dst, '{')
		dst = l.appendMetadata(dst, l.skipClientTime, true, l.procID, l.procSequence, errDetail, errData, level)
		dst = bytes.TrimRight(dst, ",")
		return append(dst, "}\n"...)
	}

	// Check whether the reserved logtail member occurs in the log data.
	// If so, it is moved to the logtail/error member.
	const jsonSeparators = ",:"      // per RFC 8259, section 2
	const jsonWhitespace = " \n\r\t" // per RFC 8259, section 2
	var errDetail string
	var errData jsontext.Value
	if logtailValLength > 0 {
		errDetail = "duplicate logtail member"
		errData = bytes.Trim(src[logtailValOffset:][:logtailValLength], jsonSeparators+jsonWhitespace)
	}
	dst = slices.Grow(dst, len(src))
	dst = append(dst, '{')
	dst = l.appendMetadata(dst, l.skipClientTime, true, l.procID, l.procSequence, errDetail, errData, level)
	if logtailValLength > 0 {
		// Exclude original logtail member from the message.
		dst = appendWithoutNewline(dst, src[len("{"):logtailKeyOffset])
		dst = bytes.TrimRight(dst, jsonSeparators+jsonWhitespace)
		dst = appendWithoutNewline(dst, src[logtailValOffset+logtailValLength:])
	} else {
		dst = appendWithoutNewline(dst, src[len("{"):])
	}
	dst = bytes.TrimRight(dst, jsonWhitespace)
	dst = dst[:len(dst)-len("}")]
	dst = bytes.TrimRight(dst, jsonSeparators+jsonWhitespace)
	return append(dst, "}\n"...)
}
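
// To make the framing above concrete: a JSON log line is copied into the
// entry after the generated metadata, and any caller-supplied "logtail"
// member is quarantined under logtail.error.bad_data rather than trusted.
// This is a hypothetical illustration, not part of the package API; in the
// real write path this runs with l.writeLock held.
func exampleJSONFraming(l *Logger) []byte {
	// For input like {"event":"up","peers":3} the result is roughly
	// {"logtail":{...},"event":"up","peers":3} followed by a newline, while
	// an input of {"logtail":"spoofed","x":1} instead produces roughly
	// {"logtail":{...,"error":{"detail":"duplicate logtail member","bad_data":"spoofed"}},"x":1}.
	return l.appendTextOrJSONLocked(nil, []byte(`{"event":"up","peers":3}`), 0)
}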

// appendWithoutNewline appends src to dst except that it ignores newlines
// since newlines are used to frame individual log entries.
func appendWithoutNewline(dst, src []byte) []byte {
	for _, c := range src {
		if c != '\n' {
			dst = append(dst, c)
		}
	}
	return dst
}

// Logf logs to l using the provided fmt-style format and optional arguments.
func (l *Logger) Logf(format string, args ...any) {
	fmt.Fprintf(l, format, args...)
}

var obscureIPs = envknob.RegisterBool("TS_OBSCURE_LOGGED_IPS")
|
|
|
|
|
2020-12-21 17:03:39 +00:00
|
|
|
// Write logs an encoded JSON blob.
|
|
|
|
//
|
|
|
|
// If the []byte passed to Write is not an encoded JSON blob,
|
|
|
|
// then contents is fit into a JSON blob and written.
|
|
|
|
//
|
|
|
|
// This is intended as an interface for the stdlib "log" package.
|
|
|
|
func (l *Logger) Write(buf []byte) (int, error) {
|
2020-02-05 22:16:58 +00:00
|
|
|
if len(buf) == 0 {
|
|
|
|
return 0, nil
|
|
|
|
}
|
2023-11-08 19:48:36 +00:00
|
|
|
inLen := len(buf) // length as provided to us, before modifications to downstream writers
|
|
|
|
|
2020-12-21 18:53:18 +00:00
|
|
|
level, buf := parseAndRemoveLogLevel(buf)
|
2022-09-15 12:06:59 +00:00
|
|
|
if l.stderr != nil && l.stderr != io.Discard && int64(level) <= atomic.LoadInt64(&l.stderrLevel) {
|
2020-02-05 22:16:58 +00:00
|
|
|
if buf[len(buf)-1] == '\n' {
|
|
|
|
l.stderr.Write(buf)
|
|
|
|
} else {
|
|
|
|
// The log package always line-terminates logs,
|
|
|
|
// so this is an uncommon path.
|
2020-03-16 05:41:50 +00:00
|
|
|
withNL := append(buf[:len(buf):len(buf)], '\n')
|
|
|
|
l.stderr.Write(withNL)
|
2020-02-05 22:16:58 +00:00
|
|
|
}
|
|
|
|
}
|
2022-07-28 04:06:25 +00:00
|
|
|
|
2023-12-15 23:15:49 +00:00
|
|
|
if obscureIPs() {
|
|
|
|
buf = redactIPs(buf)
|
|
|
|
}
|
|
|
|
|
2022-05-18 05:28:57 +00:00
|
|
|
l.writeLock.Lock()
|
2022-07-28 04:06:25 +00:00
|
|
|
defer l.writeLock.Unlock()
|
|
|
|
|
	b := l.appendTextOrJSONLocked(l.writeBuf[:0], buf, level)
	_, err := l.sendLocked(b)
	return inLen, err
}

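// exampleUseAsLogOutput is an illustrative sketch (not part of the original
// file) of the "interface for the stdlib log package" mentioned above:
// pointing the log package's default output at a Logger makes every
// log.Printf call flow through Write. It assumes l has already been
// constructed and configured elsewhere.
func exampleUseAsLogOutput(l *Logger) {
	log.SetOutput(l) // every log.Printf below now goes through l.Write
	log.SetFlags(0)  // optional: avoid double timestamps; logtail records its own

	log.Printf("tun device up")              // level 0: uploaded, and mirrored to stderr per stderrLevel
	log.Printf("[v1] peer endpoint changed") // "[v1] " is stripped and recorded as verbosity level 1
}
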
var (
	regexMatchesIPv6 = regexp.MustCompile(`([0-9a-fA-F]{1,4}):([0-9a-fA-F]{1,4}):([0-9a-fA-F:]{1,4})*`)
	regexMatchesIPv4 = regexp.MustCompile(`(\d{1,3})\.(\d{1,3})\.\d{1,3}\.\d{1,3}`)
)

// redactIPs is a helper used by Write to redact IP addresses other than
// Tailscale IPs. It scans the log line for IPv4 and IPv6 addresses using the
// regular expressions above, keeps the first two octets or hextets of each
// match, and replaces the remainder with "x". It returns the log line with
// the IPs redacted.
func redactIPs(buf []byte) []byte {
	out := regexMatchesIPv6.ReplaceAllFunc(buf, func(b []byte) []byte {
		ip, err := netip.ParseAddr(string(b))
		if err != nil || tsaddr.IsTailscaleIP(ip) {
			return b // don't change this one
		}

		prefix := bytes.Split(b, []byte(":"))
		return bytes.Join(append(prefix[:2], []byte("x")), []byte(":"))
	})

	out = regexMatchesIPv4.ReplaceAllFunc(out, func(b []byte) []byte {
		ip, err := netip.ParseAddr(string(b))
		if err != nil || tsaddr.IsTailscaleIP(ip) {
			return b // don't change this one
		}

		prefix := bytes.Split(b, []byte("."))
		return bytes.Join(append(prefix[:2], []byte("x.x")), []byte("."))
	})

	return out
}

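// exampleRedactIPs is an illustrative sketch (not part of the original file)
// of what redactIPs does when the TS_OBSCURE_LOGGED_IPS knob is set: a public
// IPv4 address keeps only its first two octets, a public IPv6 address keeps
// only its first two hextets, and Tailscale addresses (such as those in the
// 100.64.0.0/10 CGNAT range) pass through unchanged. The sample addresses
// below are arbitrary.
func exampleRedactIPs() {
	in := []byte("dial 8.8.8.8 and 2001:4860:4860::8888 from 100.94.26.1")
	out := redactIPs(in)
	log.Printf("%s", out) // "dial 8.8.x.x and 2001:4860:x from 100.94.26.1"
}
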
var (
	openBracketV = []byte("[v")
	v1           = []byte("[v1] ")
	v2           = []byte("[v2] ")
	vJSON        = []byte("[v\x00JSON]") // precedes log level '0'-'9' byte, then JSON value
)

// parseAndRemoveLogLevel reports the verbosity level encoded in buf by a
// "[v1] ", "[v2] ", or vJSON marker and returns buf with that marker removed.
// Level 0 is the normal (or unknown) level; 1+ are increasingly verbose.
func parseAndRemoveLogLevel(buf []byte) (level int, cleanBuf []byte) {
	if len(buf) == 0 || buf[0] == '{' || !bytes.Contains(buf, openBracketV) {
		return 0, buf
	}
	if bytes.Contains(buf, v1) {
		return 1, bytes.ReplaceAll(buf, v1, nil)
	}
	if bytes.Contains(buf, v2) {
		return 2, bytes.ReplaceAll(buf, v2, nil)
	}
	if i := bytes.Index(buf, vJSON); i != -1 {
		rest := buf[i+len(vJSON):]
		if len(rest) >= 2 {
			v := rest[0]
			if v >= '0' && v <= '9' {
				return int(v - '0'), rest[1:]
			}
		}
	}
	return 0, buf
}

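// exampleLogLevels is an illustrative sketch (not part of the original file)
// of the markers parseAndRemoveLogLevel understands: plain writes are level 0,
// "[v1] " and "[v2] " mark increasingly verbose text lines and are stripped
// before upload, and the vJSON prefix carries an explicit level digit followed
// by a raw JSON value.
func exampleLogLevels() {
	level, msg := parseAndRemoveLogLevel([]byte("[v1] peer endpoint changed\n"))
	log.Printf("level=%d msg=%q", level, msg) // level=1 msg="peer endpoint changed\n"

	level, msg = parseAndRemoveLogLevel([]byte("[v\x00JSON]2{\"flow\":42}"))
	log.Printf("level=%d msg=%q", level, msg) // level=2 msg="{\"flow\":42}"
}
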
var (
	tapSetSize atomic.Int32
	tapMu      sync.Mutex
	tapSet     set.HandleSet[chan<- string]
)

// RegisterLogTap registers dst to get a copy of every log write. The caller
// must call unregister when done watching.
//
// This would ideally be a method on Logger, but Logger isn't really available
// in most places; many writes go via stderr which filch redirects to the
// singleton Logger set up early. For better or worse, there's basically only
// one Logger within the program. This mechanism at least works well for
// tailscaled. It works less well for a binary with multiple tsnet.Servers. Oh
// well. This then subscribes to all of them.
func RegisterLogTap(dst chan<- string) (unregister func()) {
	tapMu.Lock()
	defer tapMu.Unlock()
	h := tapSet.Add(dst)
	tapSetSize.Store(int32(len(tapSet)))
	return func() {
		tapMu.Lock()
		defer tapMu.Unlock()
		delete(tapSet, h)
		tapSetSize.Store(int32(len(tapSet)))
	}
}

// tapSend relays the JSON blob to any/all registered local debug log watchers
// (somebody running "tailscale debug daemon-logs").
func tapSend(jsonBlob []byte) {
	if tapSetSize.Load() == 0 {
		return
	}
	s := string(jsonBlob)
	tapMu.Lock()
	defer tapMu.Unlock()
	for _, dst := range tapSet {
		select {
		case dst <- s:
		default:
		}
	}
}

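// exampleWatchLogTap is an illustrative sketch (not part of the original file)
// of how a debug consumer (such as the code behind "tailscale debug
// daemon-logs") might use RegisterLogTap: register a buffered channel, read
// until told to stop, then unregister so tapSend stops copying lines. The
// buffer size of 16 is an arbitrary choice; tapSend drops a line rather than
// block when a watcher's channel is full.
func exampleWatchLogTap(done <-chan struct{}) {
	logs := make(chan string, 16)
	unregister := RegisterLogTap(logs)
	defer unregister()
	for {
		select {
		case line := <-logs:
			log.Printf("tap: %s", line)
		case <-done:
			return
		}
	}
}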