// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package logtail

import (
	"bytes"
	"context"
	"encoding/json"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
	"time"

	"tailscale.com/tstest"
	"tailscale.com/tstime"
)

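// TestFastShutdown verifies that Shutdown returns promptly and without error
// when its context is already canceled.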
func TestFastShutdown(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	testServ := httptest.NewServer(http.HandlerFunc(
		func(w http.ResponseWriter, r *http.Request) {}))
	defer testServ.Close()

	l := NewLogger(Config{
		BaseURL: testServ.URL,
	}, t.Logf)
	err := l.Shutdown(ctx)
	if err != nil {
		t.Error(err)
	}
}

// maximum number of times a test will call l.Write()
const logLines = 3

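// LogtailTestServer pairs a fake log-catcher HTTP server with a channel that
// receives each uploaded request body.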
type LogtailTestServer struct {
	srv      *httptest.Server // Log server
	uploaded chan []byte
}

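// NewLogtailTestHarness starts a fake log server and a Logger that uploads to
// it, consuming the initial "logtail started" message before returning.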
func NewLogtailTestHarness(t *testing.T) (*LogtailTestServer, *Logger) {
	ts := LogtailTestServer{}

	// max channel backlog = 1 "started" + #logLines x "log line" + 1 "closed"
	ts.uploaded = make(chan []byte, 2+logLines)

	ts.srv = httptest.NewServer(http.HandlerFunc(
		func(w http.ResponseWriter, r *http.Request) {
			body, err := io.ReadAll(r.Body)
			if err != nil {
				t.Error("failed to read HTTP request")
			}
			ts.uploaded <- body
		}))

	t.Cleanup(ts.srv.Close)

	l := NewLogger(Config{BaseURL: ts.srv.URL}, t.Logf)

	// There is always an initial "logtail started" message
	body := <-ts.uploaded
	if !strings.Contains(string(body), "started") {
		t.Errorf("unknown start logging statement: %q", string(body))
	}

	return &ts, l
}

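// TestDrainPendingMessages verifies that log lines written before Shutdown
// are all uploaded.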
func TestDrainPendingMessages(t *testing.T) {
	ts, l := NewLogtailTestHarness(t)

	for i := 0; i < logLines; i++ {
		l.Write([]byte("log line"))
	}

	// all of the "log line" messages usually arrive at once, but poll if needed.
	body := ""
	for i := 0; i <= logLines; i++ {
		body += string(<-ts.uploaded)
		count := strings.Count(body, "log line")
		if count == logLines {
			break
		}
		// if we never find count == logLines, the test will eventually time out.
	}

	err := l.Shutdown(context.Background())
	if err != nil {
		t.Error(err)
	}
}

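// TestEncodeAndUploadMessages verifies that plain-text and JSON writes are
// uploaded with the expected "text" value and a logtail.client_time field.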
func TestEncodeAndUploadMessages(t *testing.T) {
	ts, l := NewLogtailTestHarness(t)

	tests := []struct {
		name string
		log  string
		want string
	}{
		{
			"plain text",
			"log line",
			"log line",
		},
		{
			"simple JSON",
			`{"text": "log line"}`,
			"log line",
		},
	}

	for _, tt := range tests {
		io.WriteString(l, tt.log)
		body := <-ts.uploaded

		data := unmarshalOne(t, body)
		got := data["text"]
		if got != tt.want {
			t.Errorf("%s: got %q; want %q", tt.name, got.(string), tt.want)
		}

		ltail, ok := data["logtail"]
		if ok {
			logtailmap := ltail.(map[string]any)
			_, ok = logtailmap["client_time"]
			if !ok {
				t.Errorf("%s: no client_time present", tt.name)
			}
		} else {
			t.Errorf("%s: no logtail map present", tt.name)
		}
	}

	err := l.Shutdown(context.Background())
	if err != nil {
		t.Error(err)
	}
}

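// TestEncodeSpecialCases exercises encoding corner cases: a client-supplied
// "logtail" field, JSON special characters, skipClientTime, and lowMem
// truncation of long lines.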
func TestEncodeSpecialCases(t *testing.T) {
	ts, l := NewLogtailTestHarness(t)

	// -------------------------------------------------------------------------

	// JSON log message already contains a logtail field.
	io.WriteString(l, `{"logtail": "LOGTAIL", "text": "text"}`)
	body := <-ts.uploaded
	data := unmarshalOne(t, body)
	errorHasLogtail, ok := data["error_has_logtail"]
	if ok {
		if errorHasLogtail != "LOGTAIL" {
			t.Errorf("error_has_logtail: got:%q; want:%q",
				errorHasLogtail, "LOGTAIL")
		}
	} else {
		t.Errorf("no error_has_logtail field: %v", data)
	}

	// -------------------------------------------------------------------------

	// special characters
	io.WriteString(l, "\b\f\n\r\t"+`"\`)
	bodytext := string(<-ts.uploaded)
	// json.Unmarshal would unescape the characters, we have to look at the encoded text
	escaped := strings.Contains(bodytext, `\b\f\n\r\t\"\`)
	if !escaped {
		t.Errorf("special characters got %s", bodytext)
	}

	// -------------------------------------------------------------------------

	// skipClientTime to omit the logtail metadata
	l.skipClientTime = true
	io.WriteString(l, "text")
	body = <-ts.uploaded
	data = unmarshalOne(t, body)
	_, ok = data["logtail"]
	if ok {
		t.Errorf("skipClientTime: unexpected logtail map present: %v", data)
	}

	// -------------------------------------------------------------------------

	// lowMem + long string
	l.skipClientTime = false
	l.lowMem = true
	longStr := strings.Repeat("0", 5120)
	io.WriteString(l, longStr)
	body = <-ts.uploaded
	data = unmarshalOne(t, body)
	text, ok := data["text"]
	if !ok {
		t.Errorf("lowMem: no text %v", data)
	}
	if n := len(text.(string)); n > 4500 {
		t.Errorf("lowMem: got %d chars; want <4500 chars", n)
	}

	// -------------------------------------------------------------------------

	err := l.Shutdown(context.Background())
	if err != nil {
		t.Error(err)
	}
}

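// sink keeps encodeText's result alive so the allocation check below cannot
// be optimized away.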
var sink []byte

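// TestLoggerEncodeTextAllocs checks that encodeText stays within its expected
// allocation budget (one allocation per call here).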
func TestLoggerEncodeTextAllocs(t *testing.T) {
	lg := &Logger{clock: tstime.StdClock{}}
	inBuf := []byte("some text to encode")
	procID := uint32(0x24d32ee9)
	procSequence := uint64(0x12346)
	err := tstest.MinAllocsPerRun(t, 1, func() {
		sink = lg.encodeText(inBuf, false, procID, procSequence, 0)
	})
	if err != nil {
		t.Fatal(err)
	}
}

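// TestLoggerWriteLength verifies that Write reports the length of its input,
// per the io.Writer contract.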
func TestLoggerWriteLength(t *testing.T) {
	lg := &Logger{
		clock:  tstime.StdClock{},
		buffer: NewMemoryBuffer(1024),
	}
	inBuf := []byte("some text to encode")
	n, err := lg.Write(inBuf)
	if err != nil {
		t.Error(err)
	}
	if n != len(inBuf) {
		t.Errorf("logger.Write wrote %d bytes, expected %d", n, len(inBuf))
	}
}

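// TestParseAndRemoveLogLevel verifies that "[vN]" markers are parsed into a
// verbosity level and stripped from the log text.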
func TestParseAndRemoveLogLevel(t *testing.T) {
	tests := []struct {
		log       string
		wantLevel int
		wantLog   string
	}{
		{
			"no level",
			0,
			"no level",
		},
		{
			"[v1] level 1",
			1,
			"level 1",
		},
		{
			"level 1 [v1] ",
			1,
			"level 1 ",
		},
		{
			"[v2] level 2",
			2,
			"level 2",
		},
		{
			"level [v2] 2",
			2,
			"level 2",
		},
		{
			"[v3] no level 3",
			0,
			"[v3] no level 3",
		},
		{
			"some ignored text then [v\x00JSON]5{\"foo\":1234}",
			5,
			`{"foo":1234}`,
		},
	}

	for _, tt := range tests {
		gotLevel, gotLog := parseAndRemoveLogLevel([]byte(tt.log))
		if gotLevel != tt.wantLevel {
			t.Errorf("parseAndRemoveLogLevel(%q): got:%d; want %d",
				tt.log, gotLevel, tt.wantLevel)
		}
		if string(gotLog) != tt.wantLog {
			t.Errorf("parseAndRemoveLogLevel(%q): got:%q; want %q",
				tt.log, gotLog, tt.wantLog)
		}
	}
}

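// unmarshalOne decodes an uploaded body as a JSON array, requires exactly one
// entry, and returns it.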
func unmarshalOne(t *testing.T, body []byte) map[string]any {
	t.Helper()
	var entries []map[string]any
	err := json.Unmarshal(body, &entries)
	if err != nil {
		t.Error(err)
	}
	if len(entries) != 1 {
		t.Fatalf("expected one entry, got %d", len(entries))
	}
	return entries[0]
}

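// TestEncodeTextTruncation verifies that in lowMem mode an overly long line is
// truncated, with a marker recording how many bytes were dropped.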
func TestEncodeTextTruncation(t *testing.T) {
	lg := &Logger{clock: tstime.StdClock{}, lowMem: true}
	in := bytes.Repeat([]byte("a"), 5120)
	b := lg.encodeText(in, true, 0, 0, 0)
	got := string(b)
	want := `{"text": "` + strings.Repeat("a", 4096) + `…+1024"}` + "\n"
	if got != want {
		t.Errorf("got:\n%qwant:\n%q\n", got, want)
	}
}

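// simpleMemBuf is a Buffer whose writes go to an in-memory bytes.Buffer,
// letting tests inspect encoded output directly.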
type simpleMemBuf struct {
	Buffer
	buf bytes.Buffer
}

func (b *simpleMemBuf) Write(p []byte) (n int, err error) { return b.buf.Write(p) }

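// TestEncode compares full encoded output, including logtail metadata from a
// fixed clock, for plain-text, leveled, and JSON inputs.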
func TestEncode(t *testing.T) {
	tests := []struct {
		in   string
		want string
	}{
		{
			"normal",
			`{"logtail": {"client_time": "1970-01-01T00:02:03.000000456Z","proc_id": 7,"proc_seq": 1}, "text": "normal"}` + "\n",
		},
		{
			"and a [v1] level one",
			`{"logtail": {"client_time": "1970-01-01T00:02:03.000000456Z","proc_id": 7,"proc_seq": 1}, "v":1,"text": "and a level one"}` + "\n",
		},
		{
			"[v2] some verbose two",
			`{"logtail": {"client_time": "1970-01-01T00:02:03.000000456Z","proc_id": 7,"proc_seq": 1}, "v":2,"text": "some verbose two"}` + "\n",
		},
		{
			"{}",
			`{"logtail":{"client_time":"1970-01-01T00:02:03.000000456Z","proc_id":7,"proc_seq":1}}` + "\n",
		},
		{
			`{"foo":"bar"}`,
			`{"foo":"bar","logtail":{"client_time":"1970-01-01T00:02:03.000000456Z","proc_id":7,"proc_seq":1}}` + "\n",
		},
		{
			"foo: [v\x00JSON]0{\"foo\":1}",
			"{\"foo\":1,\"logtail\":{\"client_time\":\"1970-01-01T00:02:03.000000456Z\",\"proc_id\":7,\"proc_seq\":1}}\n",
		},
		{
			"foo: [v\x00JSON]2{\"foo\":1}",
			"{\"foo\":1,\"logtail\":{\"client_time\":\"1970-01-01T00:02:03.000000456Z\",\"proc_id\":7,\"proc_seq\":1},\"v\":2}\n",
		},
	}
	for _, tt := range tests {
		buf := new(simpleMemBuf)
		lg := &Logger{
			clock:        tstest.NewClock(tstest.ClockOpts{Start: time.Unix(123, 456).UTC()}),
			buffer:       buf,
			procID:       7,
			procSequence: 1,
		}
		io.WriteString(lg, tt.in)
		got := buf.buf.String()
		if got != tt.want {
			t.Errorf("for %q,\n got: %#q\nwant: %#q\n", tt.in, got, tt.want)
		}
		if err := json.Compact(new(bytes.Buffer), buf.buf.Bytes()); err != nil {
			t.Errorf("invalid output JSON for %q: %s", tt.in, got)
		}
	}
}

// Test that even if Logger.Write modifies the input buffer, we still return the
// length of the input buffer, not what we shrank it down to. Otherwise the
// caller will think we did a short write, violating the io.Writer contract.
func TestLoggerWriteResult(t *testing.T) {
	buf := NewMemoryBuffer(100)
	lg := &Logger{
		clock:  tstest.NewClock(tstest.ClockOpts{Start: time.Unix(123, 0)}),
		buffer: buf,
	}

	const in = "[v1] foo"
	n, err := lg.Write([]byte(in))
	if err != nil {
		t.Fatal(err)
	}
	if got, want := n, len(in); got != want {
		t.Errorf("Write = %v; want %v", got, want)
	}
	back, err := buf.TryReadLine()
	if err != nil {
		t.Fatal(err)
	}
	if got, want := string(back), `{"logtail": {"client_time": "1970-01-01T00:02:03Z"}, "v":1,"text": "foo"}`+"\n"; got != want {
		t.Errorf("mismatch.\n got: %#q\nwant: %#q", back, want)
	}
}