smallzstd: delete unused package

As of the earlier 85febda86d, our preferred zstd API is now zstdframe.

Updates #cleanup
Updates tailscale/corp#18514

Change-Id: I5a6164d3162bf2513c3673b6d1e34cfae84cb104
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
Authored by Brad Fitzpatrick on 2025-09-28 12:46:45 -07:00, committed by Brad Fitzpatrick
parent 01e645fae1
commit a32102f741
3 changed files with 0 additions and 222 deletions
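
For context on the migration: the replacement API lives in tailscale.com/util/zstdframe. The sketch below shows roughly what a former smallzstd caller looks like on the new API. The AppendEncode/AppendDecode helper names and their signatures are assumptions about zstdframe's surface, not something recorded in this commit, so verify against the package docs before relying on them.

// Hypothetical migration sketch; zstdframe's exact API is assumed here,
// not quoted from this commit.
package main

import (
	"fmt"

	"tailscale.com/util/zstdframe"
)

func main() {
	plain := []byte("hello, zstd")

	// Stateless encode: append a complete zstd frame to dst (nil here).
	comp := zstdframe.AppendEncode(nil, plain)

	// Stateless decode of that frame.
	out, err := zstdframe.AppendDecode(nil, comp)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q -> %d compressed bytes -> %q\n", plain, len(comp), out)
}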


@@ -1,14 +0,0 @@
{"logtail":{"client_time":"2020-07-01T14:49:40.196597018-07:00","server_time":"2020-07-01T21:49:40.198371511Z"},"text":"9.8M/25.6M magicsock: starting endpoint update (periodic)\n"}
{"logtail":{"client_time":"2020-07-01T14:49:40.345925455-07:00","server_time":"2020-07-01T21:49:40.347904717Z"},"text":"9.9M/25.6M netcheck: udp=true v6=false mapvarydest=false hair=false v4a=202.188.7.1:41641 derp=2 derpdist=1v4:7ms,2v4:3ms,4v4:18ms\n"}
{"logtail":{"client_time":"2020-07-01T14:49:43.347155742-07:00","server_time":"2020-07-01T21:49:43.34828658Z"},"text":"9.9M/25.6M control: map response long-poll timed out!\n"}
{"logtail":{"client_time":"2020-07-01T14:49:43.347539333-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"9.9M/25.6M control: PollNetMap: context canceled\n"}
{"logtail":{"client_time":"2020-07-01T14:49:43.347767812-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"10.0M/25.6M control: sendStatus: mapRoutine1: state:authenticated\n"}
{"logtail":{"client_time":"2020-07-01T14:49:43.347817165-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"10.0M/25.6M blockEngineUpdates(false)\n"}
{"logtail":{"client_time":"2020-07-01T14:49:43.347989028-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"10.0M/25.6M wgcfg: [SViTM] skipping subnet route\n"}
{"logtail":{"client_time":"2020-07-01T14:49:43.349997554-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"9.3M/25.6M Received error: PollNetMap: context canceled\n"}
{"logtail":{"client_time":"2020-07-01T14:49:43.350072606-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"9.3M/25.6M control: mapRoutine: backoff: 30136 msec\n"}
{"logtail":{"client_time":"2020-07-01T14:49:47.998364646-07:00","server_time":"2020-07-01T21:49:47.999333754Z"},"text":"9.5M/25.6M [W1NbE] - [UcppE] Send handshake init [127.3.3.40:1, 6.1.1.6:37388*, 10.3.2.6:41641]\n"}
{"logtail":{"client_time":"2020-07-01T14:49:47.99881914-07:00","server_time":"2020-07-01T21:49:48.009859543Z"},"text":"9.6M/25.6M magicsock: adding connection to derp-1 for [W1NbE]\n"}
{"logtail":{"client_time":"2020-07-01T14:49:47.998904932-07:00","server_time":"2020-07-01T21:49:48.009859543Z"},"text":"9.6M/25.6M magicsock: 2 active derp conns: derp-1=cr0s,wr0s derp-2=cr16h0m0s,wr14h38m0s\n"}
{"logtail":{"client_time":"2020-07-01T14:49:47.999045606-07:00","server_time":"2020-07-01T21:49:48.009859543Z"},"text":"9.6M/25.6M derphttp.Client.Recv: connecting to derp-1 (nyc)\n"}
{"logtail":{"client_time":"2020-07-01T14:49:48.091104119-07:00","server_time":"2020-07-01T21:49:48.09280535Z"},"text":"9.6M/25.6M magicsock: rx [W1NbE] from 6.1.1.6:37388 (1/3), set as new priority\n"}


@@ -1,78 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

// Package smallzstd produces zstd encoders and decoders optimized for
// low memory usage, at the expense of compression efficiency.
//
// This package is optimized primarily for the memory cost of
// compressing and decompressing data. We reduce this cost in two
// major ways: disable parallelism within the library (i.e. don't use
// multiple CPU cores to decompress), and drop the compression window
// down from the defaults of 4-16MiB, to 8kiB.
//
// Decompressors cost 2x the window size in RAM to run, so by using an
// 8kiB window, we can run roughly 1000x as many decompressors per unit
// of memory as with the defaults.
//
// Depending on context, the benefit is either being able to run more
// decoders (e.g. in our logs processing system), or having a lower
// memory footprint when using compression in network protocols
// (e.g. in tailscaled, which should have a minimal RAM cost).
package smallzstd

import (
	"io"

	"github.com/klauspost/compress/zstd"
)

// WindowSize is the window size used for zstd compression. Decoder
// memory usage scales linearly with WindowSize.
const WindowSize = 8 << 10 // 8kiB

// NewDecoder returns a zstd.Decoder configured for low memory usage,
// at the expense of decompression performance.
func NewDecoder(r io.Reader, options ...zstd.DOption) (*zstd.Decoder, error) {
	defaults := []zstd.DOption{
		// Default is GOMAXPROCS, which costs many KiB in stacks.
		zstd.WithDecoderConcurrency(1),
		// Default is to allocate more upfront for performance. We
		// prefer lower memory use and a bit of GC load.
		zstd.WithDecoderLowmem(true),
		// You might expect to see zstd.WithDecoderMaxMemory
		// here. However, it's not terribly safe to use if you're
		// doing stateless decoding, because it sets the maximum
		// amount of memory the decompressed data can occupy, rather
		// than the window size of the zstd stream. This means a very
		// compressible piece of data might violate the max memory
		// limit here, even if the window size (and thus total memory
		// required to decompress the data) is small.
		//
		// As a result, we don't set a decoder limit here, and rely on
		// the encoder below producing "cheap" streams. Callers are
		// welcome to set their own max memory setting, if
		// contextually there is a clearly correct value (e.g. it's
		// known from the upper layer protocol that the decoded data
		// can never be more than 1MiB).
	}

	return zstd.NewReader(r, append(defaults, options...)...)
}

// NewEncoder returns a zstd.Encoder configured for low memory usage,
// both during compression and at decompression time, at the expense
// of performance and compression efficiency.
func NewEncoder(w io.Writer, options ...zstd.EOption) (*zstd.Encoder, error) {
	defaults := []zstd.EOption{
		// Default is GOMAXPROCS, which costs many KiB in stacks.
		zstd.WithEncoderConcurrency(1),
		// Default is several MiB, which bloats both encoders and
		// their corresponding decoders.
		zstd.WithWindowSize(WindowSize),
		// Encode zero-length inputs in a way that the `zstd` utility
		// can read, because interoperability is handy.
		zstd.WithZeroFrames(true),
	}

	return zstd.NewWriter(w, append(defaults, options...)...)
}
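
The memory claim in the package comment is simple arithmetic: a decoder needs roughly 2x its window in RAM, so shrinking the window from the library defaults of 4-16 MiB down to 8 KiB is about a three-orders-of-magnitude reduction, which is where the ~1000x figure comes from. For reference, here is a minimal usage sketch of the now-deleted package, passing nil writers/readers for the stateless EncodeAll/DecodeAll path exactly as the benchmarks below do; the import path and the 1 MiB decoder cap are illustrative, caller-chosen details of the kind the comment above recommends.

// Minimal usage sketch of the deleted package (stateless round trip).
// The 1 MiB decoder memory cap is an illustrative, caller-chosen value.
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
	"tailscale.com/smallzstd"
)

func main() {
	enc, err := smallzstd.NewEncoder(nil) // nil writer: EncodeAll-only use
	if err != nil {
		panic(err)
	}
	dec, err := smallzstd.NewDecoder(nil, zstd.WithDecoderMaxMemory(1<<20))
	if err != nil {
		panic(err)
	}

	compressed := enc.EncodeAll([]byte("hello, small window"), nil)
	plain, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d compressed bytes -> %q\n", len(compressed), plain)
}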


@@ -1,130 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

package smallzstd

import (
	"os"
	"testing"

	"github.com/klauspost/compress/zstd"
)

func BenchmarkSmallEncoder(b *testing.B) {
	benchEncoder(b, func() (*zstd.Encoder, error) { return NewEncoder(nil) })
}

func BenchmarkSmallEncoderWithBuild(b *testing.B) {
	benchEncoderWithConstruction(b, func() (*zstd.Encoder, error) { return NewEncoder(nil) })
}

func BenchmarkStockEncoder(b *testing.B) {
	benchEncoder(b, func() (*zstd.Encoder, error) { return zstd.NewWriter(nil) })
}

func BenchmarkStockEncoderWithBuild(b *testing.B) {
	benchEncoderWithConstruction(b, func() (*zstd.Encoder, error) { return zstd.NewWriter(nil) })
}

func BenchmarkSmallDecoder(b *testing.B) {
	benchDecoder(b, func() (*zstd.Decoder, error) { return NewDecoder(nil) })
}

func BenchmarkSmallDecoderWithBuild(b *testing.B) {
	benchDecoderWithConstruction(b, func() (*zstd.Decoder, error) { return NewDecoder(nil) })
}

func BenchmarkStockDecoder(b *testing.B) {
	benchDecoder(b, func() (*zstd.Decoder, error) { return zstd.NewReader(nil) })
}

func BenchmarkStockDecoderWithBuild(b *testing.B) {
	benchDecoderWithConstruction(b, func() (*zstd.Decoder, error) { return zstd.NewReader(nil) })
}

func benchEncoder(b *testing.B, mk func() (*zstd.Encoder, error)) {
	b.ReportAllocs()

	in := testdata(b)
	out := make([]byte, 0, 10<<10) // 10kiB

	e, err := mk()
	if err != nil {
		b.Fatalf("making encoder: %v", err)
	}

	b.ResetTimer()
	for range b.N {
		e.EncodeAll(in, out)
	}
}

func benchEncoderWithConstruction(b *testing.B, mk func() (*zstd.Encoder, error)) {
	b.ReportAllocs()

	in := testdata(b)
	out := make([]byte, 0, 10<<10) // 10kiB

	b.ResetTimer()
	for range b.N {
		e, err := mk()
		if err != nil {
			b.Fatalf("making encoder: %v", err)
		}

		e.EncodeAll(in, out)
	}
}

func benchDecoder(b *testing.B, mk func() (*zstd.Decoder, error)) {
	b.ReportAllocs()

	in := compressedTestdata(b)
	out := make([]byte, 0, 10<<10)

	d, err := mk()
	if err != nil {
		b.Fatalf("creating decoder: %v", err)
	}

	b.ResetTimer()
	for range b.N {
		d.DecodeAll(in, out)
	}
}

func benchDecoderWithConstruction(b *testing.B, mk func() (*zstd.Decoder, error)) {
	b.ReportAllocs()

	in := compressedTestdata(b)
	out := make([]byte, 0, 10<<10)

	b.ResetTimer()
	for range b.N {
		d, err := mk()
		if err != nil {
			b.Fatalf("creating decoder: %v", err)
		}

		d.DecodeAll(in, out)
	}
}

func testdata(b *testing.B) []byte {
	b.Helper()
	in, err := os.ReadFile("testdata")
	if err != nil {
		b.Fatalf("reading testdata: %v", err)
	}
	return in
}

func compressedTestdata(b *testing.B) []byte {
	b.Helper()
	uncomp := testdata(b)
	e, err := NewEncoder(nil)
	if err != nil {
		b.Fatalf("creating encoder: %v", err)
	}
	return e.EncodeAll(uncomp, nil)
}
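
The *WithBuild benchmark variants above rebuild the encoder or decoder on every iteration, isolating construction cost from steady-state throughput; the plain variants show why callers normally construct once and reuse. Below is a minimal sketch of that construct-once pattern, illustrative rather than code from this repository; klauspost's EncodeAll and DecodeAll are documented as callable concurrently on a single Encoder/Decoder, though with concurrency forced to 1 such calls effectively serialize.

// Construct-once, reuse-many-times pattern whose benefit the *WithBuild
// benchmarks quantify. Illustrative sketch only.
package main

import (
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
	"tailscale.com/smallzstd"
)

var (
	encoder *zstd.Encoder
	decoder *zstd.Decoder
)

func init() {
	var err error
	if encoder, err = smallzstd.NewEncoder(nil); err != nil {
		log.Fatalf("creating encoder: %v", err)
	}
	if decoder, err = smallzstd.NewDecoder(nil); err != nil {
		log.Fatalf("creating decoder: %v", err)
	}
}

func main() {
	// Reusing one encoder/decoder across many payloads amortizes the
	// construction cost that the *WithBuild benchmarks measure.
	for i := 0; i < 3; i++ {
		msg := []byte(fmt.Sprintf("payload %d", i))
		comp := encoder.EncodeAll(msg, nil)
		plain, err := decoder.DecodeAll(comp, nil)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("payload %d: %d bytes -> %d compressed -> %d decoded", i, len(msg), len(comp), len(plain))
	}
}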