tailscale/smallzstd/zstd.go
David Anderson 9cd4e65191 smallzstd: new package that constructs zstd small encoders/decoders.
It's just a config wrapper that passes "use less memory at the
expense of compression" parameters by default, so that we don't
accidentally construct resource-hungry (de)compressors.

Also includes a benchmark that measures the memory cost of the
small variants vs. the stock variants. The savings are significant
on both compressors (~8x less memory) and decompressors (~1.4x less,
not including the savings from the significantly smaller
window on the compression side - with those savings included it's
more like ~140x smaller).

BenchmarkSmallEncoder-8            	   56174	     19354 ns/op	      31 B/op	       0 allocs/op
BenchmarkSmallEncoderWithBuild-8   	    2900	    382940 ns/op	 1746547 B/op	      36 allocs/op
BenchmarkStockEncoder-8            	   48921	     25761 ns/op	     286 B/op	       0 allocs/op
BenchmarkStockEncoderWithBuild-8   	     426	   2630241 ns/op	13843842 B/op	     124 allocs/op
BenchmarkSmallDecoder-8            	  123814	      9344 ns/op	       0 B/op	       0 allocs/op
BenchmarkSmallDecoderWithBuild-8   	   41547	     27455 ns/op	   27694 B/op	      31 allocs/op
BenchmarkStockDecoder-8            	  129832	      9417 ns/op	       1 B/op	       0 allocs/op
BenchmarkStockDecoderWithBuild-8   	   25561	     51751 ns/op	   39607 B/op	      92 allocs/op

Signed-off-by: David Anderson <danderson@tailscale.com>
2020-07-02 16:13:06 -07:00
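
For reference, a hypothetical sketch of what the *WithBuild comparison above could look like (the actual benchmark file in this commit may differ; the import path tailscale.com/smallzstd is assumed):

package smallzstd_test

import (
	"testing"

	"github.com/klauspost/compress/zstd"
	"tailscale.com/smallzstd"
)

// Construct a fresh small encoder every iteration, so the reported
// bytes/op reflect the cost of building the encoder itself.
func BenchmarkSmallEncoderWithBuild(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		e, err := smallzstd.NewEncoder(nil)
		if err != nil {
			b.Fatal(err)
		}
		e.EncodeAll([]byte("hello"), nil)
		e.Close()
	}
}

// The same measurement with the library's stock defaults, for comparison.
func BenchmarkStockEncoderWithBuild(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		e, err := zstd.NewWriter(nil)
		if err != nil {
			b.Fatal(err)
		}
		e.EncodeAll([]byte("hello"), nil)
		e.Close()
	}
}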

// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package smallzstd produces zstd encoders and decoders optimized for
// low memory usage, at the expense of compression efficiency.
//
// This package is optimized primarily for the memory cost of
// compressing and decompressing data. We reduce this cost in two
// major ways: disable parallelism within the library (i.e. don't use
// multiple CPU cores to decompress), and drop the compression window
// down from the defaults of 4-16MiB, to 8kiB.
//
// Decompressors cost 2x the window size in RAM to run, so by using an
// 8kiB window, we can run ~1000x more decompressors per unit of memory
// than with the defaults.
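//
// For example, taking an 8MiB window from that default range: a
// single decompressor needs roughly 2*8MiB = 16MiB of RAM, versus
// roughly 16kiB with an 8kiB window, which is where the ~1000x
// figure comes from.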
//
// Depending on context, the benefit is either being able to run more
// decoders (e.g. in our logs processing system), or having a lower
// memory footprint when using compression in network protocols
// (e.g. in tailscaled, which should have a minimal RAM cost).
package smallzstd

import (
	"io"

	"github.com/klauspost/compress/zstd"
)

// WindowSize is the window size used for zstd compression. Decoder
// memory usage scales linearly with WindowSize.
const WindowSize = 8 << 10 // 8kiB

// NewDecoder returns a zstd.Decoder configured for low memory usage,
// at the expense of decompression performance.
func NewDecoder(r io.Reader, options ...zstd.DOption) (*zstd.Decoder, error) {
	defaults := []zstd.DOption{
		// Default is GOMAXPROCS, which costs many KiB in stacks.
		zstd.WithDecoderConcurrency(1),
		// Default is to allocate more upfront for performance. We
		// prefer lower memory use and a bit of GC load.
		zstd.WithDecoderLowmem(true),
		// You might expect to see zstd.WithDecoderMaxMemory
		// here. However, it's not terribly safe to use if you're
		// doing stateless decoding, because it sets the maximum
		// amount of memory the decompressed data can occupy, rather
		// than the window size of the zstd stream. This means a very
		// compressible piece of data might violate the max memory
		// limit here, even if the window size (and thus total memory
		// required to decompress the data) is small.
		//
		// As a result, we don't set a decoder limit here, and rely on
		// the encoder below producing "cheap" streams. Callers are
		// welcome to set their own max memory setting, if
		// contextually there is a clearly correct value (e.g. it's
		// known from the upper layer protocol that the decoded data
		// can never be more than 1MiB).
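		//
		// For example (an illustrative sketch, not something this
		// package enforces; 1MiB is an assumed upper-layer bound and
		// the option comes from github.com/klauspost/compress/zstd):
		//
		//	d, err := NewDecoder(r, zstd.WithDecoderMaxMemory(1<<20))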
	}

	return zstd.NewReader(r, append(defaults, options...)...)
}

// NewEncoder returns a zstd.Encoder configured for low memory usage,
// both during compression and at decompression time, at the expense
// of performance and compression efficiency.
func NewEncoder(w io.Writer, options ...zstd.EOption) (*zstd.Encoder, error) {
	defaults := []zstd.EOption{
		// Default is GOMAXPROCS, which costs many KiB in stacks.
		zstd.WithEncoderConcurrency(1),
		// Default is several MiB, which bloats both encoders and
		// their corresponding decoders.
		zstd.WithWindowSize(WindowSize),
		// Encode zero-length inputs in a way that the `zstd` utility
		// can read, because interoperability is handy.
		zstd.WithZeroFrames(true),
	}

	return zstd.NewWriter(w, append(defaults, options...)...)
}
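
As a usage note, here is a minimal sketch of the package in stateless mode, assuming the import path tailscale.com/smallzstd; passing nil readers/writers and using EncodeAll/DecodeAll is the stateless API of github.com/klauspost/compress/zstd:

package main

import (
	"fmt"
	"log"

	"tailscale.com/smallzstd"
)

func main() {
	// nil writer/reader: we only use the stateless EncodeAll/DecodeAll calls.
	enc, err := smallzstd.NewEncoder(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()

	dec, err := smallzstd.NewDecoder(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	compressed := enc.EncodeAll([]byte("hello, small zstd"), nil)

	plain, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%q\n", plain)
}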