// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package deephash hashes a Go value recursively, in a predictable order,
// without looping. The hash is only valid within the lifetime of a program.
// Users should not store the hash on disk or send it over the network.
// The hash is sufficiently strong and unique such that
// Hash(x) == Hash(y) is an appropriate replacement for x == y.
//
// This package, like most of the tailscale.com Go module, should be
// considered Tailscale-internal; we make no API promises.
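//
// A hypothetical usage sketch (Config and its fields are made up for
// illustration; only Hash and Sum come from this package):
//
//	a := &Config{Hostname: "x"}
//	b := &Config{Hostname: "x"}
//	if deephash.Hash(a) == deephash.Hash(b) {
//		// a and b are deeply equal.
//	}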
package deephash

import (
	"bufio"
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"hash"
	"math"
	"reflect"
	"strconv"
	"sync"
	"time"
)

const scratchSize = 128

// hasher is reusable state for hashing a value.
// Get one via hasherPool.
type hasher struct {
	h       hash.Hash
	bw      *bufio.Writer
	scratch [scratchSize]byte
	visited map[uintptr]bool
}

// newHasher initializes a new hasher, for use by hasherPool.
func newHasher() *hasher {
	h := &hasher{
		h:       sha256.New(),
		visited: map[uintptr]bool{},
	}
	h.bw = bufio.NewWriterSize(h.h, h.h.BlockSize())
	return h
}

// setBufioWriter switches the bufio writer to w after flushing
// any output to the old one. It then also returns the old one, so
// the caller can switch back to it.
func (h *hasher) setBufioWriter(w *bufio.Writer) (old *bufio.Writer) {
	old = h.bw
	old.Flush()
	h.bw = w
	return old
}

// Sum is an opaque checksum type that is comparable.
type Sum struct {
	sum [sha256.Size]byte
}

func (s Sum) String() string {
	return hex.EncodeToString(s.sum[:])
}
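
// Because Sum contains only a fixed-size byte array, it is comparable
// with == and usable as a map key. A hypothetical caller-side sketch
// (seen is a made-up variable, not part of this package):
//
//	seen := map[deephash.Sum]bool{}
//	seen[deephash.Hash(v)] = true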

var (
	once sync.Once
	seed uint64
)

// Hash returns the hash of v.
func (h *hasher) Hash(v interface{}) (hash Sum) {
	h.bw.Flush()
	h.h.Reset()
	once.Do(func() {
		seed = uint64(time.Now().UnixNano())
	})
	h.uint(seed)
	h.print(reflect.ValueOf(v))
	h.bw.Flush()
	// Sum into scratch & copy out, as hash.Hash is an interface
	// so the slice necessarily escapes, and there's no sha256
	// concrete type exported and we don't want the 'hash' result
	// parameter to escape to the heap:
	h.h.Sum(h.scratch[:0])
	copy(hash.sum[:], h.scratch[:])
	return
}

var hasherPool = &sync.Pool{
	New: func() interface{} { return newHasher() },
}

// Hash returns the hash of v.
func Hash(v interface{}) Sum {
	h := hasherPool.Get().(*hasher)
	defer hasherPool.Put(h)
	for k := range h.visited {
		delete(h.visited, k)
	}
	return h.Hash(v)
}

// Update sets last to the hash of v and reports whether its value changed.
func Update(last *Sum, v ...interface{}) (changed bool) {
	sum := Hash(v)
	if sum == *last {
		// unchanged.
		return false
	}
	*last = sum
	return true
}
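
// A hypothetical Update usage sketch (pollNetMap is made up; only Update
// and Sum come from this package):
//
//	var last deephash.Sum
//	for {
//		nm := pollNetMap()
//		if deephash.Update(&last, nm) {
//			// nm's deep value changed since the previous iteration.
//		}
//	}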

var appenderToType = reflect.TypeOf((*appenderTo)(nil)).Elem()

type appenderTo interface {
	AppendTo([]byte) []byte
}
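
// A minimal sketch of a type satisfying appenderTo (myKey is made up for
// illustration). Note that print (below) checks
// v.Type().Implements(appenderToType) on the value type, so AppendTo must
// use a value receiver for the fast path to trigger:
//
//	type myKey [4]byte
//
//	func (k myKey) AppendTo(b []byte) []byte {
//		return append(b, k[:]...)
//	}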

// uint writes i to the hash as 8 big-endian bytes.
func (h *hasher) uint(i uint64) {
	binary.BigEndian.PutUint64(h.scratch[:8], i)
	h.bw.Write(h.scratch[:8])
}

// int writes i to the hash as 8 big-endian bytes.
func (h *hasher) int(i int) {
	binary.BigEndian.PutUint64(h.scratch[:8], uint64(i))
	h.bw.Write(h.scratch[:8])
}

var uint8Type = reflect.TypeOf(byte(0))

// print hashes v into w.
// It reports whether it was able to do so without hitting a cycle.
func (h *hasher) print(v reflect.Value) (acyclic bool) {
	if !v.IsValid() {
		return true
	}

	w := h.bw
	visited := h.visited

	if v.CanInterface() {
		// Use AppendTo methods, if available and cheap.
		if v.CanAddr() && v.Type().Implements(appenderToType) {
			a := v.Addr().Interface().(appenderTo)
			size := h.scratch[:8]
			record := a.AppendTo(size)
			// Frame the record as an 8-byte big-endian payload length
			// (matching h.uint's encoding) followed by the payload.
			binary.BigEndian.PutUint64(record, uint64(len(record)-len(size)))
			w.Write(record)
			return true
		}
	}

	// Generic handling.
	switch v.Kind() {
	default:
		panic(fmt.Sprintf("unhandled kind %v for type %v", v.Kind(), v.Type()))
	case reflect.Ptr:
		ptr := v.Pointer()
		if visited[ptr] {
			return false
		}
		visited[ptr] = true
		return h.print(v.Elem())
	case reflect.Struct:
		acyclic = true
		w.WriteString("struct")
		h.int(v.NumField())
		for i, n := 0, v.NumField(); i < n; i++ {
			h.int(i)
			if !h.print(v.Field(i)) {
				acyclic = false
			}
		}
		return acyclic
	case reflect.Slice, reflect.Array:
		vLen := v.Len()
		if v.Kind() == reflect.Slice {
			h.int(vLen)
		}
		if v.Type().Elem() == uint8Type && v.CanInterface() {
			if vLen > 0 && vLen <= scratchSize {
				// If it fits in scratch, avoid the Interface allocation.
				// It seems tempting to do this for all sizes, doing
				// scratchSize bytes at a time, but reflect.Slice seems
				// to allocate, so it's not a win.
				n := reflect.Copy(reflect.ValueOf(&h.scratch).Elem(), v)
				w.Write(h.scratch[:n])
				return true
			}
			fmt.Fprintf(w, "%s", v.Interface())
			return true
		}
		acyclic = true
		for i := 0; i < vLen; i++ {
			h.int(i)
			if !h.print(v.Index(i)) {
				acyclic = false
			}
		}
		return acyclic
	case reflect.Interface:
		if v.IsNil() {
			w.WriteByte(0) // indicates nil
			return true
		}
		v = v.Elem()

		w.WriteByte(1) // indicates visiting interface value
		h.hashType(v.Type())
		return h.print(v)
	case reflect.Map:
		// TODO(bradfitz): ideally we'd avoid these map
		// operations to detect cycles if we knew from the map
		// element type that there's no way to form a cycle,
		// which is the common case. Notably, we don't mind
		// hashing the same map+contents twice in different
		// parts of the tree; in fact, ideally we would, and
		// this cycle check prevents it. We should only stop
		// hashing when there's an actual cycle. What we should
		// probably do is enumerate the data structure tree in
		// a fixed order, give each pointer an increasing
		// number, and when we hit a dup, rather than emitting
		// nothing, emit a "value #12" reference. That implies
		// everything emitted to the bufio.Writer should be
		// type-tagged so we can distinguish loop references
		// without risk of collisions.
		ptr := v.Pointer()
		if visited[ptr] {
			return false
		}
		visited[ptr] = true

		if h.hashMapAcyclic(v) {
			return true
		}
		return h.hashMapFallback(v)
	case reflect.String:
		h.int(v.Len())
		w.WriteString(v.String())
	case reflect.Bool:
		w.Write(strconv.AppendBool(h.scratch[:0], v.Bool()))
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		w.Write(strconv.AppendInt(h.scratch[:0], v.Int(), 10))
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		h.uint(v.Uint())
	case reflect.Float32, reflect.Float64:
		w.Write(strconv.AppendUint(h.scratch[:0], math.Float64bits(v.Float()), 10))
	case reflect.Complex64, reflect.Complex128:
		fmt.Fprintf(w, "%v", v.Complex())
	}
	return true
}

type mapHasher struct {
	xbuf [sha256.Size]byte // XOR'ed accumulated buffer
	ebuf [sha256.Size]byte // scratch buffer
	s256 hash.Hash         // sha256 hash.Hash
	bw   *bufio.Writer     // writes to s256; endEntry sums s256 into ebuf
	val  valueCache        // re-usable values for map iteration
	iter *reflect.MapIter  // re-usable map iterator
}

func (mh *mapHasher) Reset() {
	for i := range mh.xbuf {
		mh.xbuf[i] = 0
	}
}

func (mh *mapHasher) startEntry() {
	for i := range mh.ebuf {
		mh.ebuf[i] = 0
	}
	mh.bw.Flush()
	mh.s256.Reset()
}

func (mh *mapHasher) endEntry() {
	mh.bw.Flush()
	for i, b := range mh.s256.Sum(mh.ebuf[:0]) {
		mh.xbuf[i] ^= b
	}
}
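
// The startEntry/endEntry pair above is what makes hashMapAcyclic (below)
// sort-free: each map entry is hashed in isolation and the per-entry
// digests are folded into xbuf with XOR, which is commutative and
// associative, so Go's randomized map iteration order cannot change the
// result. A standalone sketch of the same idea (illustrative only, not
// used here):
//
//	func xorFold(entries [][]byte) (acc [sha256.Size]byte) {
//		for _, e := range entries {
//			d := sha256.Sum256(e) // hash one entry in isolation
//			for i := range acc {
//				acc[i] ^= d[i] // fold; entry order is irrelevant
//			}
//		}
//		return acc
//	}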

var mapHasherPool = &sync.Pool{
	New: func() interface{} {
		mh := new(mapHasher)
		mh.s256 = sha256.New()
		mh.bw = bufio.NewWriter(mh.s256)
		mh.val = make(valueCache)
		mh.iter = new(reflect.MapIter)
		return mh
	},
}

type valueCache map[reflect.Type]reflect.Value

func (c valueCache) get(t reflect.Type) reflect.Value {
	v, ok := c[t]
	if !ok {
		v = reflect.New(t).Elem()
		c[t] = v
	}
	return v
}

// hashMapAcyclic is the faster sort-free version of map hashing. If
// it detects a cycle it returns false and guarantees that nothing was
// written to w.
func (h *hasher) hashMapAcyclic(v reflect.Value) (acyclic bool) {
	mh := mapHasherPool.Get().(*mapHasher)
	defer mapHasherPool.Put(mh)
	mh.Reset()
	iter := mapIter(mh.iter, v)
	defer mapIter(mh.iter, reflect.Value{}) // avoid pinning v from mh.iter when we return

	// Temporarily switch to the map hasher's bufio.Writer.
	oldw := h.setBufioWriter(mh.bw)
	defer h.setBufioWriter(oldw)

	k := mh.val.get(v.Type().Key())
	e := mh.val.get(v.Type().Elem())
	for iter.Next() {
		key := iterKey(iter, k)
		val := iterVal(iter, e)
		mh.startEntry()
		if !h.print(key) {
			return false
		}
		if !h.print(val) {
			return false
		}
		mh.endEntry()
	}
	oldw.Write(mh.xbuf[:])
	return true
}

func (h *hasher) hashMapFallback(v reflect.Value) (acyclic bool) {
	acyclic = true
	sm := newSortedMap(v)
	w := h.bw
	fmt.Fprintf(w, "map[%d]{\n", len(sm.Key))
	for i, k := range sm.Key {
		if !h.print(k) {
			acyclic = false
		}
		w.WriteString(": ")
		if !h.print(sm.Value[i]) {
			acyclic = false
		}
		w.WriteString("\n")
	}
	w.WriteString("}\n")
	return acyclic
}

// hashType hashes a reflect.Type.
// The hash is only consistent within the lifetime of a program.
func (h *hasher) hashType(t reflect.Type) {
	// This approach relies on reflect.Type always being backed by a unique
	// *reflect.rtype pointer. A safer approach is to use a global sync.Map
	// that maps reflect.Type to some arbitrary and unique index.
	// While safer, it requires global state with memory that can never be GC'd.
	rtypeAddr := reflect.ValueOf(t).Pointer() // address of *reflect.rtype
	h.uint(uint64(rtypeAddr))
}
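
// The comment in hashType mentions a safer sync.Map-based alternative.
// A sketch of what that could look like (hypothetical; not used here, and
// as noted above it retains an entry per type for the life of the program):
//
//	var (
//		typeIndex   sync.Map // reflect.Type => uint64
//		typeIndexMu sync.Mutex
//		typeCount   uint64 // guarded by typeIndexMu
//	)
//
//	func indexOfType(t reflect.Type) uint64 {
//		if v, ok := typeIndex.Load(t); ok {
//			return v.(uint64)
//		}
//		typeIndexMu.Lock()
//		defer typeIndexMu.Unlock()
//		if v, ok := typeIndex.Load(t); ok {
//			return v.(uint64) // another goroutine won the race
//		}
//		typeCount++
//		typeIndex.Store(t, typeCount)
//		return typeCount
//	}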