// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

// Package deephash hashes a Go value recursively, in a predictable order,
// without looping. The hash is only valid within the lifetime of a program.
// Users should not store the hash on disk or send it over the network.
// The hash is sufficiently strong and unique such that
// Hash(&x) == Hash(&y) is an appropriate replacement for x == y.
//
// The definition of equality is identical to reflect.DeepEqual except:
//   - Floating-point values are compared based on the raw bits,
//     which means that NaNs (with the same bit pattern) are treated as equal.
//   - time.Time are compared based on whether they are the same instant in time
//     and also in the same zone offset. Monotonic measurements and zone names
//     are ignored as part of the hash.
//   - netip.Addr are compared based on a shallow comparison of the struct.
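//
// For example, a minimal sketch of typical usage (Config is a hypothetical type):
//
//	type Config struct {
//		Name string
//		Port int
//	}
//	a := Config{Name: "web", Port: 80}
//	b := Config{Name: "web", Port: 80}
//	same := deephash.Hash(&a) == deephash.Hash(&b) // true iff a and b are deeply equal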
//
// WARNING: This package, like most of the tailscale.com Go module,
// should be considered Tailscale-internal; we make no API promises.
//
// # Cycle detection
//
// This package correctly handles cycles in the value graph,
// but in a way that is potentially pathological in some situations.
//
// The algorithm for cycle detection operates by
// pushing a pointer onto a stack whenever deephash is visiting a pointer and
// popping the pointer from the stack after deephash is leaving the pointer.
// Before visiting a new pointer, deephash checks whether it has already been
// visited on the pointer stack. If so, it hashes the index of the pointer
// on the stack and avoids visiting the pointer.
//
// This algorithm is guaranteed to detect cycles, but may expand pointers
// more often than a potential alternate algorithm that remembers all pointers
// ever visited in a map. The current algorithm uses O(D) memory, where D
// is the maximum depth of the recursion, while the alternate algorithm
// would use O(P) memory where P is all pointers ever seen, which can be a lot,
// and most of which may have nothing to do with cycles.
// Also, the alternate algorithm has to deal with challenges of producing
// deterministic results when pointers are visited in non-deterministic ways
// such as when iterating through a Go map. The stack-based algorithm avoids
// this challenge since the stack is always deterministic regardless of
// non-deterministic iteration order of Go maps.
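//
// In rough sketch form, every pointer-like visit follows this pattern
// (the names here are illustrative; the real logic lives in this package's
// unexported per-type hashers):
//
//	if idx, ok := h.visitStack.seen(ptr); ok {
//		h.HashUint8(2)            // cycle: hash the stack index instead
//		h.HashUint64(uint64(idx))
//		return
//	}
//	h.visitStack.push(ptr)
//	defer h.visitStack.pop(ptr)
//	// ... hash the pointed-at value ...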
//
// To concretely see how this algorithm can be pathological,
// consider the following data structure:
//
//	var big *Item = ... // some large data structure that is slow to hash
//	var manyBig []*Item
//	for i := 0; i < 1000; i++ {
//		manyBig = append(manyBig, big)
//	}
//	deephash.Hash(&manyBig)
//
// Here, the manyBig data structure is not even cyclic.
// We have the same big *Item being stored multiple times in a []*Item.
// When deephash hashes []*Item, it hashes each individual *Item
// not realizing that it had just done the computation earlier.
// To avoid the pathological situation, Item should implement [SelfHasher] and
// memoize attempts to hash itself.
package deephash

// TODO: Add option to teach deephash to memoize the Hash result of particular types?

import (
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"reflect"
	"sync"
	"time"

	"tailscale.com/util/hashx"
	"tailscale.com/util/set"
)

// There is much overlap between the theory of serialization and hashing.
// A hash (useful for determining equality) can be produced by printing a value
// and hashing the output. The format must:
//   - be deterministic such that the same value hashes to the same output, and
//   - be parsable such that the same value can be reproduced by the output.
//
// The logic below hashes a value by printing it to a hash.Hash.
// To be parsable, it assumes that we know the Go type of each value:
//   - scalar types (e.g., bool or int32) are directly printed as their
//     underlying memory representation.
//   - list types (e.g., strings and slices) are prefixed by a
//     fixed-width length field, followed by the contents of the list.
//   - slices, arrays, and structs print each element/field consecutively.
//   - interfaces print with a 1-byte prefix indicating whether it is nil.
//     If non-nil, it is followed by a fixed-width field of the type index,
//     followed by the format of the underlying value.
//   - pointers print with a 1-byte prefix indicating whether the pointer is
//     1) nil, 2) previously seen, or 3) newly seen. Previously seen pointers are
//     followed by a fixed-width field with the index of the previous pointer.
//     Newly seen pointers are followed by the format of the underlying value.
//   - maps print with a 1-byte prefix indicating whether the map pointer is
//     1) nil, 2) previously seen, or 3) newly seen. Previously seen pointers
//     are followed by a fixed-width field of the index of the previous pointer.
//     Newly seen maps are printed with a fixed-width length field, followed by
//     a fixed-width field with the XOR of the hash of every map entry.
//     With a sufficiently strong hash, this value is theoretically "parsable"
//     by looking up the hash in a magical map that returns the set of entries
//     for that given hash.
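//
// As a rough illustrative sketch (not a normative byte layout), hashing a
// hypothetical struct{ S string; N uint32 } with S == "hi" and N == 7 would
// feed the underlying hash roughly these records:
//
//	HashUint64(2)    // fixed-width length of S
//	HashString("hi") // contents of S
//	HashBytes(nMem)  // the 4 raw bytes of N's in-memory representation (nMem is illustrative)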

// SelfHasher is implemented by types that can compute their own hash
// by writing values through the provided [Hasher] parameter.
// Implementations must not leak the provided [Hasher].
//
// If the implementation of SelfHasher recursively calls [deephash.Hash],
// then infinite recursion is quite likely to occur.
// To avoid this, use a type definition to drop methods before calling [deephash.Hash]:
//
//	func (v *MyType) Hash(h deephash.Hasher) {
//		v.hashMu.Lock()
//		defer v.hashMu.Unlock()
//		if v.dirtyHash {
//			type MyTypeWithoutMethods MyType // type define MyType to drop Hash method
//			v.dirtyHash = false // clear out dirty bit to avoid hashing over it
//			v.hashSum = deephash.Sum{} // clear out hashSum to avoid hashing over it
//			v.hashSum = deephash.Hash((*MyTypeWithoutMethods)(v))
//		}
//		h.HashSum(v.hashSum)
//	}
//
// In the above example, we acquire a lock since it is possible that deephash
// is called in a concurrent manner, which implies that MyType.Hash may also
// be called in a concurrent manner. Whether this lock is necessary is
// application-dependent and left as an exercise to the reader.
// Also, the example assumes that dirtyHash is set elsewhere by application
// logic whenever a mutation is made to MyType that would alter the hash.
type SelfHasher interface {
	Hash(Hasher)
}

// Hasher is a value passed to [SelfHasher.Hash] that allows implementations
// to hash themselves in a structured manner.
type Hasher struct{ h *hashx.Block512 }

// HashBytes hashes a sequence of bytes b.
// The length of b is not explicitly hashed.
func (h Hasher) HashBytes(b []byte) { h.h.HashBytes(b) }

// HashString hashes the string data of s.
// The length of s is not explicitly hashed.
func (h Hasher) HashString(s string) { h.h.HashString(s) }

// HashUint8 hashes a uint8.
func (h Hasher) HashUint8(n uint8) { h.h.HashUint8(n) }

// HashUint16 hashes a uint16.
func (h Hasher) HashUint16(n uint16) { h.h.HashUint16(n) }

// HashUint32 hashes a uint32.
func (h Hasher) HashUint32(n uint32) { h.h.HashUint32(n) }

// HashUint64 hashes a uint64.
func (h Hasher) HashUint64(n uint64) { h.h.HashUint64(n) }

// HashSum hashes a [Sum].
func (h Hasher) HashSum(s Sum) {
	// NOTE: Avoid calling h.HashBytes since it escapes its byte-slice argument,
	// which would force s to be heap allocated.
	h.h.HashUint64(binary.LittleEndian.Uint64(s.sum[0:8]))
	h.h.HashUint64(binary.LittleEndian.Uint64(s.sum[8:16]))
	h.h.HashUint64(binary.LittleEndian.Uint64(s.sum[16:24]))
	h.h.HashUint64(binary.LittleEndian.Uint64(s.sum[24:32]))
}

// hasher is reusable state for hashing a value.
// Get one via hasherPool.
type hasher struct {
	hashx.Block512
	visitStack visitStack
}

var hasherPool = &sync.Pool{
	New: func() any { return new(hasher) },
}

func (h *hasher) reset() {
	if h.Block512.Hash == nil {
		h.Block512.Hash = sha256.New()
	}
	h.Block512.Reset()
}

// hashType hashes a reflect.Type.
// The hash is only consistent within the lifetime of a program.
func (h *hasher) hashType(t reflect.Type) {
	// This approach relies on reflect.Type always being backed by a unique
	// *reflect.rtype pointer. A safer approach is to use a global sync.Map
	// that maps reflect.Type to some arbitrary and unique index.
	// While safer, it requires global state with memory that can never be GC'd.
	rtypeAddr := reflect.ValueOf(t).Pointer() // address of *reflect.rtype
	h.HashUint64(uint64(rtypeAddr))
}

func (h *hasher) sum() (s Sum) {
	h.Sum(s.sum[:0])
	return s
}

// Sum is an opaque checksum type that is comparable.
type Sum struct {
	sum [sha256.Size]byte
}

func (s1 *Sum) xor(s2 Sum) {
	for i := 0; i < sha256.Size; i++ {
		s1.sum[i] ^= s2.sum[i]
	}
}

func (s Sum) String() string {
	// Note: if we change this, keep in sync with AppendTo
	return hex.EncodeToString(s.sum[:])
}

// AppendTo appends the string encoding of this sum (as returned by the String
// method) to the provided byte slice and returns the extended buffer.
func (s Sum) AppendTo(b []byte) []byte {
	// TODO: switch to upstream implementation if accepted:
	// https://github.com/golang/go/issues/53693
	var lb [len(s.sum) * 2]byte
	hex.Encode(lb[:], s.sum[:])
	return append(b, lb[:]...)
}

var (
	seedOnce sync.Once
	seed     uint64
)

func initSeed() {
	seed = uint64(time.Now().UnixNano())
}

// Hash returns the hash of v.
func Hash[T any](v *T) Sum {
	h := hasherPool.Get().(*hasher)
	defer hasherPool.Put(h)
	h.reset()
	seedOnce.Do(initSeed)
	h.HashUint64(seed)

	// Always treat the Hash input as if it were an interface by including
	// a hash of the type. This ensures that hashing of two different types
	// but with the same value structure produces different hashes.
	t := reflect.TypeFor[T]()
	h.hashType(t)
	if v == nil {
		h.HashUint8(0) // indicates nil
	} else {
		h.HashUint8(1) // indicates visiting pointer element
		p := pointerOf(reflect.ValueOf(v))
		hash := lookupTypeHasher(t)
		hash(h, p)
	}
	return h.sum()
}

// Option is an optional argument to HasherForType.
type Option interface {
	isOption()
}

type fieldFilterOpt struct {
	t              reflect.Type
	fields         set.Set[string]
	includeOnMatch bool // true to include fields, false to exclude them
}

func (fieldFilterOpt) isOption() {}

func (f fieldFilterOpt) filterStructField(sf reflect.StructField) (include bool) {
	if f.fields.Contains(sf.Name) {
		return f.includeOnMatch
	}
	return !f.includeOnMatch
}

// IncludeFields returns an option that modifies the hashing for T to only
// include the named struct fields.
//
// T must be a struct type, and must match the type of the value passed to
// HasherForType.
func IncludeFields[T any](fields ...string) Option {
	return newFieldFilter[T](true, fields)
}

// ExcludeFields returns an option that modifies the hashing for T to include
// all struct fields of T except those provided in fields.
//
// T must be a struct type, and must match the type of the value passed to
// HasherForType.
func ExcludeFields[T any](fields ...string) Option {
	return newFieldFilter[T](false, fields)
}

func newFieldFilter[T any](include bool, fields []string) Option {
	t := reflect.TypeFor[T]()
	fieldSet := set.Set[string]{}
	for _, f := range fields {
		if _, ok := t.FieldByName(f); !ok {
			panic(fmt.Sprintf("unknown field %q for type %v", f, t))
		}
		fieldSet.Add(f)
	}
	return fieldFilterOpt{t, fieldSet, include}
}

// HasherForType returns a hash function that is specialized for the provided type.
//
// HasherForType panics if the opts are invalid for the provided type.
//
// Currently, at most one option can be provided (IncludeFields or
// ExcludeFields) and its type must match the type of T. Those restrictions may
// be removed in the future, along with documentation about their precedence
// when combined.
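//
// A minimal sketch of intended use (Node and its LastSeen field are hypothetical):
//
//	hashNode := deephash.HasherForType[Node](deephash.ExcludeFields[Node]("LastSeen"))
//	sum := hashNode(&node)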
func HasherForType[T any](opts ...Option) func(*T) Sum {
	seedOnce.Do(initSeed)
	if len(opts) > 1 {
		panic("HasherForType only accepts one optional argument") // for now
	}
	t := reflect.TypeFor[T]()
	var hash typeHasherFunc
	for _, o := range opts {
		switch o := o.(type) {
		default:
			panic(fmt.Sprintf("unknown HasherOpt %T", o))
		case fieldFilterOpt:
			if t.Kind() != reflect.Struct {
				panic("HasherForStructTypeWithFieldFilter requires T of kind struct")
			}
			if t != o.t {
				panic(fmt.Sprintf("field filter for type %v does not match HasherForType type %v", o.t, t))
			}
			hash = makeStructHasher(t, o.filterStructField)
		}
	}
	if hash == nil {
		hash = lookupTypeHasher(t)
	}
	return func(v *T) (s Sum) {
		// This logic is identical to Hash, but with a few statements
		// hoisted out of the returned closure.
		h := hasherPool.Get().(*hasher)
		defer hasherPool.Put(h)
		h.reset()
		h.HashUint64(seed)

		h.hashType(t)
		if v == nil {
			h.HashUint8(0) // indicates nil
		} else {
			h.HashUint8(1) // indicates visiting pointer element
			p := pointerOf(reflect.ValueOf(v))
			hash(h, p)
		}
		return h.sum()
	}
}

// Update sets last to the hash of v and reports whether its value changed.
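//
// A typical change-detection sketch (cfg is a hypothetical value being watched):
//
//	var lastSum deephash.Sum
//	if deephash.Update(&lastSum, &cfg) {
//		// cfg changed since the previous call; react to the change here.
//	}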
func Update[T any](last *Sum, v *T) (changed bool) {
	sum := Hash(v)
	changed = sum != *last
	if changed {
		*last = sum
	}
	return changed
}

// typeHasherFunc hashes the value pointed at by p for a given type.
// For example, if t is a bool, then p is a *bool.
// The provided pointer must always be non-nil.
type typeHasherFunc func(h *hasher, p pointer)

var typeHasherCache sync.Map // map[reflect.Type]typeHasherFunc
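
// lookupTypeHasher returns the typeHasherFunc for t,
// creating it and caching it in typeHasherCache on first use.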
func lookupTypeHasher(t reflect.Type) typeHasherFunc {
	if v, ok := typeHasherCache.Load(t); ok {
		return v.(typeHasherFunc)
	}
	hash := makeTypeHasher(t)
	v, _ := typeHasherCache.LoadOrStore(t, hash)
	return v.(typeHasherFunc)
}
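
// makeTypeHasher constructs a typeHasherFunc for values of type t.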
func makeTypeHasher(t reflect.Type) typeHasherFunc {
	// Types with specific hashing.
	switch t {
	case timeTimeType:
		return hashTime
	case netipAddrType:
		return hashAddr
	}

	// Types that implement their own hashing.
	if t.Kind() != reflect.Pointer && t.Kind() != reflect.Interface {
		// A method can be implemented on either the value receiver or pointer receiver.
		if t.Implements(selfHasherType) || reflect.PointerTo(t).Implements(selfHasherType) {
			return makeSelfHasher(t)
		}
	}

	// Types that can have their memory representation directly hashed.
	if typeIsMemHashable(t) {
		return makeMemHasher(t.Size())
	}

	switch t.Kind() {
	case reflect.String:
		return hashString
	case reflect.Array:
		return makeArrayHasher(t)
	case reflect.Slice:
		return makeSliceHasher(t)
	case reflect.Struct:
		return makeStructHasher(t, keepAllStructFields)
	case reflect.Map:
		return makeMapHasher(t)
	case reflect.Pointer:
		return makePointerHasher(t)
	case reflect.Interface:
		return makeInterfaceHasher(t)
	default: // Func, Chan, UnsafePointer
		return func(*hasher, pointer) {}
	}
}

func hashTime(h *hasher, p pointer) {
	// Include the zone offset (but not the name) to keep
	// Hash(t1) == Hash(t2) being semantically equivalent to
	// t1.Format(time.RFC3339Nano) == t2.Format(time.RFC3339Nano).
	t := *p.asTime()
	_, offset := t.Zone()
	h.HashUint64(uint64(t.Unix()))
	h.HashUint32(uint32(t.Nanosecond()))
	h.HashUint32(uint32(offset))
}

func hashAddr(h *hasher, p pointer) {
	// The formatting of netip.Addr covers the
	// IP version, the address, and the optional zone name (for v6).
	// This is equivalent to a1.MarshalBinary() == a2.MarshalBinary().
	ip := *p.asAddr()
	switch {
	case !ip.IsValid():
		h.HashUint64(0)
	case ip.Is4():
		b := ip.As4()
		h.HashUint64(4)
		h.HashUint32(binary.LittleEndian.Uint32(b[:]))
	case ip.Is6():
		b := ip.As16()
		z := ip.Zone()
		h.HashUint64(16 + uint64(len(z)))
		h.HashUint64(binary.LittleEndian.Uint64(b[:8]))
		h.HashUint64(binary.LittleEndian.Uint64(b[8:]))
		h.HashString(z)
	}
}

func makeSelfHasher(t reflect.Type) typeHasherFunc {
	return func(h *hasher, p pointer) {
		p.asValue(t).Interface().(SelfHasher).Hash(Hasher{&h.Block512})
	}
}

func hashString(h *hasher, p pointer) {
	s := *p.asString()
	h.HashUint64(uint64(len(s)))
	h.HashString(s)
}
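
// makeMemHasher returns a hasher that hashes the n bytes of memory
// at the pointed-to location directly.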
func makeMemHasher(n uintptr) typeHasherFunc {
	return func(h *hasher, p pointer) {
		h.HashBytes(p.asMemory(n))
	}
}

func makeArrayHasher(t reflect.Type) typeHasherFunc {
	var once sync.Once
	var hashElem typeHasherFunc
	init := func() {
		hashElem = lookupTypeHasher(t.Elem())
	}

	n := t.Len()          // number of array elements
	nb := t.Elem().Size() // byte size of each array element
	return func(h *hasher, p pointer) {
		once.Do(init)
		for i := 0; i < n; i++ {
			hashElem(h, p.arrayIndex(i, nb))
		}
	}
}

func makeSliceHasher(t reflect.Type) typeHasherFunc {
	nb := t.Elem().Size() // byte size of each slice element
	if typeIsMemHashable(t.Elem()) {
		return func(h *hasher, p pointer) {
			pa := p.sliceArray()
			if pa.isNil() {
				h.HashUint8(0) // indicates nil
				return
			}
			h.HashUint8(1) // indicates visiting slice
			n := p.sliceLen()
			b := pa.asMemory(uintptr(n) * nb)
			h.HashUint64(uint64(n))
			h.HashBytes(b)
		}
	}

	var once sync.Once
	var hashElem typeHasherFunc
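	// hashElem is resolved lazily (inside the returned closure) so that
	// building a hasher for a recursive slice type (e.g. type T []T)
	// does not recurse forever during construction.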
	init := func() {
		hashElem = lookupTypeHasher(t.Elem())
		if typeIsRecursive(t) {
			hashElemDefault := hashElem
			hashElem = func(h *hasher, p pointer) {
				if idx, ok := h.visitStack.seen(p.p); ok {
					h.HashUint8(2) // indicates cycle
					h.HashUint64(uint64(idx))
					return
				}
				h.HashUint8(1) // indicates visiting slice element
				h.visitStack.push(p.p)
				defer h.visitStack.pop(p.p)
				hashElemDefault(h, p)
			}
		}
	}

	return func(h *hasher, p pointer) {
		pa := p.sliceArray()
		if pa.isNil() {
			h.HashUint8(0) // indicates nil
			return
		}
		once.Do(init)
		h.HashUint8(1) // indicates visiting slice
		n := p.sliceLen()
		h.HashUint64(uint64(n))
		for i := 0; i < n; i++ {
			pe := pa.arrayIndex(i, nb)
			hashElem(h, pe)
		}
	}
}

func keepAllStructFields(keepField reflect.StructField) bool { return true }

func makeStructHasher(t reflect.Type, keepField func(reflect.StructField) bool) typeHasherFunc {
	type fieldHasher struct {
		idx    int // index of field for reflect.Type.Field(n); negative if memory is directly hashable
		keep   bool
		hash   typeHasherFunc // only valid if idx is not negative
		offset uintptr
		size   uintptr
	}
	var once sync.Once
	var fields []fieldHasher
	init := func() {
		for i, numField := 0, t.NumField(); i < numField; i++ {
			sf := t.Field(i)
			f := fieldHasher{i, keepField(sf), nil, sf.Offset, sf.Type.Size()}
			if f.keep && typeIsMemHashable(sf.Type) {
				f.idx = -1
			}

			// Combine with previous field if both contiguous and mem-hashable.
			if f.idx < 0 && len(fields) > 0 {
				if last := &fields[len(fields)-1]; last.idx < 0 && last.offset+last.size == f.offset {
					last.size += f.size
					continue
				}
			}
			fields = append(fields, f)
		}

		for i, f := range fields {
			if f.idx >= 0 {
				fields[i].hash = lookupTypeHasher(t.Field(f.idx).Type)
			}
		}
	}

	return func(h *hasher, p pointer) {
		once.Do(init)
		for _, field := range fields {
			if !field.keep {
				continue
			}
			pf := p.structField(field.idx, field.offset, field.size)
			if field.idx < 0 {
				h.HashBytes(pf.asMemory(field.size))
			} else {
				field.hash(h, pf)
			}
		}
	}
}

func makeMapHasher(t reflect.Type) typeHasherFunc {
	var once sync.Once
	var hashKey, hashValue typeHasherFunc
	var isRecursive bool
	init := func() {
		hashKey = lookupTypeHasher(t.Key())
		hashValue = lookupTypeHasher(t.Elem())
		isRecursive = typeIsRecursive(t)
	}

	return func(h *hasher, p pointer) {
		v := p.asValue(t).Elem() // reflect.Map kind
		if v.IsNil() {
			h.HashUint8(0) // indicates nil
			return
		}
		once.Do(init)
		if isRecursive {
			pm := v.UnsafePointer() // underlying pointer of map
			if idx, ok := h.visitStack.seen(pm); ok {
				h.HashUint8(2) // indicates cycle
				h.HashUint64(uint64(idx))
				return
			}
			h.visitStack.push(pm)
			defer h.visitStack.pop(pm)
		}
		h.HashUint8(1) // indicates visiting map entries
		h.HashUint64(uint64(v.Len()))

		mh := mapHasherPool.Get().(*mapHasher)
		defer mapHasherPool.Put(mh)

		// Hash a map in a sort-free manner.
		// It relies on a map being an unordered set of KV entries.
		// So long as we hash each KV entry together, we can XOR all the
		// individual hashes to produce a unique hash for the entire map.
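		// Since XOR is commutative and associative, the result does not
		// depend on the map's non-deterministic iteration order.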
		k := mh.valKey.get(v.Type().Key())
		e := mh.valElem.get(v.Type().Elem())
		mh.sum = Sum{}
		mh.h.visitStack = h.visitStack // always use the parent's visit stack to avoid cycles
		for iter := v.MapRange(); iter.Next(); {
			k.SetIterKey(iter)
			e.SetIterValue(iter)
			mh.h.reset()
			hashKey(&mh.h, pointerOf(k.Addr()))
			hashValue(&mh.h, pointerOf(e.Addr()))
			mh.sum.xor(mh.h.sum())
		}
		h.HashBytes(mh.sum.sum[:])
	}
}

func makePointerHasher(t reflect.Type) typeHasherFunc {
	var once sync.Once
	var hashElem typeHasherFunc
	var isRecursive bool
	init := func() {
		hashElem = lookupTypeHasher(t.Elem())
		isRecursive = typeIsRecursive(t)
	}
	return func(h *hasher, p pointer) {
		pe := p.pointerElem()
		if pe.isNil() {
			h.HashUint8(0) // indicates nil
			return
		}
		once.Do(init)
		if isRecursive {
			if idx, ok := h.visitStack.seen(pe.p); ok {
				h.HashUint8(2) // indicates cycle
				h.HashUint64(uint64(idx))
				return
			}
			h.visitStack.push(pe.p)
			defer h.visitStack.pop(pe.p)
		}
		h.HashUint8(1) // indicates visiting a pointer element
		hashElem(h, pe)
	}
}

func makeInterfaceHasher(t reflect.Type) typeHasherFunc {
	return func(h *hasher, p pointer) {
		v := p.asValue(t).Elem() // reflect.Interface kind
		if v.IsNil() {
			h.HashUint8(0) // indicates nil
			return
		}
		h.HashUint8(1) // indicates visiting an interface value
		v = v.Elem()
		t := v.Type()
		h.hashType(t)
		va := reflect.New(t).Elem()
		va.Set(v)
		hashElem := lookupTypeHasher(t)
		hashElem(h, pointerOf(va.Addr()))
	}
}

type mapHasher struct {
	h       hasher
	valKey  valueCache
	valElem valueCache
	sum     Sum
}

var mapHasherPool = &sync.Pool{
	New: func() any { return new(mapHasher) },
}

type valueCache map[reflect.Type]reflect.Value

// get returns an addressable reflect.Value for the given type.
func (c *valueCache) get(t reflect.Type) reflect.Value {
	v, ok := (*c)[t]
	if !ok {
		v = reflect.New(t).Elem()
		if *c == nil {
			*c = make(valueCache)
		}
		(*c)[t] = v
	}
	return v
}