// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package deephash hashes a Go value recursively, in a predictable order,
// without looping. The hash is only valid within the lifetime of a program.
// Users should not store the hash on disk or send it over the network.
// The hash is sufficiently strong and unique such that
// Hash(x) == Hash(y) is an appropriate replacement for x == y.
//
// The definition of equality is identical to reflect.DeepEqual except:
//   - Floating-point values are compared based on the raw bits,
//     which means that NaNs (with the same bit pattern) are treated as equal.
//   - time.Time are compared based on whether they are the same instant in time
//     and also in the same zone offset. Monotonic measurements and zone names
//     are ignored as part of the hash.
//   - netip.Addr are compared based on a shallow comparison of the struct.
//
// WARNING: This package, like most of the tailscale.com Go module,
// should be considered Tailscale-internal; we make no API promises.
package deephash

import (
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"reflect"
	"sync"
	"time"

	"tailscale.com/util/hashx"
)
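
// Example usage (illustrative sketch; Config is a hypothetical caller type):
//
//	type Config struct {
//		Name  string
//		Ports []int
//	}
//
//	c1 := &Config{Name: "a", Ports: []int{80}}
//	c2 := &Config{Name: "a", Ports: []int{80}}
//	_ = deephash.Hash(c1) == deephash.Hash(c2) // true: deep equality, not pointer identity
//	s := deephash.Hash(c1)
//	_ = s.String() // hex digest; stable only within this process (seeded per run)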

// There is much overlap between the theory of serialization and hashing.
// A hash (useful for determining equality) can be produced by printing a value
// and hashing the output. The format must:
//	* be deterministic such that the same value hashes to the same output, and
//	* be parsable such that the same value can be reproduced by the output.
//
// The logic below hashes a value by printing it to a hash.Hash.
// To be parsable, it assumes that we know the Go type of each value:
//	* scalar types (e.g., bool or int32) are printed as fixed-width fields.
//	* list types (e.g., strings, slices, and AppendTo buffers) are prefixed
//	  by a fixed-width length field, followed by the contents of the list.
//	* slices, arrays, and structs print each element/field consecutively.
//	* interfaces print with a 1-byte prefix indicating whether it is nil.
//	  If non-nil, it is followed by a fixed-width field of the type index,
//	  followed by the format of the underlying value.
//	* pointers print with a 1-byte prefix indicating whether the pointer is
//	  1) nil, 2) previously seen, or 3) newly seen. Previously seen pointers are
//	  followed by a fixed-width field with the index of the previous pointer.
//	  Newly seen pointers are followed by the format of the underlying value.
//	* maps print with a 1-byte prefix indicating whether the map pointer is
//	  1) nil, 2) previously seen, or 3) newly seen. Previously seen pointers
//	  are followed by a fixed-width field of the index of the previous pointer.
//	  Newly seen maps are printed as a fixed-width field with the XOR of the
//	  hash of every map entry. With a sufficiently strong hash, this value is
//	  theoretically "parsable" by looking up the hash in a magical map that
//	  returns the set of entries for that given hash.
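//
// As a rough sketch of the list-type encoding above (illustrative only;
// the per-process seed and the type prefix are omitted), hashing the
// string "hi" contributes:
//
//	HashUint64(2)    // fixed-width length prefix
//	HashString("hi") // the contents
//
// See hashString below for the real implementation.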

const scratchSize = 128

// hasher is reusable state for hashing a value.
// Get one via hasherPool.
type hasher struct {
	hashx.Block512
	scratch    [scratchSize]byte
	visitStack visitStack
}

// Sum is an opaque checksum type that is comparable.
type Sum struct {
	sum [sha256.Size]byte
}

func (s1 *Sum) xor(s2 Sum) {
	for i := 0; i < sha256.Size; i++ {
		s1.sum[i] ^= s2.sum[i]
	}
}

func (s Sum) String() string {
	return hex.EncodeToString(s.sum[:])
}

var (
	seedOnce sync.Once
	seed     uint64
)

func initSeed() {
	seed = uint64(time.Now().UnixNano())
}

func (h *hasher) Reset() {
	if h.Block512.Hash == nil {
		h.Block512.Hash = sha256.New()
	}
	h.Block512.Reset()
}

func (h *hasher) sum() (s Sum) {
	h.Sum(s.sum[:0])
	return s
}

var hasherPool = &sync.Pool{
	New: func() any { return new(hasher) },
}

// Hash returns the hash of v.
// For performance, this should be a non-nil pointer.
func Hash(v any) (s Sum) {
	h := hasherPool.Get().(*hasher)
	defer hasherPool.Put(h)
	h.Reset()
	seedOnce.Do(initSeed)
	h.HashUint64(seed)

	rv := reflect.ValueOf(v)
	if rv.IsValid() {
		var t reflect.Type
		var p pointer
		if rv.Kind() == reflect.Pointer && !rv.IsNil() {
			t = rv.Type().Elem()
			p = pointerOf(rv)
		} else {
			t = rv.Type()
			va := reflect.New(t).Elem()
			va.Set(rv)
			p = pointerOf(va.Addr())
		}

		// Always treat the Hash input as an interface (it is), including hashing
		// its type, otherwise two Hash calls of different types could hash to the
		// same bytes for different types and get equivalent Sum values. This is
		// the same thing that we do for reflect.Kind Interface in genTypeHasher,
		// but the initial reflect.ValueOf from an interface value effectively
		// strips the interface box off so we have to do it at the top level by hand.
		h.hashType(t)
		ti := getTypeInfo(t)
		ti.hasher()(h, p)
	}
	return h.sum()
}

// HasherForType is like Hash, but it returns a Hash func that's specialized for
// the provided reflect type, avoiding a map lookup per value.
func HasherForType[T any]() func(T) Sum {
	var zeroT T
	t := reflect.TypeOf(zeroT)
	ti := getTypeInfo(t)
	var tiElem *typeInfo
	if t.Kind() == reflect.Pointer {
		tiElem = getTypeInfo(t.Elem())
	}
	seedOnce.Do(initSeed)

	return func(v T) (s Sum) {
		h := hasherPool.Get().(*hasher)
		defer hasherPool.Put(h)
		h.Reset()
		h.HashUint64(seed)

		rv := reflect.ValueOf(v)

		if rv.IsValid() {
			if rv.Kind() == reflect.Pointer && !rv.IsNil() {
				p := pointerOf(rv)
				h.hashType(t.Elem())
				tiElem.hasher()(h, p)
			} else {
				va := reflect.New(t).Elem()
				va.Set(rv)
				p := pointerOf(va.Addr())
				h.hashType(t)
				ti.hasher()(h, p)
			}
		}
		return h.sum()
	}
}
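
// Example (illustrative; Node is a hypothetical caller type): callers that
// repeatedly hash values of one type can hoist the type lookup out of the loop.
//
//	hashNode := deephash.HasherForType[*Node]()
//	for _, n := range nodes {
//		sum := hashNode(n) // same result as deephash.Hash(n), minus the per-call type lookup
//		_ = sum
//	}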

// Update sets last to the hash of v and reports whether its value changed.
func Update(last *Sum, v any) (changed bool) {
	sum := Hash(v)
	changed = sum != *last
	if changed {
		*last = sum
	}
	return changed
}
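
// Example (illustrative sketch; cfg and publish are hypothetical): Update is
// handy for change detection, since it stores the new Sum only when it differs.
//
//	var lastSum deephash.Sum
//	if deephash.Update(&lastSum, &cfg) {
//		publish(&cfg) // only on change
//	}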

// typeInfo describes properties of a type.
//
// A non-nil typeInfo is populated into typeInfoMap
// when its type is first requested, before its hasher func is created.
// Its hashFuncLazy field is only populated once that func has been created.
// This is used for recursive types.
type typeInfo struct {
	rtype       reflect.Type
	isRecursive bool

	// elemTypeInfo is the element type's typeInfo.
	// It's set when rtype is of Kind Ptr, Slice, Array, Map.
	elemTypeInfo *typeInfo

	// keyTypeInfo is the map key type's typeInfo.
	// It's set when rtype is of Kind Map.
	keyTypeInfo *typeInfo

	hashFuncOnce sync.Once
	hashFuncLazy typeHasherFunc // nil until created
}

// typeHasherFunc hashes the value pointed at by p for a given type.
// For example, if t is a bool, then p is a *bool.
// The provided pointer must always be non-nil.
type typeHasherFunc func(h *hasher, p pointer)

var typeInfoMap sync.Map           // map[reflect.Type]*typeInfo
var typeInfoMapPopulate sync.Mutex // just for adding to typeInfoMap

func (ti *typeInfo) hasher() typeHasherFunc {
	ti.hashFuncOnce.Do(ti.buildHashFuncOnce)
	return ti.hashFuncLazy
}

func (ti *typeInfo) buildHashFuncOnce() {
	ti.hashFuncLazy = genTypeHasher(ti)
}
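
// Internally (a minimal sketch of the flow above), hashing a value of some
// type t goes through the memoized per-type hasher:
//
//	ti := getTypeInfo(t)  // cached *typeInfo, built on first use
//	fn := ti.hasher()     // typeHasherFunc, generated once via genTypeHasher
//	fn(h, p)              // hashes the value that p points at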

// fieldInfo describes a struct field.
type fieldInfo struct {
	index      int // index of field for reflect.Value.Field(n); -1 if invalid
	typeInfo   *typeInfo
	canMemHash bool
	offset     uintptr // when we can memhash the field
	size       uintptr // when we can memhash the field
}

// mergeContiguousFieldsCopy returns a copy of in with contiguous memhashable fields
// merged together. Such merged fields get a bogus index and a nil typeInfo.
func mergeContiguousFieldsCopy(in []fieldInfo) []fieldInfo {
	ret := make([]fieldInfo, 0, len(in))
	var last *fieldInfo
	for _, f := range in {
		// Combine two fields if they're both contiguous & memhash-able.
		if f.canMemHash && last != nil && last.canMemHash && last.offset+last.size == f.offset {
			last.size += f.size
			last.index = -1
			last.typeInfo = nil
		} else {
			ret = append(ret, f)
			last = &ret[len(ret)-1]
		}
	}
	return ret
}
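
// For example (illustrative), given a struct such as
//
//	type pair struct {
//		A, B uint32 // adjacent in memory, both memhashable
//		S    string // not flat memory; hashed via its own hasher
//	}
//
// the A and B fields collapse into a single 8-byte memhash span, while S
// keeps its own fieldInfo and goes through its type's typeHasherFunc.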

// genHashStructFields generates a typeHasherFunc for t, which must be of kind Struct.
func genHashStructFields(t reflect.Type) typeHasherFunc {
	fields := make([]fieldInfo, 0, t.NumField())
	for i, n := 0, t.NumField(); i < n; i++ {
		sf := t.Field(i)
		if sf.Type.Size() == 0 {
			continue
		}
		fields = append(fields, fieldInfo{
			index:      i,
			typeInfo:   getTypeInfo(sf.Type),
			canMemHash: typeIsMemHashable(sf.Type),
			offset:     sf.Offset,
			size:       sf.Type.Size(),
		})
	}
	fields = mergeContiguousFieldsCopy(fields)
	return structHasher{fields}.hash
}

type structHasher struct {
	fields []fieldInfo
}

func (sh structHasher) hash(h *hasher, p pointer) {
	for _, f := range sh.fields {
		pf := p.structField(f.index, f.offset, f.size)
		if f.canMemHash {
			h.HashBytes(pf.asMemory(f.size))
		} else {
			f.typeInfo.hasher()(h, pf)
		}
	}
}

func genTypeHasher(ti *typeInfo) typeHasherFunc {
	t := ti.rtype

	// Types with specific hashing.
	switch t {
	case timeTimeType:
		return (*hasher).hashTimev
	case netipAddrType:
		return (*hasher).hashAddrv
	}

	// Types that can have their memory representation directly hashed.
	if typeIsMemHashable(t) {
		return makeMemHasher(t.Size())
	}

	switch t.Kind() {
	case reflect.String:
		return (*hasher).hashString
	case reflect.Slice:
		et := t.Elem()
		if typeIsMemHashable(et) {
			return func(h *hasher, p pointer) {
				pa := p.sliceArray()
				vLen := p.sliceLen()
				h.HashUint64(uint64(vLen))
				if vLen == 0 {
					return
				}
				h.HashBytes(pa.asMemory(et.Size() * uintptr(vLen)))
			}
		}
		eti := getTypeInfo(et)
		return genHashSliceElements(eti)
	case reflect.Array:
		et := t.Elem()
		eti := getTypeInfo(et)
		return genHashArray(t, eti)
	case reflect.Struct:
		return genHashStructFields(t)
	case reflect.Map:
		return func(h *hasher, p pointer) {
			v := p.asValue(t).Elem() // reflect.Map kind
			if v.IsNil() {
				h.HashUint8(0) // indicates nil
				return
			}
			if ti.isRecursive {
				pm := v.UnsafePointer() // underlying pointer of map
				if idx, ok := h.visitStack.seen(pm); ok {
					h.HashUint8(2) // indicates cycle
					h.HashUint64(uint64(idx))
					return
				}
				h.visitStack.push(pm)
				defer h.visitStack.pop(pm)
			}
			h.HashUint8(1) // indicates visiting a map
			h.hashMap(v, ti)
		}
	case reflect.Pointer:
		et := t.Elem()
		eti := getTypeInfo(et)
		return func(h *hasher, p pointer) {
			pe := p.pointerElem()
			if pe.isNil() {
				h.HashUint8(0) // indicates nil
				return
			}
			if ti.isRecursive {
				if idx, ok := h.visitStack.seen(pe.p); ok {
					h.HashUint8(2) // indicates cycle
					h.HashUint64(uint64(idx))
					return
				}
				h.visitStack.push(pe.p)
				defer h.visitStack.pop(pe.p)
			}
			h.HashUint8(1) // indicates visiting a pointer
			eti.hasher()(h, pe)
		}
	case reflect.Interface:
		return func(h *hasher, p pointer) {
			v := p.asValue(t).Elem() // reflect.Interface kind
			if v.IsNil() {
				h.HashUint8(0) // indicates nil
				return
			}
			h.HashUint8(1) // visiting interface
			v = v.Elem()
			t := v.Type()
			h.hashType(t)
			va := reflect.New(t).Elem()
			va.Set(v)
			ti := getTypeInfo(t)
			ti.hasher()(h, pointerOf(va.Addr()))
		}
	default: // Func, Chan, UnsafePointer
		return func(*hasher, pointer) {}
	}
}
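
// Dispatch sketch (illustrative): for a type like []uint32 the generated
// hasher takes the memhashable fast path above, roughly equivalent to:
//
//	h.HashUint64(uint64(vLen))
//	h.HashBytes(pa.asMemory(4 * uintptr(vLen))) // et.Size() == 4 for uint32
//
// while a []string falls through to genHashSliceElements and hashes each
// element with hashString.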

func (h *hasher) hashString(p pointer) {
	s := *p.asString()
	h.HashUint64(uint64(len(s)))
	h.HashString(s)
}

// hashTimev hashes v, of kind time.Time.
func (h *hasher) hashTimev(p pointer) {
	// Include the zone offset (but not the name) to keep
	// Hash(t1) == Hash(t2) being semantically equivalent to
	// t1.Format(time.RFC3339Nano) == t2.Format(time.RFC3339Nano).
	t := *p.asTime()
	_, offset := t.Zone()
	h.HashUint64(uint64(t.Unix()))
	h.HashUint32(uint32(t.Nanosecond()))
	h.HashUint32(uint32(offset))
}
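
// For example (illustrative), two times that denote the same instant in zones
// with the same offset hash identically, while a different offset changes the hash:
//
//	t1 := time.Date(2020, 1, 1, 12, 0, 0, 0, time.UTC)
//	t2 := t1.In(time.FixedZone("X", 0))    // same instant, same offset: same hash
//	t3 := t1.In(time.FixedZone("Y", 3600)) // same instant, +01:00 offset: different hash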

// hashAddrv hashes v, of type netip.Addr.
func (h *hasher) hashAddrv(p pointer) {
	// The formatting of netip.Addr covers the
	// IP version, the address, and the optional zone name (for v6).
	// This is equivalent to a1.MarshalBinary() == a2.MarshalBinary().
	ip := *p.asAddr()
	switch {
	case !ip.IsValid():
		h.HashUint64(0)
	case ip.Is4():
		b := ip.As4()
		h.HashUint64(4)
		h.HashUint32(binary.LittleEndian.Uint32(b[:]))
	case ip.Is6():
		b := ip.As16()
		z := ip.Zone()
		h.HashUint64(16 + uint64(len(z)))
		h.HashUint64(binary.LittleEndian.Uint64(b[:8]))
		h.HashUint64(binary.LittleEndian.Uint64(b[8:]))
		h.HashString(z)
	}
}

func makeMemHasher(n uintptr) typeHasherFunc {
	return func(h *hasher, p pointer) {
		h.HashBytes(p.asMemory(n))
	}
}

func genHashArrayElements(n int, eti *typeInfo) typeHasherFunc {
	nb := eti.rtype.Size() // byte size of each array element
	return func(h *hasher, p pointer) {
		for i := 0; i < n; i++ {
			pe := p.arrayIndex(i, nb)
			eti.hasher()(h, pe)
		}
	}
}

func genHashArray(t reflect.Type, eti *typeInfo) typeHasherFunc {
	n := t.Len()
	return genHashArrayElements(n, eti)
}

func genHashSliceElements(eti *typeInfo) typeHasherFunc {
	return sliceElementHasher{eti}.hash
}

type sliceElementHasher struct {
	eti *typeInfo
}

func (seh sliceElementHasher) hash(h *hasher, p pointer) {
	pa := p.sliceArray()
	vLen := p.sliceLen()
	h.HashUint64(uint64(vLen))
	nb := seh.eti.rtype.Size()
	for i := 0; i < vLen; i++ {
		pe := pa.arrayIndex(i, nb)
		seh.eti.hasher()(h, pe)
	}
}

func getTypeInfo(t reflect.Type) *typeInfo {
	if f, ok := typeInfoMap.Load(t); ok {
		return f.(*typeInfo)
	}
	typeInfoMapPopulate.Lock()
	defer typeInfoMapPopulate.Unlock()
	newTypes := map[reflect.Type]*typeInfo{}
	ti := getTypeInfoLocked(t, newTypes)
	for t, ti := range newTypes {
		typeInfoMap.Store(t, ti)
	}
	return ti
}

func getTypeInfoLocked(t reflect.Type, incomplete map[reflect.Type]*typeInfo) *typeInfo {
	if v, ok := typeInfoMap.Load(t); ok {
		return v.(*typeInfo)
	}
	if ti, ok := incomplete[t]; ok {
		return ti
	}
	ti := &typeInfo{
		rtype:       t,
		isRecursive: typeIsRecursive(t),
	}
	incomplete[t] = ti

	switch t.Kind() {
	case reflect.Map:
		ti.keyTypeInfo = getTypeInfoLocked(t.Key(), incomplete)
		fallthrough
	case reflect.Ptr, reflect.Slice, reflect.Array:
		ti.elemTypeInfo = getTypeInfoLocked(t.Elem(), incomplete)
	}

	return ti
}
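
// The incomplete map is what lets typeInfo construction terminate for
// self-referential types (illustrative example):
//
//	type node struct {
//		Next *node
//	}
//
// Building the typeInfo for node recurses into *node and then node again,
// at which point the partially built entry is found in incomplete and reused
// instead of recursing forever. Cycles among the values themselves are
// handled separately at hash time via hasher.visitStack.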

type mapHasher struct {
	h               hasher
	valKey, valElem valueCache // re-usable values for map iteration
}

var mapHasherPool = &sync.Pool{
	New: func() any { return new(mapHasher) },
}

type valueCache map[reflect.Type]reflect.Value

func (c *valueCache) get(t reflect.Type) reflect.Value {
	v, ok := (*c)[t]
	if !ok {
		v = reflect.New(t).Elem()
		if *c == nil {
			*c = make(valueCache)
		}
		(*c)[t] = v
	}
	return v
}

// hashMap hashes a map in a sort-free manner.
// It relies on a map being, functionally, an unordered set of KV entries.
// So long as we hash each KV entry together, we can XOR all
// of the individual hashes to produce a unique hash for the entire map.
func (h *hasher) hashMap(v reflect.Value, ti *typeInfo) {
	mh := mapHasherPool.Get().(*mapHasher)
	defer mapHasherPool.Put(mh)

	var sum Sum
	if v.IsNil() {
		sum.sum[0] = 1 // something non-zero
	}

	k := mh.valKey.get(v.Type().Key())
	e := mh.valElem.get(v.Type().Elem())
	mh.h.visitStack = h.visitStack // always use the parent's visit stack to avoid cycles
	for iter := v.MapRange(); iter.Next(); {
		k.SetIterKey(iter)
		e.SetIterValue(iter)
		mh.h.Reset()
		ti.keyTypeInfo.hasher()(&mh.h, pointerOf(k.Addr()))
		ti.elemTypeInfo.hasher()(&mh.h, pointerOf(e.Addr()))
		sum.xor(mh.h.sum())
	}
	h.HashBytes(append(h.scratch[:0], sum.sum[:]...)) // append into scratch to avoid heap allocation
}
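
// Because XOR is commutative and associative, the combined map hash is
// independent of Go's randomized map iteration order (illustrative):
//
//	m1 := map[string]int{"a": 1, "b": 2}
//	m2 := map[string]int{"b": 2, "a": 1}
//	_ = deephash.Hash(&m1) == deephash.Hash(&m2) // true, regardless of iteration order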

// hashType hashes a reflect.Type.
// The hash is only consistent within the lifetime of a program.
func (h *hasher) hashType(t reflect.Type) {
	// This approach relies on reflect.Type always being backed by a unique
	// *reflect.rtype pointer. A safer approach is to use a global sync.Map
	// that maps reflect.Type to some arbitrary and unique index.
	// While safer, it requires global state with memory that can never be GC'd.
	rtypeAddr := reflect.ValueOf(t).Pointer() // address of *reflect.rtype
	h.HashUint64(uint64(rtypeAddr))
}