all: use slices.Collect with maps.Keys instead of xmaps.Keys
In Go 1.23, the standard maps.Keys helper was changed, relative to xmaps.Keys, to return an iterator, which can be used with slices.Collect. Also, Go 1.21 added the clear built-in, which replaces xmaps.Clear and is semantically more correct with respect to NaN keys.

Updates #8632
Updates #12912
Updates #cleanup

Signed-off-by: Joe Tsai <joetsai@digital-static.net>
parent c763b7a7db
commit 8f86d4f8b9
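
A minimal sketch of the pattern this commit applies throughout (the map contents here are illustrative, not taken from the diff):

	package main

	import (
		"fmt"
		"maps"
		"slices"
	)

	func main() {
		m := map[string]int{"a": 1, "b": 2}

		// Before: keys := xmaps.Keys(m) returned a []string directly.
		// In Go 1.23, maps.Keys returns an iter.Seq[string], so
		// slices.Collect materializes it into a slice.
		keys := slices.Collect(maps.Keys(m))
		fmt.Println(len(keys)) // 2; iteration order is not deterministic

		// Before: xmaps.Clear(m). The clear built-in (Go 1.21) empties
		// the map, including entries whose keys are NaN.
		clear(m)
		fmt.Println(len(m)) // 0
	}
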
@@ -12,13 +12,13 @@
 import (
 	"context"
 	"fmt"
+	"maps"
 	"net/netip"
 	"slices"
 	"strings"
 	"sync"
 	"time"

-	xmaps "golang.org/x/exp/maps"
 	"golang.org/x/net/dns/dnsmessage"
 	"tailscale.com/types/logger"
 	"tailscale.com/types/views"
@@ -291,11 +291,11 @@ func (e *AppConnector) updateDomains(domains []string) {
 			}
 		}
 		if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil {
-			e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", xmaps.Keys(oldDomains), toRemove, err)
+			e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", slices.Collect(maps.Keys(oldDomains)), toRemove, err)
 		}
 	}

-	e.logf("handling domains: %v and wildcards: %v", xmaps.Keys(e.domains), e.wildcards)
+	e.logf("handling domains: %v and wildcards: %v", slices.Collect(maps.Keys(e.domains)), e.wildcards)
 }

 // updateRoutes merges the supplied routes into the currently configured routes. The routes supplied
@@ -354,7 +354,7 @@ func (e *AppConnector) Domains() views.Slice[string] {
 	e.mu.Lock()
 	defer e.mu.Unlock()

-	return views.SliceOf(xmaps.Keys(e.domains))
+	return views.SliceOf(slices.Collect(maps.Keys(e.domains)))
 }

 // DomainRoutes returns a map of domains to resolved IP
@@ -5,13 +5,13 @@

 import (
 	"context"
+	"maps"
 	"net/netip"
 	"reflect"
 	"slices"
 	"testing"
 	"time"

-	xmaps "golang.org/x/exp/maps"
 	"golang.org/x/net/dns/dnsmessage"
 	"tailscale.com/appc/appctest"
 	"tailscale.com/tstest"
@@ -50,7 +50,7 @@ func TestUpdateDomains(t *testing.T) {
 	// domains are explicitly downcased on set.
 	a.UpdateDomains([]string{"UP.EXAMPLE.COM"})
 	a.Wait(ctx)
-	if got, want := xmaps.Keys(a.domains), []string{"up.example.com"}; !slices.Equal(got, want) {
+	if got, want := slices.Collect(maps.Keys(a.domains)), []string{"up.example.com"}; !slices.Equal(got, want) {
 		t.Errorf("got %v; want %v", got, want)
 	}
 }
@@ -9,13 +9,13 @@
 	"errors"
 	"flag"
 	"fmt"
+	"maps"
 	"slices"
 	"strings"
 	"text/tabwriter"

 	"github.com/kballard/go-shellquote"
 	"github.com/peterbourgon/ff/v3/ffcli"
-	xmaps "golang.org/x/exp/maps"
 	"tailscale.com/envknob"
 	"tailscale.com/ipn/ipnstate"
 	"tailscale.com/tailcfg"
@@ -255,7 +255,7 @@ func filterFormatAndSortExitNodes(peers []*ipnstate.PeerStatus, filterBy string)
 	}

 	filteredExitNodes := filteredExitNodes{
-		Countries: xmaps.Values(countries),
+		Countries: slices.Collect(maps.Values(countries)),
 	}

 	for _, country := range filteredExitNodes.Countries {
@@ -16,6 +16,7 @@
 	"fmt"
 	"io"
 	"log"
+	"maps"
 	"os"
 	"os/exec"
 	"slices"
@@ -29,7 +30,6 @@
 	"github.com/dave/courtney/tester"
 	"github.com/dave/patsy"
 	"github.com/dave/patsy/vos"
-	xmaps "golang.org/x/exp/maps"
 	"tailscale.com/cmd/testwrapper/flakytest"
 )

@@ -343,7 +343,7 @@ type nextRun struct {
 		if len(toRetry) == 0 {
 			continue
 		}
-		pkgs := xmaps.Keys(toRetry)
+		pkgs := slices.Collect(maps.Keys(toRetry))
 		sort.Strings(pkgs)
 		nextRun := &nextRun{
 			attempt: thisRun.attempt + 1,
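
Call sites that need deterministic output still sort after collecting, as above. Where Go 1.23 is assumed, slices.Sorted would fuse the two steps into one; a hypothetical further simplification, not part of this commit:

	package main

	import (
		"fmt"
		"maps"
		"slices"
	)

	func main() {
		toRetry := map[string]bool{"pkg/b": true, "pkg/a": true}
		// Equivalent to slices.Collect(maps.Keys(toRetry)) followed
		// by sort.Strings(pkgs), in a single call.
		pkgs := slices.Sorted(maps.Keys(toRetry))
		fmt.Println(pkgs) // [pkg/a pkg/b]
	}
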
@@ -37,7 +37,6 @@

 	"go4.org/mem"
 	"go4.org/netipx"
-	xmaps "golang.org/x/exp/maps"
 	"golang.org/x/net/dns/dnsmessage"
 	"gvisor.dev/gvisor/pkg/tcpip"
 	"tailscale.com/appc"
@@ -1925,7 +1924,7 @@ func (b *LocalBackend) DisablePortMapperForTest() {
 func (b *LocalBackend) PeersForTest() []tailcfg.NodeView {
 	b.mu.Lock()
 	defer b.mu.Unlock()
-	ret := xmaps.Values(b.peers)
+	ret := slices.Collect(maps.Values(b.peers))
 	slices.SortFunc(ret, func(a, b tailcfg.NodeView) int {
 		return cmp.Compare(a.ID(), b.ID())
 	})
@@ -7150,9 +7149,9 @@ type nodeDistance struct {
 	// First, try to select an exit node that has the closest DERP home, based on lastReport's DERP latency.
 	// If there are no latency values, it returns an arbitrary region
 	if len(candidatesByRegion) > 0 {
-		minRegion := minLatencyDERPRegion(xmaps.Keys(candidatesByRegion), report)
+		minRegion := minLatencyDERPRegion(slices.Collect(maps.Keys(candidatesByRegion)), report)
 		if minRegion == 0 {
-			minRegion = selectRegion(views.SliceOf(xmaps.Keys(candidatesByRegion)))
+			minRegion = selectRegion(views.SliceOf(slices.Collect(maps.Keys(candidatesByRegion))))
 		}
 		regionCandidates, ok := candidatesByRegion[minRegion]
 		if !ok {
@@ -9,6 +9,7 @@
 	"encoding/binary"
 	"errors"
 	"io"
+	"maps"
 	"net"
 	"net/netip"
 	"runtime"
@@ -18,7 +19,6 @@
 	"sync/atomic"
 	"time"

-	xmaps "golang.org/x/exp/maps"
 	"tailscale.com/control/controlknobs"
 	"tailscale.com/health"
 	"tailscale.com/net/dns/resolver"
@@ -203,7 +203,7 @@ func compileHostEntries(cfg Config) (hosts []*HostEntry) {
 	if len(hostsMap) == 0 {
 		return nil
 	}
-	hosts = xmaps.Values(hostsMap)
+	hosts = slices.Collect(maps.Values(hostsMap))
 	slices.SortFunc(hosts, func(a, b *HostEntry) int {
 		if len(a.Hosts) == 0 && len(b.Hosts) == 0 {
 			return 0
@@ -5,12 +5,13 @@

 import (
 	"bytes"
+	"maps"
 	"math/rand"
+	"slices"
 	"strings"
 	"testing"

 	"github.com/google/go-cmp/cmp"
-	xmaps "golang.org/x/exp/maps"
 )

 func TestLRU(t *testing.T) {
@@ -75,7 +76,7 @@ func TestStressEvictions(t *testing.T) {
 	for len(vm) < numKeys {
 		vm[rand.Uint64()] = true
 	}
-	vals := xmaps.Keys(vm)
+	vals := slices.Collect(maps.Keys(vm))

 	c := Cache[uint64, bool]{
 		MaxEntries: cacheSize,
@@ -106,7 +107,7 @@ func TestStressBatchedEvictions(t *testing.T) {
 	for len(vm) < numKeys {
 		vm[rand.Uint64()] = true
 	}
-	vals := xmaps.Keys(vm)
+	vals := slices.Collect(maps.Keys(vm))

 	c := Cache[uint64, bool]{}

@@ -8,7 +8,6 @@
 	"testing"

 	qt "github.com/frankban/quicktest"
-	xmaps "golang.org/x/exp/maps"
 )

 func pair[A, B any](a A, b B) (out struct {
@@ -130,7 +129,7 @@ func Benchmark(b *testing.B) {
 		for range b.N {
 			testMap[strings.ToLower(key)] = testValue
 		}
-		xmaps.Clear(testMap)
+		clear(testMap)
 	})
 	b.Run("NoCase", func(b *testing.B) {
 		b.ReportAllocs()
@@ -138,7 +137,7 @@ func Benchmark(b *testing.B) {
 		for range b.N {
 			Set(testMap, key, testValue)
 		}
-		xmaps.Clear(testMap)
+		clear(testMap)
 	})
 	})
 	b.Run("Delete", func(b *testing.B) {
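
The NaN remark in the commit message, sketched below: xmaps.Clear was implemented as a delete-in-range loop, and delete can never match a NaN key because NaN != NaN, so such entries survive; the clear built-in empties the map unconditionally:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		m := map[float64]bool{math.NaN(): true, 1: true}

		// How xmaps.Clear worked: delete every key found by range.
		// delete compares keys with ==, and NaN != NaN, so the NaN
		// entry is never removed.
		for k := range m {
			delete(m, k)
		}
		fmt.Println(len(m)) // 1: the NaN entry survives

		// The clear built-in (Go 1.21) removes every entry.
		clear(m)
		fmt.Println(len(m)) // 0
	}
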
@@ -5,11 +5,10 @@
 package metrics

 import (
+	"maps"
 	"strings"
 	"sync"

-	xmaps "golang.org/x/exp/maps"
-
 	"tailscale.com/syncs"
 	"tailscale.com/types/lazy"
 	"tailscale.com/util/clientmetric"
@@ -268,7 +267,7 @@ func SetHooksForTest(tb internal.TB, addMetric, setMetric metricFn) {
 	})

 	settingMetricsMu.Lock()
-	oldSettingMetricsMap := xmaps.Clone(settingMetricsMap)
+	oldSettingMetricsMap := maps.Clone(settingMetricsMap)
 	clear(settingMetricsMap)
 	settingMetricsMu.Unlock()
 	tb.Cleanup(func() {
@@ -9,7 +9,6 @@
 	"slices"
 	"strings"

-	xmaps "golang.org/x/exp/maps"
 	"tailscale.com/util/deephash"
 )

@@ -24,7 +23,7 @@ type Snapshot struct {

 // NewSnapshot returns a new [Snapshot] with the specified items and options.
 func NewSnapshot(items map[Key]RawItem, opts ...SummaryOption) *Snapshot {
-	return &Snapshot{m: xmaps.Clone(items), sig: deephash.Hash(&items), summary: SummaryWith(opts...)}
+	return &Snapshot{m: maps.Clone(items), sig: deephash.Hash(&items), summary: SummaryWith(opts...)}
 }

 // All returns an iterator over policy settings in s. The iteration order is not
@@ -164,7 +163,7 @@ func MergeSnapshots(snapshot1, snapshot2 *Snapshot) *Snapshot {
 		return &Snapshot{snapshot2.m, snapshot2.sig, SummaryWith(summaryOpts...)}
 	}
 	m := make(map[Key]RawItem, snapshot1.Len()+snapshot2.Len())
-	xmaps.Copy(m, snapshot1.m)
-	xmaps.Copy(m, snapshot2.m) // snapshot2 has higher precedence
+	maps.Copy(m, snapshot1.m)
+	maps.Copy(m, snapshot2.m) // snapshot2 has higher precedence
 	return &Snapshot{m, deephash.Hash(&m), SummaryWith(summaryOpts...)}
 }
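
For the Clone and Copy call sites the change is a pure import swap: the standard maps package has shipped equivalent functions since Go 1.21. An illustrative sketch (the values are made up):

	package main

	import (
		"fmt"
		"maps"
	)

	func main() {
		base := map[string]int{"a": 1}
		override := map[string]int{"a": 2, "b": 3}

		// maps.Clone makes a shallow copy, like xmaps.Clone did.
		merged := maps.Clone(base)
		// maps.Copy writes override's entries into merged; later copies
		// win, matching the "higher precedence" comment above.
		maps.Copy(merged, override)
		fmt.Println(merged) // map[a:2 b:3]
	}
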
@@ -5,10 +5,11 @@

 import (
 	"fmt"
+	"maps"
+	"slices"
 	"sync"
 	"sync/atomic"

-	xmaps "golang.org/x/exp/maps"
 	"tailscale.com/util/mak"
 	"tailscale.com/util/set"
 	"tailscale.com/util/syspolicy/internal"
@@ -294,7 +295,7 @@ func (s *TestStore) Suspend() {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	if s.suspendCount++; s.suspendCount == 1 {
-		s.mw = xmaps.Clone(s.mr)
+		s.mw = maps.Clone(s.mr)
 	}
 }

@@ -421,7 +422,7 @@ func (s *TestStore) notifyPolicyChanged() {
 		s.mu.RUnlock()
 		return
 	}
-	cbs := xmaps.Values(s.cbs)
+	cbs := slices.Collect(maps.Values(s.cbs))
 	s.mu.RUnlock()

 	var wg sync.WaitGroup
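
A side benefit of collecting eagerly in notifyPolicyChanged: the callbacks are snapshotted while the read lock is held and invoked after it is released; a lazy iterator over s.cbs would still be reading the map after RUnlock. A sketch of the pattern with illustrative names:

	package main

	import (
		"fmt"
		"maps"
		"slices"
		"sync"
	)

	type store struct {
		mu  sync.RWMutex
		cbs map[int]func()
	}

	func (s *store) notify() {
		s.mu.RLock()
		// Materialize under the lock; iterating s.cbs lazily after
		// RUnlock would race with concurrent writers.
		cbs := slices.Collect(maps.Values(s.cbs))
		s.mu.RUnlock()

		for _, cb := range cbs {
			cb() // run without holding the lock
		}
	}

	func main() {
		s := &store{cbs: map[int]func(){1: func() { fmt.Println("notified") }}}
		s.notify()
	}
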
@@ -8,6 +8,7 @@
 	"encoding/json"
 	"flag"
 	"fmt"
+	"maps"
 	"net/netip"
 	"os"
 	"slices"
@@ -18,7 +19,6 @@
 	"github.com/google/go-cmp/cmp"
 	"github.com/google/go-cmp/cmp/cmpopts"
 	"go4.org/netipx"
-	xmaps "golang.org/x/exp/maps"
 	"tailscale.com/net/flowtrack"
 	"tailscale.com/net/ipset"
 	"tailscale.com/net/packet"
@@ -997,7 +997,7 @@ func TestPeerCaps(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got := xmaps.Keys(filt.CapsWithValues(netip.MustParseAddr(tt.src), netip.MustParseAddr(tt.dst)))
+			got := slices.Collect(maps.Keys(filt.CapsWithValues(netip.MustParseAddr(tt.src), netip.MustParseAddr(tt.dst))))
 			slices.Sort(got)
 			slices.Sort(tt.want)
 			if !slices.Equal(got, tt.want) {
@@ -9,6 +9,7 @@
 	"encoding/binary"
 	"errors"
 	"fmt"
+	"maps"
 	"math"
 	"math/rand/v2"
 	"net"
@@ -20,7 +21,6 @@
 	"sync/atomic"
 	"time"

-	xmaps "golang.org/x/exp/maps"
 	"golang.org/x/net/ipv4"
 	"golang.org/x/net/ipv6"
 	"tailscale.com/disco"
@@ -586,7 +586,7 @@ func (de *endpoint) addrForWireGuardSendLocked(now mono.Time) (udpAddr netip.Add
 	needPing := len(de.endpointState) > 1 && now.Sub(oldestPing) > wireguardPingInterval

 	if !udpAddr.IsValid() {
-		candidates := xmaps.Keys(de.endpointState)
+		candidates := slices.Collect(maps.Keys(de.endpointState))

 		// Randomly select an address to use until we retrieve latency information
 		// and give it a short trustBestAddrUntil time so we avoid flapping between
@@ -12,6 +12,7 @@
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"math/rand"
 	"net"
 	"net/http"
@@ -19,6 +20,7 @@
 	"net/netip"
 	"os"
 	"runtime"
+	"slices"
 	"strconv"
 	"strings"
 	"sync"
@@ -32,7 +34,6 @@
 	"github.com/tailscale/wireguard-go/device"
 	"github.com/tailscale/wireguard-go/tun/tuntest"
 	"go4.org/mem"
-	xmaps "golang.org/x/exp/maps"
 	"golang.org/x/net/icmp"
 	"golang.org/x/net/ipv4"
 	"tailscale.com/cmd/testwrapper/flakytest"
@@ -1129,7 +1130,7 @@ func testTwoDevicePing(t *testing.T, d *devices) {
 			}
 		}
 		t.Helper()
-		t.Errorf("missing any connection to %s from %s", wantConns, xmaps.Keys(stats))
+		t.Errorf("missing any connection to %s from %s", wantConns, slices.Collect(maps.Keys(stats)))
 	}

 	addrPort := netip.MustParseAddrPort