all: use Go 1.22 range-over-int

Updates #11058

Change-Id: I35e7ef9b90e83cac04ca93fd964ad00ed5b48430
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
committed by Brad Fitzpatrick
parent 068db1f972
commit 7c1d6e35a5
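
For context, this is the mechanical rewrite applied throughout the hunks below: Go 1.22 lets a for statement range directly over an integer, so a counted loop can either keep its index via the shorter range form or drop the index entirely when the body never uses it. A minimal sketch (the names n, use, and work are illustrative, not taken from this commit):

	// Pre-Go 1.22 counted loop.
	for i := 0; i < n; i++ {
		use(i)
	}

	// Go 1.22 range-over-int, when the body still needs the index
	// (i takes the values 0, 1, ..., n-1, as before).
	for i := range n {
		use(i)
	}

	// Go 1.22 range-over-int, when the index is unused.
	for range n {
		work()
	}

One behavioral note: with range-over-int the bound is evaluated once before the loop starts, whereas the classic form re-evaluates its condition on every iteration; for loops like the ones in this commit, where the bound does not change while the loop runs, the two forms are equivalent.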
@@ -313,7 +313,7 @@ func BenchmarkFilter(b *testing.B) {
 	acl := newFilter(b.Logf)
 	b.ReportAllocs()
 	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		q := &packet.Parsed{}
 		q.Decode(bench.packet)
 		// This branch seems to have no measurable impact on performance.
@@ -174,7 +174,7 @@ func TestEndpointTrackerMaxNum(t *testing.T) {
 	// the endpointTracker, we will return all of them (even if they're for
 	// the same address).
 	var inputEps []tailcfg.Endpoint
-	for i := 0; i < endpointTrackerMaxPerAddr+5; i++ {
+	for i := range endpointTrackerMaxPerAddr + 5 {
 		inputEps = append(inputEps, tailcfg.Endpoint{
 			Addr: netip.AddrPortFrom(netip.MustParseAddr("1.2.3.4"), 10200+uint16(i)),
 			Type: tailcfg.EndpointSTUN,
@@ -453,7 +453,7 @@ func TestPickDERPFallback(t *testing.T) {
 	}

 	// Test that it's consistent.
-	for i := 0; i < 50; i++ {
+	for range 50 {
 		b := c.pickDERPFallback()
 		if a != b {
 			t.Fatalf("got inconsistent %d vs %d values", a, b)
@@ -463,7 +463,7 @@ func TestPickDERPFallback(t *testing.T) {
 	// Test that that the pointer value of c is blended in and
 	// distribution over nodes works.
 	got := map[int]int{}
-	for i := 0; i < 50; i++ {
+	for range 50 {
 		c = newConn()
 		c.derpMap = dm
 		got[c.pickDERPFallback()]++
@@ -1408,7 +1408,7 @@ func TestReceiveFromAllocs(t *testing.T) {

 func BenchmarkReceiveFrom(b *testing.B) {
 	roundTrip := setUpReceiveFrom(b)
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		roundTrip()
 	}
 }
@@ -1435,7 +1435,7 @@ func BenchmarkReceiveFrom_Native(b *testing.B) {
 	}

 	buf := make([]byte, 2<<10)
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		if _, err := sendConn.WriteTo(sendBuf, dstAddr); err != nil {
 			b.Fatalf("WriteTo: %v", err)
 		}
@@ -1484,7 +1484,7 @@ func TestSetNetworkMapChangingNodeKey(t *testing.T) {
 		t.Fatal(err)
 	}

-	for i := 0; i < 3; i++ {
+	for range 3 {
 		conn.SetNetworkMap(&netmap.NetworkMap{
 			Peers: nodeViews([]*tailcfg.Node{
 				{
@@ -1567,13 +1567,13 @@ func TestRebindStress(t *testing.T) {
 	wg.Add(2)
 	go func() {
 		defer wg.Done()
-		for i := 0; i < 2000; i++ {
+		for range 2000 {
 			conn.Rebind()
 		}
 	}()
 	go func() {
 		defer wg.Done()
-		for i := 0; i < 2000; i++ {
+		for range 2000 {
 			conn.Rebind()
 		}
 	}()
@@ -1827,7 +1827,7 @@ func TestStressSetNetworkMap(t *testing.T) {
 	prng := rand.New(rand.NewSource(int64(seed)))

 	const iters = 1000 // approx 0.5s on an m1 mac
-	for i := 0; i < iters; i++ {
+	for range iters {
 		for j := 0; j < npeers; j++ {
 			// Randomize which peers are present.
 			if prng.Int()&1 == 0 {
@@ -2215,7 +2215,7 @@ func Test_batchingUDPConn_coalesceMessages(t *testing.T) {
 	if got != len(tt.wantLens) {
 		t.Fatalf("got len %d want: %d", got, len(tt.wantLens))
 	}
-	for i := 0; i < got; i++ {
+	for i := range got {
 		if msgs[i].Addr != addr {
 			t.Errorf("msgs[%d].Addr != passed addr", i)
 		}
@@ -75,7 +75,7 @@ func TestInjectInboundLeak(t *testing.T) {
 	pkt := &packet.Parsed{}
 	const N = 10_000
 	ms0 := getMemStats()
-	for i := 0; i < N; i++ {
+	for range N {
 		outcome := ns.injectInbound(pkt, tunWrap)
 		if outcome != filter.DropSilently {
 			t.Fatalf("got outcome %v; want DropSilently", outcome)
@@ -265,7 +265,7 @@ func configureInterface(cfg *Config, tun *tun.NativeTun) (retErr error) {
 	// new interface has come up. Poll periodically until it
 	// does.
 	const tries = 20
-	for i := 0; i < tries; i++ {
+	for i := range tries {
 		found, err := setPrivateNetwork(luid)
 		if err != nil {
 			networkCategoryWarning.Set(fmt.Errorf("set-network-category: %w", err))
@@ -398,7 +398,7 @@ func configureInterface(cfg *Config, tun *tun.NativeTun) (retErr error) {
 	slices.SortFunc(routes, (*routeData).Compare)

 	deduplicatedRoutes := []*routeData{}
-	for i := 0; i < len(routes); i++ {
+	for i := range len(routes) {
 		// There's only one way to get to a given IP+Mask, so delete
 		// all matches after the first.
 		if i > 0 && routes[i].Destination == routes[i-1].Destination {
@@ -82,7 +82,7 @@ func TestRouteLess(t *testing.T) {
 }

 func TestRouteDataLessConsistent(t *testing.T) {
-	for i := 0; i < 10000; i++ {
+	for range 10000 {
 		ri := randRouteData()
 		rj := randRouteData()
 		if ri.Less(rj) && rj.Less(ri) {
@@ -909,7 +909,7 @@ func TestDelRouteIdempotent(t *testing.T) {
 		t.Error(err)
 		continue
 	}
-	for i := 0; i < 2; i++ {
+	for i := range 2 {
 		if err := lt.r.delRoute(cidr); err != nil {
 			t.Errorf("delRoute(i=%d): %v", i, err)
 		}
@@ -28,7 +28,7 @@ func TestConfigEqual(t *testing.T) {
 	}
 	configType := reflect.TypeFor[Config]()
 	configFields := []string{}
-	for i := 0; i < configType.NumField(); i++ {
+	for i := range configType.NumField() {
 		configFields = append(configFields, configType.Field(i).Name)
 	}
 	if !reflect.DeepEqual(configFields, testedFields) {
@@ -164,7 +164,7 @@ func TestUserspaceEnginePortReconfig(t *testing.T) {

 	// Keep making a wgengine until we find an unused port
 	var ue *userspaceEngine
-	for i := 0; i < 100; i++ {
+	for i := range 100 {
 		attempt := uint16(defaultPort + i)
 		e, err := NewFakeUserspaceEngine(t.Logf, attempt, &knobs)
 		if err != nil {
@@ -335,7 +335,7 @@ func BenchmarkGenLocalAddrFunc(b *testing.B) {
 	m := map[netip.Addr]bool{
 		la1: true,
 	}
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		x = m[la1]
 		x = m[lanot]
 	}
@@ -347,7 +347,7 @@ func BenchmarkGenLocalAddrFunc(b *testing.B) {
 		la1: true,
 		la2: true,
 	}
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		x = m[la1]
 		x = m[lanot]
 	}
@@ -358,7 +358,7 @@ func BenchmarkGenLocalAddrFunc(b *testing.B) {
 	f := func(t netip.Addr) bool {
 		return t == la1
 	}
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		x = f(la1)
 		x = f(lanot)
 	}
@@ -369,7 +369,7 @@ func BenchmarkGenLocalAddrFunc(b *testing.B) {
 	f := func(t netip.Addr) bool {
 		return t == la1 || t == la2
 	}
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		x = f(la1)
 		x = f(lanot)
 	}
@@ -85,7 +85,7 @@ func BenchmarkFromUAPI(b *testing.B) {
 	w.Flush()
 	r := bytes.NewReader(buf.Bytes())
 	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		r.Seek(0, io.SeekStart)
 		_, err := FromUAPI(r)
 		if err != nil {
@@ -93,7 +93,7 @@ func BenchmarkSetPeers(b *testing.B) {
 	b.ReportAllocs()
 	x := wglog.NewLogger(logger.Discard)
 	peers := [][]wgcfg.Peer{genPeers(0), genPeers(15), genPeers(16), genPeers(15)}
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		for _, p := range peers {
 			x.SetPeers(p)
 		}