Mirror of https://github.com/tailscale/tailscale.git, synced 2024-11-28 20:45:34 +00:00

all: use Go 1.22 range-over-int

Updates #11058
Change-Id: I35e7ef9b90e83cac04ca93fd964ad00ed5b48430
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>

commit 7c1d6e35a5 (parent 068db1f972)
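The rewrite throughout this diff is mechanical: Go 1.22 allows `range` over an integer, so a counted loop no longer needs the three-clause form. The sketch below is illustrative only and is not taken from this commit; it shows the two shapes the diff swaps in — `for i := range n` when the body still uses the index, and `for range n` when it does not.

```go
package main

import "fmt"

func main() {
	// Pre-1.22 form used throughout the old code: a three-clause counted loop.
	for i := 0; i < 3; i++ {
		fmt.Println("counter loop:", i)
	}

	// Go 1.22 range-over-int, used when the body still needs the index.
	for i := range 3 {
		fmt.Println("range-over-int:", i)
	}

	// When the index is unused, the loop variable disappears entirely,
	// matching rewrites in this diff such as `for range 5 {` and `for range b.N {`.
	for range 3 {
		fmt.Println("index not needed")
	}
}
```

In all three forms the body runs n times; with range-over-int the bound is evaluated once before the loop starts.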
@@ -223,7 +223,7 @@ func (s *Server) awaitUserAuth(ctx context.Context, session *browserSession) err
 
 func (s *Server) newSessionID() (string, error) {
 	raw := make([]byte, 16)
-	for i := 0; i < 5; i++ {
+	for range 5 {
 		if _, err := rand.Read(raw); err != nil {
 			return "", err
 		}
@@ -436,7 +436,7 @@ func (up *Updater) updateDebLike() error {
 		return fmt.Errorf("apt-get update failed: %w; output:\n%s", err, out)
 	}
 
-	for i := 0; i < 2; i++ {
+	for range 2 {
 		out, err := exec.Command("apt-get", "install", "--yes", "--allow-downgrades", "tailscale="+ver).CombinedOutput()
 		if err != nil {
 			if !bytes.Contains(out, []byte(`dpkg was interrupted`)) {
@@ -663,7 +663,7 @@ func genTarball(t *testing.T, path string, files map[string]string) {
 
 func TestWriteFileOverwrite(t *testing.T) {
 	path := filepath.Join(t.TempDir(), "test")
-	for i := 0; i < 2; i++ {
+	for i := range 2 {
 		content := fmt.Sprintf("content %d", i)
 		if err := writeFile(strings.NewReader(content), path, 0600); err != nil {
 			t.Fatal(err)
@@ -445,7 +445,7 @@ type testServer struct {
 
 func newTestServer(t *testing.T) *testServer {
 	var roots []rootKeyPair
-	for i := 0; i < 3; i++ {
+	for range 3 {
 		roots = append(roots, newRootKeyPair(t))
 	}
 
@@ -102,7 +102,7 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) {
 	writef("}")
 	writef("dst := new(%s)", name)
 	writef("*dst = *src")
-	for i := 0; i < t.NumFields(); i++ {
+	for i := range t.NumFields() {
 		fname := t.Field(i).Name()
 		ft := t.Field(i).Type()
 		if !codegen.ContainsPointers(ft) || codegen.HasNoClone(t.Tag(i)) {
@@ -522,7 +522,7 @@ func expectEvents(t *testing.T, rec *record.FakeRecorder, wantsEvents []string)
 	t.Helper()
 	// Events are not expected to arrive in order.
 	seenEvents := make([]string, 0)
-	for i := 0; i < len(wantsEvents); i++ {
+	for range len(wantsEvents) {
 		timer := time.NewTimer(time.Second * 5)
 		defer timer.Stop()
 		select {
@@ -314,7 +314,7 @@ func mustMakeNamesByAddr() map[netip.Addr]string {
 	seen := make(map[string]bool)
 	namesByAddr := make(map[netip.Addr]string)
 retry:
-	for i := 0; i < 10; i++ {
+	for i := range 10 {
 		clear(seen)
 		clear(namesByAddr)
 		for _, d := range m.Devices {
@@ -354,7 +354,7 @@ func fieldPrefix(s string, n int) string {
 }
 
 func appendRepeatByte(b []byte, c byte, n int) []byte {
-	for i := 0; i < n; i++ {
+	for range n {
 		b = append(b, c)
 	}
 	return b
@@ -88,7 +88,7 @@ func main() {
 
 	go func() {
 		// wait for tailscale to start before trying to fetch cert names
-		for i := 0; i < 60; i++ {
+		for range 60 {
 			st, err := localClient.Status(context.Background())
 			if err != nil {
 				log.Printf("error retrieving tailscale status; retrying: %v", err)
@@ -158,7 +158,7 @@ func TestSNIProxyWithNetmapConfig(t *testing.T) {
 		t.Fatal(err)
 	}
 	gotConfigured := false
-	for i := 0; i < 100; i++ {
+	for range 100 {
 		s, err := l.StatusWithoutPeers(ctx)
 		if err != nil {
 			t.Fatal(err)
@@ -831,7 +831,7 @@ func TestPrefFlagMapping(t *testing.T) {
 	}
 
 	prefType := reflect.TypeFor[ipn.Prefs]()
-	for i := 0; i < prefType.NumField(); i++ {
+	for i := range prefType.NumField() {
 		prefName := prefType.Field(i).Name
 		if prefHasFlag[prefName] {
 			continue
@@ -148,7 +148,7 @@ func runNetworkLockInit(ctx context.Context, args []string) error {
 	}
 
 	fmt.Printf("%d disablement secrets have been generated and are printed below. Take note of them now, they WILL NOT be shown again.\n", nlInitArgs.numDisablements)
-	for i := 0; i < nlInitArgs.numDisablements; i++ {
+	for range nlInitArgs.numDisablements {
 		var secret [32]byte
 		if _, err := rand.Read(secret[:]); err != nil {
 			return err
@ -387,7 +387,7 @@ func isProxyTarget(source string) bool {
|
||||
// allNumeric reports whether s only comprises of digits
|
||||
// and has at least one digit.
|
||||
func allNumeric(s string) bool {
|
||||
for i := 0; i < len(s); i++ {
|
||||
for i := range len(s) {
|
||||
if s[i] < '0' || s[i] > '9' {
|
||||
return false
|
||||
}
|
||||
|
@ -29,7 +29,7 @@ func main() {
|
||||
DERPMap: derpMap,
|
||||
ExplicitBaseURL: "http://127.0.0.1:9911",
|
||||
}
|
||||
for i := 0; i < *flagNFake; i++ {
|
||||
for range *flagNFake {
|
||||
control.AddFakeNode()
|
||||
}
|
||||
mux := http.NewServeMux()
|
||||
|
@ -149,7 +149,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, thi
|
||||
}
|
||||
}
|
||||
writeTemplate("common")
|
||||
for i := 0; i < t.NumFields(); i++ {
|
||||
for i := range t.NumFields() {
|
||||
f := t.Field(i)
|
||||
fname := f.Name()
|
||||
if !f.Exported() {
|
||||
@ -292,7 +292,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, thi
|
||||
}
|
||||
writeTemplate("unsupportedField")
|
||||
}
|
||||
for i := 0; i < typ.NumMethods(); i++ {
|
||||
for i := range typ.NumMethods() {
|
||||
f := typ.Method(i)
|
||||
if !f.Exported() {
|
||||
continue
|
||||
|
@ -91,7 +91,7 @@ func TestFastPath(t *testing.T) {
|
||||
|
||||
const packets = 10
|
||||
s := "test"
|
||||
for i := 0; i < packets; i++ {
|
||||
for range packets {
|
||||
// Many separate writes, to force separate Noise frames that
|
||||
// all get buffered up and then all sent as a single slice to
|
||||
// the server.
|
||||
@ -251,7 +251,7 @@ func TestConnMemoryOverhead(t *testing.T) {
|
||||
}
|
||||
defer closeAll()
|
||||
|
||||
for i := 0; i < num; i++ {
|
||||
for range num {
|
||||
client, server := pair(t)
|
||||
closers = append(closers, client, server)
|
||||
go func() {
|
||||
|
@ -64,7 +64,7 @@ func TestNoReuse(t *testing.T) {
|
||||
serverHandshakes = map[[48]byte]bool{}
|
||||
packets = map[[32]byte]bool{}
|
||||
)
|
||||
for i := 0; i < 10; i++ {
|
||||
for range 10 {
|
||||
var (
|
||||
clientRaw, serverRaw = memnet.NewConn("noise", 128000)
|
||||
clientBuf, serverBuf bytes.Buffer
|
||||
@ -162,7 +162,7 @@ func (r *tamperReader) Read(bs []byte) (int, error) {
|
||||
|
||||
func TestTampering(t *testing.T) {
|
||||
// Tamper with every byte of the client initiation message.
|
||||
for i := 0; i < 101; i++ {
|
||||
for i := range 101 {
|
||||
var (
|
||||
clientConn, serverRaw = memnet.NewConn("noise", 128000)
|
||||
serverConn = &readerConn{serverRaw, &tamperReader{serverRaw, i, 0}}
|
||||
@ -190,7 +190,7 @@ func TestTampering(t *testing.T) {
|
||||
}
|
||||
|
||||
// Tamper with every byte of the server response message.
|
||||
for i := 0; i < 51; i++ {
|
||||
for i := range 51 {
|
||||
var (
|
||||
clientRaw, serverConn = memnet.NewConn("noise", 128000)
|
||||
clientConn = &readerConn{clientRaw, &tamperReader{clientRaw, i, 0}}
|
||||
@ -215,7 +215,7 @@ func TestTampering(t *testing.T) {
|
||||
}
|
||||
|
||||
// Tamper with every byte of the first server>client transport message.
|
||||
for i := 0; i < 30; i++ {
|
||||
for i := range 30 {
|
||||
var (
|
||||
clientRaw, serverConn = memnet.NewConn("noise", 128000)
|
||||
clientConn = &readerConn{clientRaw, &tamperReader{clientRaw, 51 + i, 0}}
|
||||
@ -256,7 +256,7 @@ func TestTampering(t *testing.T) {
|
||||
}
|
||||
|
||||
// Tamper with every byte of the first client>server transport message.
|
||||
for i := 0; i < 30; i++ {
|
||||
for i := range 30 {
|
||||
var (
|
||||
clientConn, serverRaw = memnet.NewConn("noise", 128000)
|
||||
serverConn = &readerConn{serverRaw, &tamperReader{serverRaw, 101 + i, 0}}
|
||||
|
@ -9,7 +9,7 @@
|
||||
)
|
||||
|
||||
func fieldsOf(t reflect.Type) (fields []string) {
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
for i := range t.NumField() {
|
||||
if name := t.Field(i).Name; name != "_" {
|
||||
fields = append(fields, name)
|
||||
}
|
||||
|
@ -563,7 +563,7 @@ func (ms *mapSession) patchifyPeersChanged(resp *tailcfg.MapResponse) {
|
||||
func getNodeFields() []string {
|
||||
rt := reflect.TypeFor[tailcfg.Node]()
|
||||
ret := make([]string, rt.NumField())
|
||||
for i := 0; i < rt.NumField(); i++ {
|
||||
for i := range rt.NumField() {
|
||||
ret[i] = rt.Field(i).Name
|
||||
}
|
||||
return ret
|
||||
|
@ -1019,7 +1019,7 @@ func BenchmarkMapSessionDelta(b *testing.B) {
|
||||
Name: "foo.bar.ts.net.",
|
||||
},
|
||||
}
|
||||
for i := 0; i < size; i++ {
|
||||
for i := range size {
|
||||
res.Peers = append(res.Peers, &tailcfg.Node{
|
||||
ID: tailcfg.NodeID(i + 2),
|
||||
Name: fmt.Sprintf("peer%d.bar.ts.net.", i),
|
||||
@ -1046,7 +1046,7 @@ func BenchmarkMapSessionDelta(b *testing.B) {
|
||||
|
||||
// Now for the core of the benchmark loop, just toggle
|
||||
// a single node's online status.
|
||||
for i := 0; i < b.N; i++ {
|
||||
for i := range b.N {
|
||||
if err := ms.HandleNonKeepAliveMapResponse(ctx, &tailcfg.MapResponse{
|
||||
OnlineChange: map[tailcfg.NodeID]bool{
|
||||
2: i%2 == 0,
|
||||
|
@ -729,7 +729,7 @@ func (d *closeTrackDialer) Done() {
|
||||
// Sleep/wait a few times on the assumption that things will close
|
||||
// "eventually".
|
||||
const iters = 100
|
||||
for i := 0; i < iters; i++ {
|
||||
for i := range iters {
|
||||
d.mu.Lock()
|
||||
if len(d.conns) == 0 {
|
||||
d.mu.Unlock()
|
||||
|
@ -56,7 +56,7 @@ func TestSendRecv(t *testing.T) {
|
||||
const numClients = 3
|
||||
var clientPrivateKeys []key.NodePrivate
|
||||
var clientKeys []key.NodePublic
|
||||
for i := 0; i < numClients; i++ {
|
||||
for range numClients {
|
||||
priv := key.NewNode()
|
||||
clientPrivateKeys = append(clientPrivateKeys, priv)
|
||||
clientKeys = append(clientKeys, priv.Public())
|
||||
@ -73,7 +73,7 @@ func TestSendRecv(t *testing.T) {
|
||||
var recvChs []chan []byte
|
||||
errCh := make(chan error, 3)
|
||||
|
||||
for i := 0; i < numClients; i++ {
|
||||
for i := range numClients {
|
||||
t.Logf("Connecting client %d ...", i)
|
||||
cout, err := net.Dial("tcp", ln.Addr().String())
|
||||
if err != nil {
|
||||
@ -111,7 +111,7 @@ func TestSendRecv(t *testing.T) {
|
||||
var peerGoneCountNotHere expvar.Int
|
||||
|
||||
t.Logf("Starting read loops")
|
||||
for i := 0; i < numClients; i++ {
|
||||
for i := range numClients {
|
||||
go func(i int) {
|
||||
for {
|
||||
m, err := clients[i].Recv()
|
||||
@ -233,7 +233,7 @@ func TestSendRecv(t *testing.T) {
|
||||
wantUnknownPeers(1)
|
||||
|
||||
// PeerGoneNotHere is rate-limited to 3 times a second
|
||||
for i := 0; i < 5; i++ {
|
||||
for range 5 {
|
||||
if err := clients[1].Send(neKey, callMe); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -389,7 +389,7 @@ func TestSendFreeze(t *testing.T) {
|
||||
// if any tokens remain in the channel, they
|
||||
// must have been generated after drainAny was
|
||||
// called.
|
||||
for i := 0; i < cap(ch); i++ {
|
||||
for range cap(ch) {
|
||||
select {
|
||||
case <-ch:
|
||||
default:
|
||||
@ -456,7 +456,7 @@ func TestSendFreeze(t *testing.T) {
|
||||
aliceConn.Close()
|
||||
cathyConn.Close()
|
||||
|
||||
for i := 0; i < cap(errCh); i++ {
|
||||
for range cap(errCh) {
|
||||
err := <-errCh
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) || errors.Is(err, net.ErrClosed) {
|
||||
@ -891,7 +891,7 @@ func TestMultiForwarder(t *testing.T) {
|
||||
// run long enough concurrently with {Add,Remove}PacketForwarder loop above.
|
||||
numMsgs := 5000
|
||||
var fwd PacketForwarder
|
||||
for i := 0; i < numMsgs; i++ {
|
||||
for i := range numMsgs {
|
||||
s.mu.Lock()
|
||||
fwd = s.clientsMesh[u]
|
||||
s.mu.Unlock()
|
||||
@ -1288,7 +1288,7 @@ func TestServerDupClients(t *testing.T) {
|
||||
|
||||
func TestLimiter(t *testing.T) {
|
||||
rl := rate.NewLimiter(rate.Every(time.Minute), 100)
|
||||
for i := 0; i < 200; i++ {
|
||||
for i := range 200 {
|
||||
r := rl.Reserve()
|
||||
d := r.Delay()
|
||||
t.Logf("i=%d, allow=%v, d=%v", i, r.OK(), d)
|
||||
@ -1352,7 +1352,7 @@ func benchmarkSendRecvSize(b *testing.B, packetSize int) {
|
||||
b.SetBytes(int64(len(msg)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
if err := client.Send(clientKey, msg); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@ -1363,7 +1363,7 @@ func BenchmarkWriteUint32(b *testing.B) {
|
||||
w := bufio.NewWriter(io.Discard)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
writeUint32(w, 0x0ba3a)
|
||||
}
|
||||
}
|
||||
@ -1381,7 +1381,7 @@ func BenchmarkReadUint32(b *testing.B) {
|
||||
var err error
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
sinkU32, err = readUint32(r)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
@ -1454,7 +1454,7 @@ func TestClientSendRateLimiting(t *testing.T) {
|
||||
|
||||
// Flood should all succeed.
|
||||
cw.ResetStats()
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
if err := c.send(key.NodePublic{}, pkt); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -1473,7 +1473,7 @@ func TestClientSendRateLimiting(t *testing.T) {
|
||||
TokenBucketBytesPerSecond: 1,
|
||||
TokenBucketBytesBurst: int(bytes1 * 2),
|
||||
})
|
||||
for i := 0; i < 1000; i++ {
|
||||
for range 1000 {
|
||||
if err := c.send(key.NodePublic{}, pkt); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -25,7 +25,7 @@ func TestSendRecv(t *testing.T) {
|
||||
const numClients = 3
|
||||
var clientPrivateKeys []key.NodePrivate
|
||||
var clientKeys []key.NodePublic
|
||||
for i := 0; i < numClients; i++ {
|
||||
for range numClients {
|
||||
priv := key.NewNode()
|
||||
clientPrivateKeys = append(clientPrivateKeys, priv)
|
||||
clientKeys = append(clientKeys, priv.Public())
|
||||
@ -66,7 +66,7 @@ func TestSendRecv(t *testing.T) {
|
||||
}
|
||||
wg.Wait()
|
||||
}()
|
||||
for i := 0; i < numClients; i++ {
|
||||
for i := range numClients {
|
||||
key := clientPrivateKeys[i]
|
||||
c, err := NewClient(key, serverURL, t.Logf)
|
||||
if err != nil {
|
||||
@ -311,7 +311,7 @@ func TestBreakWatcherConnRecv(t *testing.T) {
|
||||
|
||||
// Wait for the watcher to run, then break the connection and check if it
|
||||
// reconnected and received peer updates.
|
||||
for i := 0; i < 10; i++ {
|
||||
for range 10 {
|
||||
select {
|
||||
case peers := <-watcherChan:
|
||||
if peers != 1 {
|
||||
@ -384,7 +384,7 @@ func TestBreakWatcherConn(t *testing.T) {
|
||||
|
||||
// Wait for the watcher to run, then break the connection and check if it
|
||||
// reconnected and received peer updates.
|
||||
for i := 0; i < 10; i++ {
|
||||
for range 10 {
|
||||
select {
|
||||
case peers := <-watcherChan:
|
||||
if peers != 1 {
|
||||
|
@ -15,7 +15,7 @@
|
||||
func TestAppendWarnableDebugFlags(t *testing.T) {
|
||||
resetWarnables()
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
w := NewWarnable(WithMapDebugFlag(fmt.Sprint(i)))
|
||||
if i%2 == 0 {
|
||||
w.Set(errors.New("boom"))
|
||||
@ -25,7 +25,7 @@ func TestAppendWarnableDebugFlags(t *testing.T) {
|
||||
want := []string{"z", "y", "0", "2", "4", "6", "8"}
|
||||
|
||||
var got []string
|
||||
for i := 0; i < 20; i++ {
|
||||
for range 20 {
|
||||
got = append(got[:0], "z", "y")
|
||||
got = AppendWarnableDebugFlags(got)
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
|
@ -93,7 +93,7 @@ func (b *LocalBackend) driveSetShareLocked(share *drive.Share) (views.SliceView[
|
||||
|
||||
addedShare := false
|
||||
var shares []*drive.Share
|
||||
for i := 0; i < existingShares.Len(); i++ {
|
||||
for i := range existingShares.Len() {
|
||||
existing := existingShares.At(i)
|
||||
if existing.Name() != share.Name {
|
||||
if !addedShare && existing.Name() > share.Name {
|
||||
@ -152,7 +152,7 @@ func (b *LocalBackend) driveRenameShareLocked(oldName, newName string) (views.Sl
|
||||
|
||||
found := false
|
||||
var shares []*drive.Share
|
||||
for i := 0; i < existingShares.Len(); i++ {
|
||||
for i := range existingShares.Len() {
|
||||
existing := existingShares.At(i)
|
||||
if existing.Name() == newName {
|
||||
return existingShares, os.ErrExist
|
||||
@ -213,7 +213,7 @@ func (b *LocalBackend) driveRemoveShareLocked(name string) (views.SliceView[*dri
|
||||
|
||||
found := false
|
||||
var shares []*drive.Share
|
||||
for i := 0; i < existingShares.Len(); i++ {
|
||||
for i := range existingShares.Len() {
|
||||
existing := existingShares.At(i)
|
||||
if existing.Name() != name {
|
||||
shares = append(shares, existing.AsStruct())
|
||||
@ -281,7 +281,7 @@ func driveShareViewsEqual(a *views.SliceView[*drive.Share, drive.ShareView], b v
|
||||
return false
|
||||
}
|
||||
|
||||
for i := 0; i < a.Len(); i++ {
|
||||
for i := range a.Len() {
|
||||
if !drive.ShareViewsEqual(a.At(i), b.At(i)) {
|
||||
return false
|
||||
}
|
||||
|
@ -446,7 +446,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo
|
||||
currentShares := b.pm.prefs.DriveShares()
|
||||
if currentShares.Len() > 0 {
|
||||
var shares []*drive.Share
|
||||
for i := 0; i < currentShares.Len(); i++ {
|
||||
for i := range currentShares.Len() {
|
||||
shares = append(shares, currentShares.At(i).AsStruct())
|
||||
}
|
||||
fs.SetShares(shares)
|
||||
@ -1855,7 +1855,7 @@ func (b *LocalBackend) updateFilterLocked(netMap *netmap.NetworkMap, prefs ipn.P
|
||||
}
|
||||
if prefs.Valid() {
|
||||
ar := prefs.AdvertiseRoutes()
|
||||
for i := 0; i < ar.Len(); i++ {
|
||||
for i := range ar.Len() {
|
||||
r := ar.At(i)
|
||||
if r.Bits() == 0 {
|
||||
// When offering a default route to the world, we
|
||||
@ -5418,7 +5418,7 @@ func (b *LocalBackend) OfferingExitNode() bool {
|
||||
}
|
||||
var def4, def6 bool
|
||||
ar := b.pm.CurrentPrefs().AdvertiseRoutes()
|
||||
for i := 0; i < ar.Len(); i++ {
|
||||
for i := range ar.Len() {
|
||||
r := ar.At(i)
|
||||
if r.Bits() != 0 {
|
||||
continue
|
||||
|
@ -949,7 +949,7 @@ func TestUpdateNetmapDelta(t *testing.T) {
|
||||
}
|
||||
|
||||
b.netMap = &netmap.NetworkMap{}
|
||||
for i := 0; i < 5; i++ {
|
||||
for i := range 5 {
|
||||
b.netMap.Peers = append(b.netMap.Peers, (&tailcfg.Node{ID: (tailcfg.NodeID(i) + 1)}).View())
|
||||
}
|
||||
b.updatePeersFromNetmapLocked(b.netMap)
|
||||
|
@ -39,7 +39,7 @@ func TestLocalLogLines(t *testing.T) {
|
||||
|
||||
logid := func(hex byte) logid.PublicID {
|
||||
var ret logid.PublicID
|
||||
for i := 0; i < len(ret); i++ {
|
||||
for i := range len(ret) {
|
||||
ret[i] = hex
|
||||
}
|
||||
return ret
|
||||
|
@ -338,7 +338,7 @@ func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM, per
|
||||
}
|
||||
bootstrapStateID := fmt.Sprintf("%d:%d", genesis.State.StateID1, genesis.State.StateID2)
|
||||
|
||||
for i := 0; i < persist.DisallowedTKAStateIDs().Len(); i++ {
|
||||
for i := range persist.DisallowedTKAStateIDs().Len() {
|
||||
stateID := persist.DisallowedTKAStateIDs().At(i)
|
||||
if stateID == bootstrapStateID {
|
||||
return fmt.Errorf("TKA with stateID of %q is disallowed on this node", stateID)
|
||||
@ -439,7 +439,7 @@ func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus {
|
||||
}
|
||||
|
||||
filtered := make([]*ipnstate.TKAFilteredPeer, len(b.tka.filtered))
|
||||
for i := 0; i < len(filtered); i++ {
|
||||
for i := range len(filtered) {
|
||||
filtered[i] = b.tka.filtered[i].Clone()
|
||||
}
|
||||
|
||||
@ -765,7 +765,7 @@ func (b *LocalBackend) NetworkLockLog(maxEntries int) ([]ipnstate.NetworkLockUpd
|
||||
|
||||
var out []ipnstate.NetworkLockUpdate
|
||||
cursor := b.tka.authority.Head()
|
||||
for i := 0; i < maxEntries; i++ {
|
||||
for range maxEntries {
|
||||
aum, err := b.tka.storage.AUM(cursor)
|
||||
if err != nil {
|
||||
if err == os.ErrNotExist {
|
||||
|
@ -109,7 +109,7 @@ func fileHasContents(name string, want string) check {
|
||||
|
||||
func hexAll(v string) string {
|
||||
var sb strings.Builder
|
||||
for i := 0; i < len(v); i++ {
|
||||
for i := range len(v) {
|
||||
fmt.Fprintf(&sb, "%%%02x", v[i])
|
||||
}
|
||||
return sb.String()
|
||||
@ -604,7 +604,7 @@ func TestFileDeleteRace(t *testing.T) {
|
||||
ps: ps,
|
||||
}
|
||||
buf := make([]byte, 2<<20)
|
||||
for i := 0; i < 30; i++ {
|
||||
for range 30 {
|
||||
rr := httptest.NewRecorder()
|
||||
ph.ServeHTTP(rr, httptest.NewRequest("PUT", "http://100.100.100.101:123/v0/put/foo.txt", bytes.NewReader(buf[:rand.Intn(len(buf))])))
|
||||
if res := rr.Result(); res.StatusCode != 200 {
|
||||
|
@ -868,7 +868,7 @@ func expandProxyArg(s string) (targetURL string, insecureSkipVerify bool) {
|
||||
}
|
||||
|
||||
func allNumeric(s string) bool {
|
||||
for i := 0; i < len(s); i++ {
|
||||
for i := range len(s) {
|
||||
if s[i] < '0' || s[i] > '9' {
|
||||
return false
|
||||
}
|
||||
|
@ -70,7 +70,7 @@ func (nt *notifyThrottler) drain(count int) []ipn.Notify {
|
||||
nt.mu.Unlock()
|
||||
|
||||
nn := []ipn.Notify{}
|
||||
for i := 0; i < count; i++ {
|
||||
for i := range count {
|
||||
select {
|
||||
case n := <-ch:
|
||||
nn = append(nn, n)
|
||||
@ -1039,7 +1039,7 @@ func TestWGEngineStatusRace(t *testing.T) {
|
||||
// we would end up in state ipn.Running.
|
||||
// The same should thus be true if these callbacks occur concurrently.
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < 100; i++ {
|
||||
for i := range 100 {
|
||||
wg.Add(1)
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
|
@ -342,7 +342,7 @@ func userIDFromString(v string) string {
|
||||
}
|
||||
|
||||
func isAllDigit(s string) bool {
|
||||
for i := 0; i < len(s); i++ {
|
||||
for i := range len(s) {
|
||||
if b := s[i]; b < '0' || b > '9' {
|
||||
return false
|
||||
}
|
||||
|
@ -247,7 +247,7 @@ func (h *Handler) serveDebugDERPRegion(w http.ResponseWriter, r *http.Request) {
|
||||
// Next, repeatedly get the server key to see if the node is
|
||||
// behind a load balancer (incorrectly).
|
||||
serverPubKeys := make(map[key.NodePublic]bool)
|
||||
for i := 0; i < 5; i++ {
|
||||
for i := range 5 {
|
||||
func() {
|
||||
rc := derphttp.NewRegionClient(fakePrivKey, h.logf, h.netMon, func() *tailcfg.DERPRegion {
|
||||
return &tailcfg.DERPRegion{
|
||||
|
@ -903,7 +903,7 @@ type result struct {
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
for i := 0; i < len(dialers); i++ {
|
||||
for range len(dialers) {
|
||||
res := <-results
|
||||
fmt.Fprintf(w, "[%s] connected=%v err=%v\n", res.name, res.conn != nil, res.err)
|
||||
if res.conn != nil {
|
||||
|
@ -363,7 +363,7 @@ func applyPrefsEdits(src, dst reflect.Value, mask map[string]reflect.Value) {
|
||||
|
||||
func maskFields(v reflect.Value) map[string]reflect.Value {
|
||||
mask := make(map[string]reflect.Value)
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
for i := range v.NumField() {
|
||||
f := v.Type().Field(i).Name
|
||||
if !strings.HasSuffix(f, "Set") {
|
||||
continue
|
||||
|
@ -26,7 +26,7 @@
|
||||
)
|
||||
|
||||
func fieldsOf(t reflect.Type) (fields []string) {
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
for i := range t.NumField() {
|
||||
fields = append(fields, t.Field(i).Name)
|
||||
}
|
||||
return
|
||||
@ -661,7 +661,7 @@ func TestMaskedPrefsFields(t *testing.T) {
|
||||
// ApplyEdits assumes.
|
||||
pt := reflect.TypeFor[Prefs]()
|
||||
mt := reflect.TypeFor[MaskedPrefs]()
|
||||
for i := 0; i < mt.NumField(); i++ {
|
||||
for i := range mt.NumField() {
|
||||
name := mt.Field(i).Name
|
||||
if i == 0 {
|
||||
if name != "Prefs" {
|
||||
|
@ -75,11 +75,11 @@ func TestDropOldLogs(t *testing.T) {
|
||||
f := newFilchTest(t, filePrefix, Options{ReplaceStderr: false, MaxFileSize: 1000})
|
||||
defer f.close(t)
|
||||
// Make filch rotate the logs 3 times
|
||||
for i := 0; i < tc.write; i++ {
|
||||
for range tc.write {
|
||||
f.write(t, line1)
|
||||
}
|
||||
// We should only be able to read the last 150 lines
|
||||
for i := 0; i < tc.read; i++ {
|
||||
for i := range tc.read {
|
||||
f.read(t, line1)
|
||||
if t.Failed() {
|
||||
t.Logf("could only read %d lines", i)
|
||||
|
@ -77,7 +77,7 @@ func(w http.ResponseWriter, r *http.Request) {
|
||||
func TestDrainPendingMessages(t *testing.T) {
|
||||
ts, l := NewLogtailTestHarness(t)
|
||||
|
||||
for i := 0; i < logLines; i++ {
|
||||
for range logLines {
|
||||
l.Write([]byte("log line"))
|
||||
}
|
||||
|
||||
@ -540,7 +540,7 @@ func BenchmarkWriteText(b *testing.B) {
|
||||
l.clock = tstime.StdClock{}
|
||||
l.buffer = discardBuffer{}
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
must.Get(l.Write(testdataTextLog))
|
||||
}
|
||||
}
|
||||
@ -550,7 +550,7 @@ func BenchmarkWriteJSON(b *testing.B) {
|
||||
l.clock = tstime.StdClock{}
|
||||
l.buffer = discardBuffer{}
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
must.Get(l.Write(testdataJSONLog))
|
||||
}
|
||||
}
|
||||
|
@ -41,7 +41,7 @@ func TestCurrentFileDescriptors(t *testing.T) {
|
||||
|
||||
// Open some FDs.
|
||||
const extra = 10
|
||||
for i := 0; i < extra; i++ {
|
||||
for i := range extra {
|
||||
f, err := os.Open("/proc/self/stat")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -58,7 +58,7 @@ func TestCurrentFileDescriptors(t *testing.T) {
|
||||
|
||||
func BenchmarkCurrentFileDescriptors(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
_ = CurrentFDs()
|
||||
}
|
||||
}
|
||||
|
@ -61,7 +61,7 @@ func labelString(k any) string {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("{")
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
for i := range t.NumField() {
|
||||
if i > 0 {
|
||||
sb.WriteString(",")
|
||||
}
|
||||
|
@ -18,7 +18,7 @@
|
||||
|
||||
func TestInversePrefix(t *testing.T) {
|
||||
t.Parallel()
|
||||
for i := 0; i < 256; i++ {
|
||||
for i := range 256 {
|
||||
for len := 0; len < 9; len++ {
|
||||
addr := i & (0xFF << (8 - len))
|
||||
idx := prefixIndex(uint8(addr), len)
|
||||
@ -32,7 +32,7 @@ func TestInversePrefix(t *testing.T) {
|
||||
|
||||
func TestHostIndex(t *testing.T) {
|
||||
t.Parallel()
|
||||
for i := 0; i < 256; i++ {
|
||||
for i := range 256 {
|
||||
got := hostIndex(uint8(i))
|
||||
want := prefixIndex(uint8(i), 8)
|
||||
if got != want {
|
||||
@ -63,7 +63,7 @@ func TestStrideTableInsert(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i++ {
|
||||
for i := range 256 {
|
||||
addr := uint8(i)
|
||||
slowVal, slowOK := slow.get(addr)
|
||||
fastVal, fastOK := fast.get(addr)
|
||||
@ -103,7 +103,7 @@ func TestStrideTableInsertShuffled(t *testing.T) {
|
||||
|
||||
// Order of insertion should not affect the final shape of the stride table.
|
||||
routes2 := append([]slowEntry[int](nil), routes...) // dup so we can print both slices on fail
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
rand.Shuffle(len(routes2), func(i, j int) { routes2[i], routes2[j] = routes2[j], routes2[i] })
|
||||
rt2 := strideTable[int]{}
|
||||
for _, route := range routes2 {
|
||||
@ -152,7 +152,7 @@ func TestStrideTableDelete(t *testing.T) {
|
||||
t.Fatalf("slowTable has %d entries after deletes, want 50", cnt)
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i++ {
|
||||
for i := range 256 {
|
||||
addr := uint8(i)
|
||||
slowVal, slowOK := slow.get(addr)
|
||||
fastVal, fastOK := fast.get(addr)
|
||||
@ -188,7 +188,7 @@ func TestStrideTableDeleteShuffle(t *testing.T) {
|
||||
|
||||
// Order of deletion should not affect the final shape of the stride table.
|
||||
toDelete2 := append([]slowEntry[int](nil), toDelete...) // dup so we can print both slices on fail
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
rand.Shuffle(len(toDelete2), func(i, j int) { toDelete2[i], toDelete2[j] = toDelete2[j], toDelete2[i] })
|
||||
rt2 := strideTable[int]{}
|
||||
for _, route := range routes {
|
||||
@ -262,7 +262,7 @@ func forStrideCountAndOrdering(b *testing.B, fn func(b *testing.B, routes []slow
|
||||
func BenchmarkStrideTableInsertion(b *testing.B) {
|
||||
forStrideCountAndOrdering(b, func(b *testing.B, routes []slowEntry[int]) {
|
||||
val := 0
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
var rt strideTable[int]
|
||||
for _, route := range routes {
|
||||
rt.insert(route.addr, route.len, val)
|
||||
@ -285,7 +285,7 @@ func BenchmarkStrideTableDeletion(b *testing.B) {
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
rt2 := rt
|
||||
for _, route := range routes {
|
||||
rt2.delete(route.addr, route.len)
|
||||
@ -311,7 +311,7 @@ func BenchmarkStrideTableGet(b *testing.B) {
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for i := range b.N {
|
||||
writeSink, _ = rt.get(uint8(i))
|
||||
}
|
||||
gets := float64(b.N)
|
||||
|
@ -594,7 +594,7 @@ func TestInsertCompare(t *testing.T) {
|
||||
|
||||
seenVals4 := map[int]bool{}
|
||||
seenVals6 := map[int]bool{}
|
||||
for i := 0; i < 10_000; i++ {
|
||||
for range 10_000 {
|
||||
a := randomAddr()
|
||||
slowVal, slowOK := slow.get(a)
|
||||
fastVal, fastOK := fast.Get(a)
|
||||
@ -644,12 +644,12 @@ func TestInsertShuffled(t *testing.T) {
|
||||
}
|
||||
}()
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
for range 10 {
|
||||
pfxs2 := append([]slowPrefixEntry[int](nil), pfxs...)
|
||||
rand.Shuffle(len(pfxs2), func(i, j int) { pfxs2[i], pfxs2[j] = pfxs2[j], pfxs2[i] })
|
||||
|
||||
addrs := make([]netip.Addr, 0, 10_000)
|
||||
for i := 0; i < 10_000; i++ {
|
||||
for range 10_000 {
|
||||
addrs = append(addrs, randomAddr())
|
||||
}
|
||||
|
||||
@ -723,7 +723,7 @@ func TestDeleteCompare(t *testing.T) {
|
||||
|
||||
seenVals4 := map[int]bool{}
|
||||
seenVals6 := map[int]bool{}
|
||||
for i := 0; i < numProbes; i++ {
|
||||
for range numProbes {
|
||||
a := randomAddr()
|
||||
slowVal, slowOK := slow.get(a)
|
||||
fastVal, fastOK := fast.Get(a)
|
||||
@ -789,7 +789,7 @@ func TestDeleteShuffled(t *testing.T) {
|
||||
rt.Delete(pfx.pfx)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
for range 10 {
|
||||
pfxs2 := append([]slowPrefixEntry[int](nil), pfxs...)
|
||||
toDelete2 := append([]slowPrefixEntry[int](nil), toDelete...)
|
||||
rand.Shuffle(len(toDelete2), func(i, j int) { toDelete2[i], toDelete2[j] = toDelete2[j], toDelete2[i] })
|
||||
@ -806,7 +806,7 @@ func TestDeleteShuffled(t *testing.T) {
|
||||
|
||||
// Diffing a deep tree of tables gives cmp.Diff a nervous breakdown, so
|
||||
// test for equivalence statistically with random probes instead.
|
||||
for i := 0; i < numProbes; i++ {
|
||||
for range numProbes {
|
||||
a := randomAddr()
|
||||
val1, ok1 := rt.Get(a)
|
||||
val2, ok2 := rt2.Get(a)
|
||||
@ -909,7 +909,7 @@ func BenchmarkTableInsertion(b *testing.B) {
|
||||
var startMem, endMem runtime.MemStats
|
||||
runtime.ReadMemStats(&startMem)
|
||||
b.StartTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
var rt Table[int]
|
||||
for _, route := range routes {
|
||||
rt.Insert(route.pfx, route.val)
|
||||
@ -944,7 +944,7 @@ func BenchmarkTableDelete(b *testing.B) {
|
||||
|
||||
var t runningTimer
|
||||
allocs, bytes := getMemCost(func() {
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
var rt Table[int]
|
||||
for _, route := range routes {
|
||||
rt.Insert(route.pfx, route.val)
|
||||
@ -983,7 +983,7 @@ func BenchmarkTableGet(b *testing.B) {
|
||||
// cost is 16 bytes - presumably due to some amortized costs in
|
||||
// the memory allocator? Either way, empirically 100 iterations
|
||||
// reliably reports the correct cost.
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
_ = genAddr()
|
||||
}
|
||||
})
|
||||
@ -991,7 +991,7 @@ func BenchmarkTableGet(b *testing.B) {
|
||||
addrBytes /= 100
|
||||
var t runningTimer
|
||||
allocs, bytes := getMemCost(func() {
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
addr := genAddr()
|
||||
t.Start()
|
||||
writeSink, _ = rt.Get(addr)
|
||||
|
@ -169,7 +169,7 @@ func Benchmark(b *testing.B) {
|
||||
p := testPacketV4(ipproto.UDP, [4]byte{192, 168, 0, 1}, [4]byte{192, 168, 0, 2}, 123, 456, 789)
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
s := NewStatistics(0, 0, nil)
|
||||
for j := 0; j < 1e3; j++ {
|
||||
s.UpdateTxVirtual(p)
|
||||
@ -180,7 +180,7 @@ func Benchmark(b *testing.B) {
|
||||
p := testPacketV4(ipproto.UDP, [4]byte{}, [4]byte{}, 0, 0, 789)
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
s := NewStatistics(0, 0, nil)
|
||||
for j := 0; j < 1e3; j++ {
|
||||
binary.BigEndian.PutUint32(p[20:], uint32(j)) // unique port combination
|
||||
@ -192,7 +192,7 @@ func Benchmark(b *testing.B) {
|
||||
p := testPacketV4(ipproto.UDP, [4]byte{192, 168, 0, 1}, [4]byte{192, 168, 0, 2}, 123, 456, 789)
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
s := NewStatistics(0, 0, nil)
|
||||
var group sync.WaitGroup
|
||||
for j := 0; j < runtime.NumCPU(); j++ {
|
||||
@ -214,7 +214,7 @@ func Benchmark(b *testing.B) {
|
||||
}
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
s := NewStatistics(0, 0, nil)
|
||||
var group sync.WaitGroup
|
||||
for j := 0; j < runtime.NumCPU(); j++ {
|
||||
|
@ -115,7 +115,7 @@ func TestDNSOverTCP(t *testing.T) {
|
||||
}
|
||||
|
||||
results := map[dnsname.FQDN]string{}
|
||||
for i := 0; i < len(wantResults); i++ {
|
||||
for range len(wantResults) {
|
||||
var respLength uint16
|
||||
if err := binary.Read(c, binary.BigEndian, &respLength); err != nil {
|
||||
t.Fatalf("reading len: %v", err)
|
||||
|
@ -198,7 +198,7 @@ func (m *windowsManager) setHosts(hosts []*HostEntry) error {
|
||||
|
||||
// This can fail spuriously with an access denied error, so retry it a
|
||||
// few times.
|
||||
for i := 0; i < 5; i++ {
|
||||
for range 5 {
|
||||
if err = atomicfile.WriteFile(hostsFile, outB, fileMode); err == nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -613,7 +613,7 @@ func TestRecursionLimit(t *testing.T) {
|
||||
// Fill out a CNAME chain equal to our recursion limit; we won't get
|
||||
// this far since each CNAME is more than 1 level "deep", but this
|
||||
// ensures that we have more than the limit.
|
||||
for i := 0; i < maxDepth+1; i++ {
|
||||
for i := range maxDepth + 1 {
|
||||
curr := fmt.Sprintf("%d-tailscale.com.", i)
|
||||
|
||||
tailscaleNameservers := &dns.Msg{
|
||||
|
@ -67,7 +67,7 @@ func (fl *fwdLog) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
fmt.Fprintf(w, "<html><h1>DNS forwards</h1>")
|
||||
now := time.Now()
|
||||
for i := 0; i < len(fl.ent); i++ {
|
||||
for i := range len(fl.ent) {
|
||||
ent := fl.ent[(i+fl.pos)%len(fl.ent)]
|
||||
if ent.Domain == "" {
|
||||
continue
|
||||
|
@ -199,7 +199,7 @@ func BenchmarkNameFromQuery(b *testing.B) {
|
||||
}
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
_, err := nameFromQuery(msg)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
@ -413,7 +413,7 @@ func makeLargeResponse(tb testing.TB, domain string) (request, response []byte)
|
||||
Class: dns.ClassINET,
|
||||
})
|
||||
builder.StartAnswers()
|
||||
for i := 0; i < 120; i++ {
|
||||
for i := range 120 {
|
||||
builder.AResource(dns.ResourceHeader{
|
||||
Name: name,
|
||||
Class: dns.ClassINET,
|
||||
|
@ -976,7 +976,7 @@ func BenchmarkFull(b *testing.B) {
|
||||
for _, tt := range tests {
|
||||
b.Run(tt.name, func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
syncRespond(r, tt.request)
|
||||
}
|
||||
})
|
||||
|
@ -103,7 +103,7 @@ func likelyHomeRouterIPDarwinExec() (ret netip.Addr, netif string, ok bool) {
|
||||
|
||||
func TestFetchRoutingTable(t *testing.T) {
|
||||
// Issue 1345: this used to be flaky on darwin.
|
||||
for i := 0; i < 20; i++ {
|
||||
for range 20 {
|
||||
_, err := fetchRoutingTable()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -97,7 +97,7 @@ func TestAwsAppRunnerDefaultRouteInterface(t *testing.T) {
|
||||
|
||||
func BenchmarkDefaultRouteInterface(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
if _, err := DefaultRouteInterface(); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
@ -7,7 +7,7 @@
|
||||
|
||||
func BenchmarkGetPACWindows(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for i := range b.N {
|
||||
v := getPACWindows()
|
||||
if i == 0 {
|
||||
b.Logf("Got: %q", v)
|
||||
|
@ -512,7 +512,7 @@ func BenchmarkDecode(b *testing.B) {
|
||||
for _, bench := range benches {
|
||||
b.Run(bench.name, func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
var p Parsed
|
||||
p.Decode(bench.buf)
|
||||
}
|
||||
@ -624,7 +624,7 @@ func BenchmarkString(b *testing.B) {
|
||||
var p Parsed
|
||||
p.Decode(bench.buf)
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
sinkString = p.String()
|
||||
}
|
||||
})
|
||||
|
@ -21,7 +21,7 @@ func TestCreateOrGetMapping(t *testing.T) {
|
||||
c := NewClient(t.Logf, nil, nil, new(controlknobs.Knobs), nil)
|
||||
defer c.Close()
|
||||
c.SetLocalPort(1234)
|
||||
for i := 0; i < 2; i++ {
|
||||
for i := range 2 {
|
||||
if i > 0 {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
@ -36,7 +36,7 @@ func TestClientProbe(t *testing.T) {
|
||||
}
|
||||
c := NewClient(t.Logf, nil, nil, new(controlknobs.Knobs), nil)
|
||||
defer c.Close()
|
||||
for i := 0; i < 3; i++ {
|
||||
for i := range 3 {
|
||||
if i > 0 {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
|
@ -597,7 +597,7 @@ func TestGetUPnPPortMapping(t *testing.T) {
|
||||
firstResponse netip.AddrPort
|
||||
prevPort uint16
|
||||
)
|
||||
for i := 0; i < 2; i++ {
|
||||
for i := range 2 {
|
||||
sawRequestWithLease.Store(false)
|
||||
res, err := c.Probe(ctx)
|
||||
if err != nil {
|
||||
|
@ -62,7 +62,7 @@ func(tt *testTime, rm *radioMonitor) {
|
||||
"400 iterations: 2 sec active, 1 min idle",
|
||||
func(tt *testTime, rm *radioMonitor) {
|
||||
// 400 iterations to ensure values loop back around rm.usage array
|
||||
for i := 0; i < 400; i++ {
|
||||
for range 400 {
|
||||
rm.active()
|
||||
tt.Add(1 * time.Second)
|
||||
rm.active()
|
||||
|
@ -76,7 +76,7 @@ func BenchmarkServerSTUN(b *testing.B) {
|
||||
|
||||
tx := stun.NewTxID()
|
||||
req := stun.Request(tx)
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
if _, err := cc.WriteToUDP(req, addr); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
@ -45,7 +45,7 @@ func TestRTT(t *testing.T) {
|
||||
// Write a bunch of data to the conn to force TCP session establishment
|
||||
// and a few packets.
|
||||
junkData := bytes.Repeat([]byte("hello world\n"), 1024*1024)
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
if _, err := conn.Write(junkData); err != nil {
|
||||
t.Fatalf("error writing junk data [%d]: %v", i, err)
|
||||
}
|
||||
|
@ -99,7 +99,7 @@ func TestNewContainsIPFunc(t *testing.T) {
|
||||
|
||||
func BenchmarkTailscaleServiceAddr(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
sinkIP = TailscaleServiceIP()
|
||||
}
|
||||
}
|
||||
|
@ -64,7 +64,7 @@ func TestProxyFromEnvironment_setNoProxyUntil(t *testing.T) {
|
||||
os.Setenv("HTTPS_PROXY", fakeProxyEnv)
|
||||
|
||||
req := &http.Request{URL: must.Get(url.Parse("https://example.com/"))}
|
||||
for i := 0; i < 3; i++ {
|
||||
for i := range 3 {
|
||||
switch i {
|
||||
case 1:
|
||||
setNoProxyUntil(time.Minute)
|
||||
|
@ -222,7 +222,7 @@ func TestReadAndInject(t *testing.T) {
|
||||
var seen = make(map[string]bool)
|
||||
sizes := make([]int, 1)
|
||||
// We expect the same packets back, in no particular order.
|
||||
for i := 0; i < len(written)+len(injected); i++ {
|
||||
for i := range len(written) + len(injected) {
|
||||
packet := buf[:]
|
||||
buffs := [][]byte{packet}
|
||||
numPackets, err := tun.Read(buffs, sizes, 0)
|
||||
@ -283,7 +283,7 @@ func TestWriteAndInject(t *testing.T) {
|
||||
|
||||
seen := make(map[string]bool)
|
||||
// We expect the same packets back, in no particular order.
|
||||
for i := 0; i < len(written)+len(injected); i++ {
|
||||
for i := range len(written) + len(injected) {
|
||||
packet := <-chtun.Inbound
|
||||
got := string(packet)
|
||||
t.Logf("read %d: got %s", i, got)
|
||||
@ -470,7 +470,7 @@ func BenchmarkWrite(b *testing.B) {
|
||||
defer tun.Close()
|
||||
|
||||
packet := [][]byte{udp4("5.6.7.8", "1.2.3.4", 89, 89)}
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
_, err := ftun.Write(packet, 0)
|
||||
if err != nil {
|
||||
b.Errorf("err = %v; want nil", err)
|
||||
@ -902,7 +902,7 @@ type captureRecord struct {
|
||||
pkt: []byte("InjectOutboundPacketBuffer"),
|
||||
},
|
||||
}
|
||||
for i := 0; i < len(want); i++ {
|
||||
for i := range len(want) {
|
||||
want[i].now = now
|
||||
}
|
||||
if !reflect.DeepEqual(captured, want) {
|
||||
|
@ -45,7 +45,7 @@ func stateFileUnix() string {
|
||||
}
|
||||
|
||||
try := path
|
||||
for i := 0; i < 3; i++ { // check writability of the file, /var/lib/tailscale, and /var/lib
|
||||
for range 3 { // check writability of the file, /var/lib/tailscale, and /var/lib
|
||||
err := unix.Access(try, unix.O_RDWR)
|
||||
if err == nil {
|
||||
return path
|
||||
|
@ -114,7 +114,7 @@ func BenchmarkParsePorts(b *testing.B) {
|
||||
1: 00000000000000000000000000000000:1F91 00000000000000000000000000000000:0000 0A 00000000:00000000 00:00000000 00000000 1000 0 142240557 1 0000000000000000 100 0 0 10 0
|
||||
2: 00000000000000000000000000000000:0016 00000000000000000000000000000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 34064 1 0000000000000000 100 0 0 10 0
|
||||
`)
|
||||
for i := 0; i < 50000; i++ {
|
||||
for range 50000 {
|
||||
contents.WriteString(" 3: 69050120005716BC64906EBE009ECD4D:D506 0047062600000000000000006E171268:01BB 01 00000000:00000000 02:0000009E 00000000 1000 0 151042856 2 0000000000000000 21 4 28 10 -1\n")
|
||||
}
|
||||
|
||||
@ -123,7 +123,7 @@ func BenchmarkParsePorts(b *testing.B) {
|
||||
r := bytes.NewReader(contents.Bytes())
|
||||
br := bufio.NewReader(&contents)
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
r.Seek(0, io.SeekStart)
|
||||
br.Reset(r)
|
||||
err := li.parseProcNetFile(br, "tcp6")
|
||||
@ -142,7 +142,7 @@ func BenchmarkFindProcessNames(b *testing.B) {
|
||||
need := map[string]*portMeta{
|
||||
"something-we'll-never-find": new(portMeta),
|
||||
}
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
if err := li.findProcessNames(need); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
@ -205,7 +205,7 @@ func benchmarkGetList(b *testing.B, incremental bool) {
|
||||
b.Skip(p.initErr)
|
||||
}
|
||||
b.Cleanup(func() { p.Close() })
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
pl, err := p.getList()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
|
@ -186,7 +186,7 @@ func Test_packetsForSize(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
hashes := make(map[string]int)
|
||||
for i := 0; i < 5; i++ {
|
||||
for range 5 {
|
||||
pkts := packetsForSize(int64(tt.size))
|
||||
if len(pkts) != tt.wantPackets {
|
||||
t.Errorf("packetsForSize(%d) got %d packets, want %d", tt.size, len(pkts), tt.wantPackets)
|
||||
|
@ -155,7 +155,7 @@ func TestProberRun(t *testing.T) {
|
||||
const startingProbes = 100
|
||||
var probes []*Probe
|
||||
|
||||
for i := 0; i < startingProbes; i++ {
|
||||
for i := range startingProbes {
|
||||
probes = append(probes, p.Run(fmt.Sprintf("probe%d", i), probeInterval, nil, FuncProbe(func(context.Context) error {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
@ -76,7 +76,7 @@ func TestBasics(t *testing.T) {
|
||||
errs <- nil
|
||||
}()
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
for range 2 {
|
||||
if err := <-errs; err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -99,7 +99,7 @@ func TestExpectedWindowsTypes(t *testing.T) {
|
||||
errs <- nil
|
||||
}()
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
for range 2 {
|
||||
if err := <-errs; err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -54,7 +54,7 @@ func benchEncoder(b *testing.B, mk func() (*zstd.Encoder, error)) {
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
e.EncodeAll(in, out)
|
||||
}
|
||||
}
|
||||
@ -66,7 +66,7 @@ func benchEncoderWithConstruction(b *testing.B, mk func() (*zstd.Encoder, error)
|
||||
out := make([]byte, 0, 10<<10) // 10kiB
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
e, err := mk()
|
||||
if err != nil {
|
||||
b.Fatalf("making encoder: %v", err)
|
||||
@ -88,7 +88,7 @@ func benchDecoder(b *testing.B, mk func() (*zstd.Decoder, error)) {
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
d.DecodeAll(in, out)
|
||||
}
|
||||
}
|
||||
@ -100,7 +100,7 @@ func benchDecoderWithConstruction(b *testing.B, mk func() (*zstd.Decoder, error)
|
||||
out := make([]byte, 0, 10<<10)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
d, err := mk()
|
||||
if err != nil {
|
||||
b.Fatalf("creating decoder: %v", err)
|
||||
|
@ -1016,7 +1016,7 @@ func TestPublicKeyFetching(t *testing.T) {
|
||||
pubKeyHTTPClient: ts.Client(),
|
||||
timeNow: clock.Now,
|
||||
}
|
||||
for i := 0; i < 2; i++ {
|
||||
for range 2 {
|
||||
got, err := srv.fetchPublicKeysURL(keys + "/alice.keys")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -45,7 +45,7 @@ func TestWaitGroupChan(t *testing.T) {
|
||||
|
||||
func TestClosedChan(t *testing.T) {
|
||||
ch := ClosedChan()
|
||||
for i := 0; i < 2; i++ {
|
||||
for range 2 {
|
||||
select {
|
||||
case <-ch:
|
||||
default:
|
||||
|
@ -23,7 +23,7 @@
|
||||
)
|
||||
|
||||
func fieldsOf(t reflect.Type) (fields []string) {
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
for i := range t.NumField() {
|
||||
fields = append(fields, t.Field(i).Name)
|
||||
}
|
||||
return
|
||||
|
@ -118,7 +118,7 @@ func Test(t *testing.T) {
|
||||
|
||||
func TestRemove0(t *testing.T) {
|
||||
h := new(myHeap[int])
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
h.Push(i)
|
||||
}
|
||||
h.verify(t, 0)
|
||||
@ -135,7 +135,7 @@ func TestRemove0(t *testing.T) {
|
||||
|
||||
func TestRemove1(t *testing.T) {
|
||||
h := new(myHeap[int])
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
h.Push(i)
|
||||
}
|
||||
h.verify(t, 0)
|
||||
@ -153,7 +153,7 @@ func TestRemove2(t *testing.T) {
|
||||
N := 10
|
||||
|
||||
h := new(myHeap[int])
|
||||
for i := 0; i < N; i++ {
|
||||
for i := range N {
|
||||
h.Push(i)
|
||||
}
|
||||
h.verify(t, 0)
|
||||
@ -167,7 +167,7 @@ func TestRemove2(t *testing.T) {
|
||||
if len(m) != N {
|
||||
t.Errorf("len(m) = %d; want %d", len(m), N)
|
||||
}
|
||||
for i := 0; i < len(m); i++ {
|
||||
for i := range len(m) {
|
||||
if !m[i] {
|
||||
t.Errorf("m[%d] doesn't exist", i)
|
||||
}
|
||||
@ -177,7 +177,7 @@ func TestRemove2(t *testing.T) {
|
||||
func BenchmarkDup(b *testing.B) {
|
||||
const n = 10000
|
||||
h := make(myHeap[int], 0, n)
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
for j := 0; j < n; j++ {
|
||||
Push[int](&h, 0) // all elements are the same
|
||||
}
|
||||
|
@ -209,7 +209,7 @@ func (c *testChain) buildChain() {
|
||||
// in O(n+1) where n is the number of AUMs.
|
||||
c.AUMs = make(map[string]AUM, len(c.Nodes))
|
||||
c.AUMHashes = make(map[string]AUMHash, len(c.Nodes))
|
||||
for i := 0; i < len(c.Nodes)+1; i++ {
|
||||
for range len(c.Nodes) + 1 {
|
||||
if len(pending) == 0 {
|
||||
return
|
||||
}
|
||||
|
@ -143,7 +143,7 @@ func TestSigNested_DeepNesting(t *testing.T) {
|
||||
|
||||
outer := nestedSig
|
||||
var lastNodeKey key.NodePrivate
|
||||
for i := 0; i < 15; i++ { // 15 = max nesting level for CBOR
|
||||
for range 15 { // 15 = max nesting level for CBOR
|
||||
lastNodeKey = key.NewNode()
|
||||
nodeKeyPub, _ := lastNodeKey.Public().MarshalBinary()
|
||||
|
||||
|
@ -142,7 +142,7 @@ func computeSyncIntersection(storage Chonk, localOffer, remoteOffer SyncOffer) (
|
||||
|
||||
if hasRemoteHead {
|
||||
curs := localOffer.Head
|
||||
for i := 0; i < maxSyncHeadIntersectionIter; i++ {
|
||||
for range maxSyncHeadIntersectionIter {
|
||||
parent, err := storage.AUM(curs)
|
||||
if err != nil {
|
||||
if err != os.ErrNotExist {
|
||||
|
@ -587,7 +587,7 @@ func markActiveChain(storage Chonk, verdict map[AUMHash]retainState, minChain in
|
||||
return AUMHash{}, err
|
||||
}
|
||||
|
||||
for i := 0; i < minChain; i++ {
|
||||
for i := range minChain {
|
||||
h := next.Hash()
|
||||
verdict[h] |= retainStateActive
|
||||
|
||||
|
@ -213,7 +213,7 @@ func fastForwardWithAdvancer(
|
||||
|
||||
curs := nextAUM
|
||||
state := startState
|
||||
for i := 0; i < maxIter; i++ {
|
||||
for range maxIter {
|
||||
if done != nil && done(curs, state) {
|
||||
return curs, state, nil
|
||||
}
|
||||
|
@ -730,7 +730,7 @@ func TestCapturePcap(t *testing.T) {
|
||||
const pcapHeaderSize = 24
|
||||
|
||||
// there is a lag before the io.Copy writes a packet to the pcap files
|
||||
for i := 0; i < (timeLimit * 10); i++ {
|
||||
for range timeLimit * 10 {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
if (fileSize(s1Pcap) > pcapHeaderSize) && (fileSize(s2Pcap) > pcapHeaderSize) {
|
||||
break
|
||||
|
@ -153,7 +153,7 @@ func TestZeroInitClock(t *testing.T) {
|
||||
t.Errorf("clock has step %v, want 0", step)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
if got := clock.Now(); !got.Equal(start) {
|
||||
t.Errorf("step %v: clock.Now() = %v, want %v", i, got, start)
|
||||
}
|
||||
|
@ -50,7 +50,7 @@ func (vm *vmInstance) running() bool {
|
||||
|
||||
func (vm *vmInstance) waitStartup(t *testing.T) {
|
||||
t.Helper()
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
if vm.running() {
|
||||
break
|
||||
}
|
||||
|
@ -23,7 +23,7 @@ func retry(t *testing.T, fn func() error) {
|
||||
t.Helper()
|
||||
const tries = 3
|
||||
var err error
|
||||
for i := 0; i < tries; i++ {
|
||||
for i := range tries {
|
||||
err = fn()
|
||||
if err != nil {
|
||||
t.Logf("%dth invocation failed, trying again: %v", i, err)
|
||||
|
@ -321,7 +321,7 @@ func (h *Harness) setupSSHShell(t *testing.T, d Distro, ipm ipMapping) (*ssh.Cli
|
||||
// don't use socket activation.
|
||||
const maxRetries = 5
|
||||
var working bool
|
||||
for i := 0; i < maxRetries; i++ {
|
||||
for range maxRetries {
|
||||
cli, err := ssh.Dial("tcp", hostport, ccfg)
|
||||
if err == nil {
|
||||
working = true
|
||||
|
@ -17,7 +17,7 @@
|
||||
func TestAllocIPs(t *testing.T) {
|
||||
n := NewInternet()
|
||||
saw := map[netip.Addr]bool{}
|
||||
for i := 0; i < 255; i++ {
|
||||
for range 255 {
|
||||
for _, f := range []func(*Interface) netip.Addr{n.allocIPv4, n.allocIPv6} {
|
||||
ip := f(nil)
|
||||
if saw[ip] {
|
||||
|
@ -33,7 +33,7 @@ func ResourceCheck(tb testing.TB) {
|
||||
return
|
||||
}
|
||||
// Goroutines might be still exiting.
|
||||
for i := 0; i < 300; i++ {
|
||||
for range 300 {
|
||||
if runtime.NumGoroutine() <= startN {
|
||||
return
|
||||
}
|
||||
|
@ -14,7 +14,7 @@ func TestRandomDurationBetween(t *testing.T) {
|
||||
}
|
||||
const min = 1 * time.Second
|
||||
const max = 10 * time.Second
|
||||
for i := 0; i < 500; i++ {
|
||||
for range 500 {
|
||||
if got := RandomDurationBetween(min, max); got < min || got >= max {
|
||||
t.Fatalf("%v (%d) out of range", got, got)
|
||||
}
|
||||
|
@ -50,14 +50,14 @@ func TestJSONRoundtrip(t *testing.T) {
|
||||
|
||||
func BenchmarkMonoNow(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
Now()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkTimeNow(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
time.Now()
|
||||
}
|
||||
}
|
||||
|
@ -7,8 +7,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.7
|
||||
|
||||
package rate
|
||||
|
||||
import (
|
||||
@ -138,7 +136,7 @@ func TestSimultaneousRequests(t *testing.T) {
|
||||
}
|
||||
|
||||
wg.Add(numRequests)
|
||||
for i := 0; i < numRequests; i++ {
|
||||
for range numRequests {
|
||||
go f()
|
||||
}
|
||||
wg.Wait()
|
||||
|
@ -43,7 +43,7 @@ func TestValue(t *testing.T) {
|
||||
c := qt.New(t)
|
||||
var v Value
|
||||
var now mono.Time
|
||||
for i := 0; i < numStep; i++ {
|
||||
for range numStep {
|
||||
v.addNow(now, float64(step))
|
||||
now += step
|
||||
}
|
||||
@ -232,7 +232,7 @@ func stats(fs []float64) (mean, stddev float64) {
|
||||
func BenchmarkValue(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
v := Value{HalfLife: time.Second}
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
v.Add(1)
|
||||
}
|
||||
}
|
||||
|
@ -521,7 +521,7 @@ func BenchmarkLogNot200(b *testing.B) {
|
||||
h := StdHandler(rh, HandlerOptions{QuietLoggingIfSuccessful: true})
|
||||
req := httptest.NewRequest("GET", "/", nil)
|
||||
rw := new(httptest.ResponseRecorder)
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
*rw = httptest.ResponseRecorder{}
|
||||
h.ServeHTTP(rw, req)
|
||||
}
|
||||
@ -536,7 +536,7 @@ func BenchmarkLog(b *testing.B) {
|
||||
h := StdHandler(rh, HandlerOptions{})
|
||||
req := httptest.NewRequest("GET", "/", nil)
|
||||
rw := new(httptest.ResponseRecorder)
|
||||
for i := 0; i < b.N; i++ {
|
||||
for range b.N {
|
||||
*rw = httptest.ResponseRecorder{}
|
||||
h.ServeHTTP(rw, req)
|
||||
}
|
||||
|
@ -49,7 +49,7 @@ func TestAcceptedNamesContainsPreferredNames(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProtoTextEncodingRoundTrip(t *testing.T) {
|
||||
for i := 0; i < 256; i++ {
|
||||
for i := range 256 {
|
||||
text := must.Get(Proto(i).MarshalText())
|
||||
var p Proto
|
||||
must.Do(p.UnmarshalText(text))
|
||||
@ -67,7 +67,7 @@ func TestProtoUnmarshalText(t *testing.T) {
|
||||
t.Fatalf("empty input, got err=%v, p=%v, want nil, 0", err, p)
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i++ {
|
||||
for i := range 256 {
|
||||
var p Proto
|
||||
must.Do(p.UnmarshalText([]byte(fmt.Sprintf("%d", i))))
|
||||
if got, want := p, Proto(i); got != want {
|
||||
@ -93,7 +93,7 @@ func TestProtoUnmarshalText(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProtoMarshalText(t *testing.T) {
|
||||
for i := 0; i < 256; i++ {
|
||||
for i := range 256 {
|
||||
text := must.Get(Proto(i).MarshalText())
|
||||
|
||||
if wantName, ok := preferredNames[Proto(i)]; ok {
|
||||
@ -110,7 +110,7 @@ func TestProtoMarshalText(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProtoMarshalJSON(t *testing.T) {
|
||||
for i := 0; i < 256; i++ {
|
||||
for i := range 256 {
|
||||
j := must.Get(Proto(i).MarshalJSON())
|
||||
if got, want := string(j), fmt.Sprintf(`%d`, i); got != want {
|
||||
t.Errorf("Proto(%d).MarshalJSON() = %q, want %q", i, got, want)
|
||||
@ -121,7 +121,7 @@ func TestProtoMarshalJSON(t *testing.T) {
|
||||
func TestProtoUnmarshalJSON(t *testing.T) {
|
||||
var p Proto
|
||||
|
||||
for i := 0; i < 256; i++ {
|
||||
for i := range 256 {
|
||||
j := []byte(fmt.Sprintf(`%d`, i))
|
||||
must.Do(json.Unmarshal(j, &p))
|
||||
if got, want := p, Proto(i); got != want {
|
||||
|
@ -162,7 +162,7 @@ func TestChallenge(t *testing.T) {
|
||||
func TestShard(t *testing.T) {
|
||||
const N = 1_000
|
||||
var shardCount [256]int
|
||||
for i := 0; i < N; i++ {
|
||||
for range N {
|
||||
shardCount[NewNode().Public().Shard()]++
|
||||
}
|
||||
e := float64(N) / 256 // expected
|
||||
|
@ -22,7 +22,7 @@ func TestRand(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestClamp25519Private(t *testing.T) {
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
var k [32]byte
|
||||
rand(k[:])
|
||||
clamp25519Private(k[:])
|
||||
|
@ -89,7 +89,7 @@ func TestSyncValueConcurrent(t *testing.T) {
|
||||
routines = 10000
|
||||
)
|
||||
wg.Add(routines)
|
||||
for i := 0; i < routines; i++ {
|
||||
for range routines {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
// Every goroutine waits for the go signal, so that more of them
|
||||
|
@ -70,7 +70,7 @@ func TestRateLimiter(t *testing.T) {
|
||||
lgtest := logTester(want, t, &testsRun)
|
||||
lg := RateLimitedFnWithClock(lgtest, 1*time.Minute, 2, 50, nowf)
|
||||
var prefixed Logf
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
lg("boring string with constant formatting %s", "(constant)")
|
||||
lg("templated format string no. %d", i)
|
||||
if i == 4 {
|
||||
@ -121,7 +121,7 @@ func TestLogOnChange(t *testing.T) {
|
||||
lgtest := logTester(want, t, &testsRun)
|
||||
lg := LogOnChange(lgtest, 5*time.Second, timeNow)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
for range 10 {
|
||||
lg("%s", "1 2 3 4 5 6")
|
||||
}
|
||||
lg("1 2 3 4 5 7")
|
||||
|
@ -137,7 +137,7 @@ func (nm *NetworkMap) PeerByTailscaleIP(ip netip.Addr) (peer tailcfg.NodeView, o
|
||||
}
|
||||
for _, n := range nm.Peers {
|
||||
ad := n.Addresses()
|
||||
for i := 0; i < ad.Len(); i++ {
|
||||
for i := range ad.Len() {
|
||||
a := ad.At(i)
|
||||
if a.Addr() == ip {
|
||||
return n, true
|
||||
|
@ -73,7 +73,7 @@ func (m NodeMutationLastSeen) Apply(n *tailcfg.Node) {
|
||||
var peerChangeFields = sync.OnceValue(func() []reflect.StructField {
|
||||
var fields []reflect.StructField
|
||||
rt := reflect.TypeFor[tailcfg.PeerChange]()
|
||||
for i := 0; i < rt.NumField(); i++ {
|
||||
for i := range rt.NumField() {
|
||||
fields = append(fields, rt.Field(i))
|
||||
}
|
||||
return fields
|
||||
|
@ -44,7 +44,7 @@ func TestMapResponseContainsNonPatchFields(t *testing.T) {
|
||||
}
|
||||
|
||||
rt := reflect.TypeFor[tailcfg.MapResponse]()
|
||||
for i := 0; i < rt.NumField(); i++ {
|
||||
for i := range rt.NumField() {
|
||||
f := rt.Field(i)
|
||||
|
||||
var want bool
|
||||
|
@ -12,7 +12,7 @@
|
||||
)
|
||||
|
||||
func fieldsOf(t reflect.Type) (fields []string) {
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
for i := range t.NumField() {
|
||||
if name := t.Field(i).Name; name != "_" {
|
||||
fields = append(fields, name)
|
||||
}
|
||||
|
@ -262,7 +262,7 @@ func (v Slice[T]) AsSlice() []T {
|
||||
//
|
||||
// As it runs in O(n) time, use with care.
|
||||
func (v Slice[T]) IndexFunc(f func(T) bool) int {
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
for i := range v.Len() {
|
||||
if f(v.At(i)) {
|
||||
return i
|
||||
}
|
||||
|
Some files were not shown because too many files have changed in this diff.