wgengine/netstack/gro: exclude importation of gVisor GRO pkg on iOS (#13202)

In df6014f1d7bf437adf239b75a62fd4c2f389ea2a we removed the build tag
gating preventing importation, which tripped a NetworkExtension limit
test in corp. That removal reverted
25f0a3fc8f6f9cf681bb5afda8e1762816c67a8b, which had actually made the
situation worse, hence the simplification.

This commit goes back to the strategy in
25f0a3fc8f6f9cf681bb5afda8e1762816c67a8b, and gets us back under the
limit in my local testing. Admittedly, we don't fully understand the
effects of importing this package versus excluding it, and we have seen
mixed results, but this commit allows us to move forward again.

Updates tailscale/corp#22125

Signed-off-by: Jordan Whited <jordan@tailscale.com>
Author: Jordan Whited
Date:   2024-08-20 16:40:10 -07:00 (committed by GitHub)
Commit: 7675c3ebf2 (parent: df6014f1d7)
3 changed files with 99 additions and 65 deletions
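
The gating strategy referenced above is Go's complementary build
constraints: the gVisor-backed implementation is compiled on every
platform except iOS, while an iOS-only stub file keeps the exported API
identical so callers build unchanged. A generic sketch of the pattern,
using hypothetical file and package names that are not part of this diff:

// gate_default.go (compiled on every platform except iOS)
//go:build !ios

package gate

// Supported reports that the heavy implementation is linked in.
func Supported() bool { return true }

// gate_ios.go (compiled only on iOS; same exported surface, no heavy imports)
//go:build ios

package gate

// Supported reports that only the lightweight stub is linked in.
func Supported() bool { return false }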


@@ -6,15 +6,12 @@
import (
    "bytes"
    "sync"

    "github.com/tailscale/wireguard-go/tun"
    "gvisor.dev/gvisor/pkg/buffer"
    "gvisor.dev/gvisor/pkg/tcpip"
    "gvisor.dev/gvisor/pkg/tcpip/header"
    "gvisor.dev/gvisor/pkg/tcpip/header/parse"
    "gvisor.dev/gvisor/pkg/tcpip/stack"
    nsgro "gvisor.dev/gvisor/pkg/tcpip/stack/gro"

    "tailscale.com/net/packet"
    "tailscale.com/types/ipproto"
)
@@ -105,65 +102,3 @@ func RXChecksumOffload(p *packet.Parsed) *stack.PacketBuffer {
    packetBuf.RXChecksumValidated = true
    return packetBuf
}

var (
    groPool sync.Pool
)

func init() {
    groPool.New = func() any {
        g := &GRO{}
        g.gro.Init(true)
        return g
    }
}

// GRO coalesces incoming packets to increase throughput. It is NOT thread-safe.
type GRO struct {
    gro           nsgro.GRO
    maybeEnqueued bool
}

// NewGRO returns a new instance of *GRO from a sync.Pool. It can be returned to
// the pool with GRO.Flush().
func NewGRO() *GRO {
    return groPool.Get().(*GRO)
}

// SetDispatcher sets the underlying stack.NetworkDispatcher where packets are
// delivered.
func (g *GRO) SetDispatcher(d stack.NetworkDispatcher) {
    g.gro.Dispatcher = d
}

// Enqueue enqueues the provided packet for GRO. It may immediately deliver
// it to the underlying stack.NetworkDispatcher depending on its contents. To
// explicitly flush previously enqueued packets see Flush().
func (g *GRO) Enqueue(p *packet.Parsed) {
    if g.gro.Dispatcher == nil {
        return
    }
    pkt := RXChecksumOffload(p)
    if pkt == nil {
        return
    }
    // TODO(jwhited): g.gro.Enqueue() duplicates a lot of p.Decode().
    // We may want to push stack.PacketBuffer further up as a
    // replacement for packet.Parsed, or inversely push packet.Parsed
    // down into refactored GRO logic.
    g.gro.Enqueue(pkt)
    g.maybeEnqueued = true
    pkt.DecRef()
}

// Flush flushes previously enqueued packets to the underlying
// stack.NetworkDispatcher, and returns GRO to a pool for later re-use. Callers
// MUST NOT use GRO once it has been Flush()'d.
func (g *GRO) Flush() {
    if g.gro.Dispatcher != nil && g.maybeEnqueued {
        g.gro.Flush()
    }
    g.gro.Dispatcher = nil
    g.maybeEnqueued = false
    groPool.Put(g)
}


@@ -0,0 +1,76 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !ios

package gro

import (
    "sync"

    "gvisor.dev/gvisor/pkg/tcpip/stack"
    nsgro "gvisor.dev/gvisor/pkg/tcpip/stack/gro"

    "tailscale.com/net/packet"
)

var (
    groPool sync.Pool
)

func init() {
    groPool.New = func() any {
        g := &GRO{}
        g.gro.Init(true)
        return g
    }
}

// GRO coalesces incoming packets to increase throughput. It is NOT thread-safe.
type GRO struct {
    gro           nsgro.GRO
    maybeEnqueued bool
}

// NewGRO returns a new instance of *GRO from a sync.Pool. It can be returned to
// the pool with GRO.Flush().
func NewGRO() *GRO {
    return groPool.Get().(*GRO)
}

// SetDispatcher sets the underlying stack.NetworkDispatcher where packets are
// delivered.
func (g *GRO) SetDispatcher(d stack.NetworkDispatcher) {
    g.gro.Dispatcher = d
}

// Enqueue enqueues the provided packet for GRO. It may immediately deliver
// it to the underlying stack.NetworkDispatcher depending on its contents. To
// explicitly flush previously enqueued packets see Flush().
func (g *GRO) Enqueue(p *packet.Parsed) {
    if g.gro.Dispatcher == nil {
        return
    }
    pkt := RXChecksumOffload(p)
    if pkt == nil {
        return
    }
    // TODO(jwhited): g.gro.Enqueue() duplicates a lot of p.Decode().
    // We may want to push stack.PacketBuffer further up as a
    // replacement for packet.Parsed, or inversely push packet.Parsed
    // down into refactored GRO logic.
    g.gro.Enqueue(pkt)
    g.maybeEnqueued = true
    pkt.DecRef()
}

// Flush flushes previously enqueued packets to the underlying
// stack.NetworkDispatcher, and returns GRO to a pool for later re-use. Callers
// MUST NOT use GRO once it has been Flush()'d.
func (g *GRO) Flush() {
    if g.gro.Dispatcher != nil && g.maybeEnqueued {
        g.gro.Flush()
    }
    g.gro.Dispatcher = nil
    g.maybeEnqueued = false
    groPool.Put(g)
}
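
For reference, a minimal sketch of how a caller might drive this API per
the doc comments above. It is not part of this diff: the dispatcher and
packet batch are assumed to come from elsewhere, the package name is
hypothetical, and the import path simply follows the commit title.

package rxsketch

import (
    "gvisor.dev/gvisor/pkg/tcpip/stack"

    "tailscale.com/net/packet"
    "tailscale.com/wgengine/netstack/gro"
)

// deliverBatch illustrates the documented lifecycle: take a *GRO from the
// pool, attach the dispatcher, enqueue a batch, then Flush(), after which
// the value must not be reused.
func deliverBatch(d stack.NetworkDispatcher, pkts []*packet.Parsed) {
    g := gro.NewGRO()
    g.SetDispatcher(d)
    for _, p := range pkts {
        g.Enqueue(p) // may deliver immediately depending on packet contents
    }
    g.Flush() // flushes anything still pending and returns g to the pool
}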


@@ -0,0 +1,23 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build ios

package gro

import (
    "gvisor.dev/gvisor/pkg/tcpip/stack"

    "tailscale.com/net/packet"
)

type GRO struct{}

func NewGRO() *GRO {
    panic("unsupported on iOS")
}

func (g *GRO) SetDispatcher(_ stack.NetworkDispatcher) {}

func (g *GRO) Enqueue(_ *packet.Parsed) {}

func (g *GRO) Flush() {}
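
The stub above lets iOS builds compile without importing
gvisor.dev/gvisor/pkg/tcpip/stack/gro, but its NewGRO panics, so callers
are expected to avoid constructing a GRO on iOS at all. One possible
caller-side guard, sketched here as an assumption rather than something
taken from this commit:

package rxsketch

import (
    "runtime"

    "tailscale.com/wgengine/netstack/gro"
)

// maybeNewGRO is a hypothetical guard: it only constructs a GRO where the
// real implementation is compiled in, since the iOS stub's NewGRO panics.
func maybeNewGRO() *gro.GRO {
    if runtime.GOOS == "ios" {
        return nil // GRO path disabled on iOS
    }
    return gro.NewGRO()
}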