mirror of https://github.com/tailscale/tailscale.git
net/tstun: split TUN events channel into up/down and MTU
We had a long-standing bug in which our TUN events channel was being received from simultaneously in two places.

The first receiver is wireguard-go. At wgengine/userspace.go:366, we pass e.tundev to wireguard-go, which starts a goroutine (RoutineTUNEventReader) that receives from that channel and uses the events to adjust the MTU and bring the device up/down.

The second is our own. At wgengine/userspace.go:374, we launch a goroutine that receives from e.tundev, logs MTU changes, and triggers state updates when up/down changes occur.

Events were getting delivered haphazardly between the two of them.

We don't really want wireguard-go to receive the up/down events; we control the state of the device explicitly by calling device.Up. And the MTU logging in the userspace.go loop duplicates logging that wireguard-go does when it receives MTU updates.

So this change splits the single TUN events channel into two, up/down and other (aka MTU), and sends each to the party that ought to receive it.

I'm actually a bit surprised that this hasn't caused more visible trouble. If a down event went to wireguard-go but the subsequent up event went to userspace.go, we could end up with the wireguard-go device disappearing. I believe that this may also (somewhat accidentally) be a fix for #1790.

Signed-off-by: Josh Bleecher Snyder <josharian@gmail.com>
parent 97d2fa2f56
commit 99705aa6b7
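Before the diff, a minimal runnable sketch of the failure mode described above (hypothetical names, not code from this commit): two goroutines receiving from one Go channel each get an unpredictable subset of the events, so neither sees a consistent up/down sequence.

// twochan.go: each send is delivered to exactly one receiver,
// chosen unpredictably by the runtime scheduler.
package main

import (
	"fmt"
	"sync"
)

func main() {
	events := make(chan string)
	var wg sync.WaitGroup
	for _, name := range []string{"wireguard-go", "userspace.go loop"} {
		name := name // capture loop variable (pre-Go 1.22)
		wg.Add(1)
		go func() {
			defer wg.Done()
			for event := range events {
				fmt.Printf("%s received %q\n", name, event)
			}
		}()
	}
	// "down" and "up" can land on different receivers, which is the
	// hazard the commit message describes.
	events <- "down"
	events <- "up"
	close(events)
	wg.Wait()
}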
net/tstun:

@@ -90,6 +90,11 @@ type Wrapper struct {
 	// to discard an empty packet instead of sending it through t.outbound.
 	outbound chan []byte
 
+	// eventsUpDown yields up and down tun.Events that arrive on a Wrapper's events channel.
+	eventsUpDown chan tun.Event
+	// eventsOther yields non-up-and-down tun.Events that arrive on a Wrapper's events channel.
+	eventsOther chan tun.Event
+
 	// filter atomically stores the currently active packet filter
 	filter atomic.Value // of *filter.Filter
 	// filterFlags control the verbosity of logging packet drops/accepts.
@@ -130,11 +135,14 @@ func Wrap(logf logger.Logf, tdev tun.Device) *Wrapper {
 		closed:       make(chan struct{}),
 		errors:       make(chan error),
 		outbound:     make(chan []byte),
+		eventsUpDown: make(chan tun.Event),
+		eventsOther:  make(chan tun.Event),
 		// TODO(dmytro): (highly rate-limited) hexdumps should happen on unknown packets.
 		filterFlags: filter.LogAccepts | filter.LogDrops,
 	}
 
 	go tun.poll()
+	go tun.pumpEvents()
 	// The buffer starts out consumed.
 	tun.bufferConsumed <- struct{}{}
 
@@ -160,8 +168,50 @@ func (t *Wrapper) Close() error {
 	return err
 }
 
+// pumpEvents copies events from t.tdev to t.eventsUpDown and t.eventsOther.
+// pumpEvents exits when t.tdev.events or t.closed is closed.
+// pumpEvents closes t.eventsUpDown and t.eventsOther when it exits.
+func (t *Wrapper) pumpEvents() {
+	defer close(t.eventsUpDown)
+	defer close(t.eventsOther)
+	src := t.tdev.Events()
+	for {
+		// Retrieve an event from the TUN device.
+		var event tun.Event
+		var ok bool
+		select {
+		case <-t.closed:
+			return
+		case event, ok = <-src:
+			if !ok {
+				return
+			}
+		}
+
+		// Pass along event to the correct recipient.
+		// Though event is a bitmask, in practice there is only ever one bit set at a time.
+		dst := t.eventsOther
+		if event&(tun.EventUp|tun.EventDown) != 0 {
+			dst = t.eventsUpDown
+		}
+		select {
+		case <-t.closed:
+			return
+		case dst <- event:
+		}
+	}
+}
+
+// EventsUpDown returns a TUN event channel that contains all Up and Down events.
+func (t *Wrapper) EventsUpDown() chan tun.Event {
+	return t.eventsUpDown
+}
+
+// Events returns a TUN event channel that contains all non-Up, non-Down events.
+// It is named Events because it is the set of events that we want to expose to wireguard-go,
+// and Events is the name specified by the wireguard-go tun.Device interface.
 func (t *Wrapper) Events() chan tun.Event {
-	return t.tdev.Events()
+	return t.eventsOther
 }
 
 func (t *Wrapper) File() *os.File {
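For comparison with the sketch of the bug above, here is a self-contained sketch (simplified types, not code from this commit) of the pattern pumpEvents introduces: a single pump goroutine routes each event to exactly one of two channels, so each consumer sees a deterministic, complete stream of the events meant for it.

// splitchan.go: deterministic fan-out of one event stream into two.
package main

import (
	"fmt"
	"sync"
)

type Event int

const (
	EventUp Event = 1 << iota
	EventDown
	EventMTUUpdate
)

func main() {
	src := make(chan Event)    // the single underlying events channel
	upDown := make(chan Event) // consumed by the engine
	other := make(chan Event)  // consumed by wireguard-go

	// The pump: same routing rule as pumpEvents in the diff above.
	go func() {
		defer close(upDown)
		defer close(other)
		for event := range src {
			dst := other
			if event&(EventUp|EventDown) != 0 {
				dst = upDown
			}
			dst <- event
		}
	}()

	var wg sync.WaitGroup
	wg.Add(2)
	go func() { // stands in for the userspace.go up/down loop
		defer wg.Done()
		for event := range upDown {
			fmt.Println("engine got up/down event:", event)
		}
	}()
	go func() { // stands in for wireguard-go's event reader
		defer wg.Done()
		for event := range other {
			fmt.Println("wireguard-go got event:", event)
		}
	}()

	src <- EventDown
	src <- EventMTUUpdate
	src <- EventUp
	close(src)
	wg.Wait()
}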
wgengine/userspace.go:

@@ -373,11 +373,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error)
 
 	go func() {
 		up := false
-		for event := range e.tundev.Events() {
-			if event&tun.EventMTUUpdate != 0 {
-				mtu, err := e.tundev.MTU()
-				e.logf("external route MTU: %d (%v)", mtu, err)
-			}
+		for event := range e.tundev.EventsUpDown() {
 			if event&tun.EventUp != 0 && !up {
 				e.logf("external route: up")
 				e.RequestStatus()
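The up flag makes this loop edge-triggered: it reacts only to changes of state, not to repeated events. A small standalone sketch of that pattern (assumed names; the Down branch is inferred from the up flag and lies beyond the hunk shown above):

// edgetrigger.go: act on state transitions, ignore duplicate events.
package main

import "fmt"

type Event int

const (
	EventUp Event = 1 << iota
	EventDown
)

func main() {
	events := []Event{EventUp, EventUp, EventDown, EventUp}
	up := false
	for _, event := range events {
		if event&EventUp != 0 && !up {
			up = true
			fmt.Println("external route: up") // second EventUp is ignored
		}
		if event&EventDown != 0 && up {
			up = false
			fmt.Println("external route: down")
		}
	}
}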