remove clusterPeers; find cluster peers via netmap peers and tags

Fran Bull 2024-09-25 09:00:38 -07:00
parent 781fd03f27
commit 28a0c21d8b
8 changed files with 92 additions and 43 deletions
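In short: control no longer ships a ClusterPeers struct in the MapResponse. Each natc node now reads the connector tag out of its tailscale.com/nat-connectors capability and scans its netmap peers for that tag, joining the first match or leading a fresh cluster if none is found. A minimal standalone sketch of that decision (Peer, hasTag, joinOrLead, and the tag name tag:natc are illustrative stand-ins, not the real LocalBackend types):

package main

import "fmt"

// Peer is a stand-in for tailcfg.NodeView: just an address and its tags.
type Peer struct {
	Addr string
	Tags []string
}

func hasTag(tags []string, tag string) bool {
	for _, t := range tags {
		if t == tag {
			return true
		}
	}
	return false
}

// joinOrLead mirrors the flow of the new (*LocalBackend).natc: join the
// first peer carrying the connector tag, otherwise lead a fresh cluster.
func joinOrLead(self Peer, peers []Peer, tag string) {
	if !hasTag(self.Tags, tag) {
		return // this node isn't part of the cluster at all
	}
	for _, p := range peers {
		if hasTag(p.Tags, tag) {
			fmt.Printf("joining cluster via %s\n", p.Addr)
			return
		}
	}
	fmt.Println("no tagged peer found; leading a new cluster")
}

func main() {
	self := Peer{Addr: "100.64.0.1", Tags: []string{"tag:natc"}}
	peers := []Peer{{Addr: "100.64.0.2", Tags: []string{"tag:natc"}}}
	joinOrLead(self, peers, "tag:natc")
}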

View File

@@ -92,8 +92,6 @@ type mapSession struct {
 	lastTKAInfo       *tailcfg.TKAInfo
 	lastNetmapSummary string // from NetworkMap.VeryConcise
 	lastMaxExpiry     time.Duration
-
-	clusterPeers tailcfg.ClusterInfo
 }

 // newMapSession returns a mostly unconfigured new mapSession.
@@ -350,8 +348,6 @@ func (ms *mapSession) updateStateFromResponse(resp *tailcfg.MapResponse) {
 	if resp.MaxKeyDuration > 0 {
 		ms.lastMaxExpiry = resp.MaxKeyDuration
 	}
-	// TODO delta stuff
-	ms.clusterPeers = resp.ClusterPeers
 }

 var (
@@ -808,7 +804,6 @@ func (ms *mapSession) netmap() *netmap.NetworkMap {
 		ControlHealth:  ms.lastHealth,
 		TKAEnabled:     ms.lastTKAInfo != nil && !ms.lastTKAInfo.Disabled,
 		MaxKeyDuration: ms.lastMaxExpiry,
-		ClusterPeers:   ms.clusterPeers,
 	}

 	if ms.lastTKAInfo != nil && ms.lastTKAInfo.Head != "" {

View File

@@ -371,8 +371,6 @@ type LocalBackend struct {
 	// backend is healthy and captive portal detection is not required
 	// (sending false).
 	needsCaptiveDetection chan bool
-
-	natcOnce sync.Once
 }

 // HealthTracker returns the health tracker for the backend.
@@ -3969,27 +3967,76 @@ func (b *LocalBackend) NatcHandlerForFlow() (func(src, dst netip.AddrPort) (hand
 }

 func (b *LocalBackend) natc(nm *netmap.NetworkMap, prefs ipn.PrefsView) {
-	// when we get reconfigured how do we cope with that? like if all nodes get removed and then
-	// fresh nodes added, does that work? or do we have to remove and re-add one by one?
-	// Is there a time when we would need to cancel the goroutine we start here (presumably there is)?
-	if !prefs.NatConnector().Advertise {
+	if nm == nil || !nm.SelfNode.Valid() || b.natConnector == nil {
+		// not got enough info to do anything yet
+		return
+	}
+	if b.natConnector.ConsensusClient != nil {
+		// we're already in the cluster
+		return
+	}
+	// TODO these are also in corp
+	type NatConnectorAttr struct {
+		Name       string   `json:"name,omitempty"`
+		Connectors []string `json:"connectors,omitempty"`
+		Domains    []string `json:"domains,omitempty"`
+	}
+	const natConnectorCapName = "tailscale.com/nat-connectors"
+	sn := nm.SelfNode.AsStruct()
+	attrs, err := tailcfg.UnmarshalNodeCapJSON[NatConnectorAttr](sn.CapMap, natConnectorCapName)
+	if err != nil {
+		b.logf("[unexpected] error parsing app connector mapcap: %v", err)
+		return
+	}
+	if len(attrs) == 0 || len(attrs[0].Connectors) == 0 {
+		// there's no control config (or invalid config, is that possible? TODO)
+		return
+	}
+	if len(attrs) > 1 || len(attrs[0].Connectors) > 1 {
+		// TODO what do we do with multiples?
+		fmt.Println("NAT CONNECTOR NOT PROPERLY HANDLING MULTIPLE STANZAS OR TAGS IN POLICY")
+		fmt.Println("len(attrs)", len(attrs), "attrs[0].Connectors", attrs[0].Connectors)
+	}
+	tagName := attrs[0].Connectors[0]
+	domains := attrs[0].Domains
+	slices.Sort(domains)
+	domains = slices.Compact(domains)
+	// TODO tell nat connector about domains so that it can handle its side properly
+	if !views.SliceContains(nm.SelfNode.Tags(), tagName) {
+		// we're not trying to join the cluster
 		if b.natConnector != nil {
 			b.natConnector.Stop()
 			b.natConnector = nil
 		}
 		return
 	}
-	if nm == nil || !nm.ClusterPeers.Addr.IsValid() {
-		return // TODO log?
-	}
-	id := string(nm.SelfNode.StableID()) // TODO this is surely not right
-	// TODO handle access before StartConsensusMember
-	// start a goroutine for this node to be a member of the consensus protocol for
-	// determining which ip addresses are available for natc.
-	if b.natConnector.ConsensusClient == nil {
-		b.natConnector.StartConsensusMember(id, nm.ClusterPeers, b.varRoot)
+	ipAddrForNodeView := func(nv tailcfg.NodeView) netip.Addr {
+		return nv.Addresses().AsSlice()[0].Addr()
 	}
+	// we are trying to be in the natc cluster
+	id := string(nm.SelfNode.StableID())
+	// let's look for a peer to join
+	for key, peer := range b.peers {
+		if views.SliceContains(peer.Tags(), tagName) {
+			log.Printf("nat-connector: trying to join cluster peer tag=%s, %s, %v", tagName, key, peer)
+			b.natConnector.JoinConsensus(id, ipAddrForNodeView(nm.SelfNode), ipAddrForNodeView(peer), b.varRoot)
+			// TODO how do we know if we joined ok?
+			return
+		}
+	}
+	// no joinable peer found? I will be the leader
+	log.Printf("nat-connector: leading cluster tag=%s", tagName)
+	b.natConnector.LeadConsensus(id, ipAddrForNodeView(nm.SelfNode), b.varRoot)
+	// TODO do i need a whois step? what was that for?
+	// when we get reconfigured how do we cope with that? like if all nodes get removed and then
+	// fresh nodes added, does that work? or do we have to remove and re-add one by one?
+	// Is there a time when we would need to cancel the goroutine we start here (presumably there is)?
 }

 // reconfigAppConnectorLocked updates the app connector state based on the
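The inline NatConnectorAttr struct above implies the capability value is JSON stanzas of name/connectors/domains. A standalone sketch of decoding such a value; the payload here is an assumed example, and in the real path it arrives through tailcfg.UnmarshalNodeCapJSON from the self node's CapMap:

package main

import (
	"encoding/json"
	"fmt"
)

// NatConnectorAttr matches the struct declared inline in (*LocalBackend).natc.
type NatConnectorAttr struct {
	Name       string   `json:"name,omitempty"`
	Connectors []string `json:"connectors,omitempty"`
	Domains    []string `json:"domains,omitempty"`
}

func main() {
	// Hypothetical capability value; the real one is decoded by
	// tailcfg.UnmarshalNodeCapJSON from the self node's CapMap.
	raw := `[{"name":"pool-1","connectors":["tag:natc"],"domains":["example.com","example.org"]}]`

	var attrs []NatConnectorAttr
	if err := json.Unmarshal([]byte(raw), &attrs); err != nil {
		panic(err)
	}
	// As in the diff, only the first stanza's first connector tag is used.
	fmt.Println("tag:", attrs[0].Connectors[0], "domains:", attrs[0].Domains)
}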

View File

@@ -329,6 +329,7 @@ type MaskedPrefs struct {
 	ProfileNameSet     bool                `json:",omitempty"`
 	AutoUpdateSet      AutoUpdatePrefsMask `json:",omitempty"`
 	AppConnectorSet    bool                `json:",omitempty"`
+	NatConnectorSet    bool                `json:",omitempty"`
 	PostureCheckingSet bool                `json:",omitempty"`
 	NetfilterKindSet   bool                `json:",omitempty"`
 	DriveSharesSet     bool                `json:",omitempty"`
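NatConnectorSet follows the existing MaskedPrefs convention: EditPrefs applies only the Prefs fields whose mask bit is set. A hedged sketch of a client using it; the diff shows only the accessor prefs.NatConnector().Advertise and this mask bit, so the NatConnector field and the NatConnectorPrefs type name are hypothetical:

package main

import (
	"context"
	"log"

	"tailscale.com/client/tailscale"
	"tailscale.com/ipn"
)

func main() {
	var lc tailscale.LocalClient

	mp := &ipn.MaskedPrefs{
		Prefs: ipn.Prefs{
			// NatConnectorPrefs is a hypothetical name; the diff shows only
			// the accessor prefs.NatConnector().Advertise and the mask bit.
			NatConnector: ipn.NatConnectorPrefs{Advertise: true},
		},
		NatConnectorSet: true, // apply only the NatConnector field
	}
	if _, err := lc.EditPrefs(context.Background(), mp); err != nil {
		log.Fatal(err)
	}
}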

View File

@@ -9,6 +9,20 @@
 	"tailscale.com/tailcfg"
 )

+var specialPort uint16 = 61820
+
+func makeAddrForConsensus(a netip.Addr) string {
+	return netip.AddrPortFrom(a, specialPort).String()
+}
+
+func JoinConsensus(nodeID string, addr, joinAddr netip.Addr, varRoot string) {
+	StartConsensusMember(nodeID, makeAddrForConsensus(addr), makeAddrForConsensus(joinAddr), varRoot)
+}
+
+func LeadConsensus(nodeID string, addr netip.Addr, varRoot string) {
+	StartConsensusMember(nodeID, makeAddrForConsensus(addr), "", varRoot)
+}
+
 // StartConsensusMember has this node join the consensus protocol for handing out ip addresses
 func StartConsensusMember(nodeID, addr, joinAddr, varRoot string) {
 	var conf uhaha.Config
@@ -28,7 +42,7 @@ func StartConsensusMember(nodeID, addr, joinAddr, varRoot string) {
 	conf.NodeID = nodeID
 	conf.Addr = addr
-	if joinAddr != "" && joinAddr != addr {
+	if joinAddr != "" {
 		conf.JoinAddr = joinAddr
 	}
 	conf.Flag.Custom = true
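Two things worth noting in the helpers above: consensus traffic is pinned to a fixed port (61820), so a bare netip.Addr from the netmap is enough to address a peer, and leading is expressed as joining with an empty join address. A standalone sketch of the address composition:

package main

import (
	"fmt"
	"net/netip"
)

// Mirrors makeAddrForConsensus above: every cluster member listens on the
// same well-known port, so peers only need to exchange bare addresses.
const specialPort uint16 = 61820

func makeAddrForConsensus(a netip.Addr) string {
	return netip.AddrPortFrom(a, specialPort).String()
}

func main() {
	a := netip.MustParseAddr("100.64.0.1") // an assumed tailnet address
	fmt.Println(makeAddrForConsensus(a))   // prints "100.64.0.1:61820"
}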

View File

@@ -19,18 +19,12 @@ type ConsensusClient struct {
 	rdb    *redis.Client
 }

-func NewConsensusClient(addr, joinAddr string, logf logger.Logf) *ConsensusClient {
+func NewConsensusClient(addr, joinAddr netip.Addr, logf logger.Logf) *ConsensusClient {
 	cc := ConsensusClient{
-		MyAddr: addr,
+		MyAddr: makeAddrForConsensus(addr),
 		logf:   logf,
 	}
-	if joinAddr == "" {
-		// initially i am the leader
-		cc.newRedisClient(addr)
-	} else {
-		// initially i am a follower
-		cc.newRedisClient(joinAddr)
-	}
+	cc.newRedisClient(makeAddrForConsensus(joinAddr))
 	return &cc
 }

View File

@@ -190,17 +190,20 @@ func (n *NatConnector) Start() {
 }

-func (n *NatConnector) StartConsensusMember(id string, clusterPeers tailcfg.ClusterInfo, varRoot string) {
-	var leaderAddress string
-	if clusterPeers.Leader.IsValid() {
-		leaderAddress = clusterPeers.Leader.String()
-	}
-	// TODO something to do with channels to stop this?
+func (n *NatConnector) JoinConsensus(id string, myAddr, joinAddr netip.Addr, varRoot string) {
 	go func() {
 		n.logf("Starting ippool consensus membership for natc")
-		ippool.StartConsensusMember(id, clusterPeers.Addr.String(), leaderAddress, varRoot)
+		ippool.JoinConsensus(id, myAddr, joinAddr, varRoot)
 	}()
-	n.ConsensusClient = ippool.NewConsensusClient(clusterPeers.Addr.String(), leaderAddress, n.logf)
+	n.ConsensusClient = ippool.NewConsensusClient(myAddr, joinAddr, n.logf)
+}
+
+func (n *NatConnector) LeadConsensus(id string, myAddr netip.Addr, varRoot string) {
+	go func() {
+		n.logf("Starting ippool consensus membership for natc")
+		ippool.LeadConsensus(id, myAddr, varRoot)
+	}()
+	n.ConsensusClient = ippool.NewConsensusClient(myAddr, myAddr, n.logf)
 }

 func NewNatConnector(l logger.Logf, whoIs func(string, netip.AddrPort) (tailcfg.NodeView, tailcfg.UserProfile, bool)) NatConnector {
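Together with the NewConsensusClient change above, the leader/follower special-casing moves to the callers: a joiner points its initial redis client at the peer it joins, while a leader passes its own address in both positions (NewConsensusClient(myAddr, myAddr, ...)), so the empty-joinAddr branch inside the client is gone. A toy illustration of that dispatch (addresses assumed, newConsensusClient is a stand-in):

package main

import (
	"fmt"
	"net/netip"
)

// newConsensusClient stands in for ippool.NewConsensusClient: the second
// argument is simply where the redis client initially points.
func newConsensusClient(myAddr, dialAddr netip.Addr) {
	fmt.Printf("me=%s, redis dials %s first\n", myAddr, dialAddr)
}

func main() {
	me := netip.MustParseAddr("100.64.0.1") // assumed tailnet addresses
	peer := netip.MustParseAddr("100.64.0.2")

	newConsensusClient(me, peer) // joiner (the JoinConsensus path)
	newConsensusClient(me, me)   // leader (the LeadConsensus path)
}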

View File

@@ -1957,9 +1957,6 @@ type MapResponse struct {
 	// MaxKeyDuration describes the MaxKeyDuration setting for the tailnet.
 	// If zero, the value is unchanged.
 	MaxKeyDuration time.Duration `json:",omitempty"`
-
-	// TODO all the delta stuff
-	ClusterPeers ClusterInfo `json:",omitempty"`
 }

 // ClientVersion is information about the latest client version that's available

View File

@@ -80,8 +80,6 @@ type NetworkMap struct {
 	// MaxKeyDuration describes the MaxKeyDuration setting for the tailnet.
 	MaxKeyDuration time.Duration
-
-	ClusterPeers tailcfg.ClusterInfo
 }

 // User returns nm.SelfNode.User if nm.SelfNode is non-nil, otherwise it returns