2023-01-27 21:37:20 +00:00
|
|
|
// Copyright (c) Tailscale Inc & AUTHORS
|
|
|
|
// SPDX-License-Identifier: BSD-3-Clause
|
2021-04-19 02:49:25 +00:00
|
|
|
|
|
|
|
package controlclient
|
|
|
|
|
|
|
|
import (
|
2023-08-21 02:52:52 +00:00
|
|
|
"context"
|
2023-08-22 20:32:04 +00:00
|
|
|
"encoding/json"
|
2022-07-20 20:31:20 +00:00
|
|
|
"fmt"
|
2023-08-22 20:32:04 +00:00
|
|
|
"net"
|
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-26 04:14:09 +00:00
|
|
|
"net/netip"
|
2023-08-22 20:32:04 +00:00
|
|
|
"reflect"
|
|
|
|
"slices"
|
2021-04-19 02:49:25 +00:00
|
|
|
"sort"
|
2023-08-22 20:32:04 +00:00
|
|
|
"strconv"
|
|
|
|
"sync"
|
2023-09-02 02:28:00 +00:00
|
|
|
"time"
|
2021-04-19 02:49:25 +00:00
|
|
|
|
2023-09-11 19:03:39 +00:00
|
|
|
"tailscale.com/control/controlknobs"
|
2022-01-24 18:52:57 +00:00
|
|
|
"tailscale.com/envknob"
|
2021-04-19 02:49:25 +00:00
|
|
|
"tailscale.com/tailcfg"
|
2023-08-21 02:52:52 +00:00
|
|
|
"tailscale.com/tstime"
|
2021-09-03 20:17:46 +00:00
|
|
|
"tailscale.com/types/key"
|
2021-04-19 02:49:25 +00:00
|
|
|
"tailscale.com/types/logger"
|
|
|
|
"tailscale.com/types/netmap"
|
2023-08-22 15:07:26 +00:00
|
|
|
"tailscale.com/types/ptr"
|
2023-01-03 23:39:32 +00:00
|
|
|
"tailscale.com/types/views"
|
2023-08-22 20:32:04 +00:00
|
|
|
"tailscale.com/util/clientmetric"
|
2023-08-21 02:52:52 +00:00
|
|
|
"tailscale.com/util/cmpx"
|
2021-04-19 02:49:25 +00:00
|
|
|
"tailscale.com/wgengine/filter"
|
|
|
|
)
|
|
|
|
|
|
|
|
// mapSession holds the state over a long-polled "map" request to the
// control plane.
//
// It accepts incremental tailcfg.MapResponse values to
// netMapForResponse and returns fully inflated NetworkMaps, filling
// in the omitted data implicit from prior MapResponse values from
// within the same session (the same long-poll HTTP response to the
// one MapRequest).
type mapSession struct {
	// Immutable fields.
	netmapUpdater  NetmapUpdater       // called on changes (in addition to the optional hooks below)
	controlKnobs   *controlknobs.Knobs // or nil
	privateNodeKey key.NodePrivate
	publicNodeKey  key.NodePublic // derived from privateNodeKey in newMapSession
	logf           logger.Logf
	vlogf          logger.Logf // verbose logf; defaults to logger.Discard
	machinePubKey  key.MachinePublic
	altClock       tstime.Clock       // if nil, regular time is used
	cancel         context.CancelFunc // always non-nil, shuts down caller's base long poll context
	watchdogReset  chan struct{}      // send to request that the long poll activity watchdog timeout be reset

	// sessionAliveCtx is a Background-based context that's alive for the
	// duration of the mapSession that we own the lifetime of. It's closed by
	// sessionAliveCtxClose.
	sessionAliveCtx      context.Context
	sessionAliveCtxClose context.CancelFunc // closes sessionAliveCtx

	// Optional hooks, set once before use.

	// onDebug specifies what to do with a *tailcfg.Debug message.
	// If the watchdogReset chan is nil, it's not used. Otherwise it can be sent to
	// request that the long poll activity watchdog timeout be reset.
	onDebug func(_ context.Context, _ *tailcfg.Debug, watchdogReset chan<- struct{}) error

	// onConciseNetMapSummary, if non-nil, is called with the Netmap.VeryConcise summary
	// whenever a map response is received.
	onConciseNetMapSummary func(string)

	// onSelfNodeChanged is called before the NetmapUpdater if the self node was
	// changed.
	onSelfNodeChanged func(*netmap.NetworkMap)

	// Fields storing state over the course of multiple MapResponses.
	lastNode               tailcfg.NodeView
	peers                  map[tailcfg.NodeID]*tailcfg.NodeView // pointer to view (oddly). same pointers as sortedPeers.
	sortedPeers            []*tailcfg.NodeView                  // same pointers as peers, but sorted by Node.ID
	lastDNSConfig          *tailcfg.DNSConfig
	lastDERPMap            *tailcfg.DERPMap
	lastUserProfile        map[tailcfg.UserID]tailcfg.UserProfile
	lastPacketFilterRules  views.Slice[tailcfg.FilterRule]
	lastParsedPacketFilter []filter.Match
	lastSSHPolicy          *tailcfg.SSHPolicy
	collectServices        bool
	lastDomain             string
	lastDomainAuditLogID   string
	lastHealth             []string
	lastPopBrowserURL      string
	stickyDebug            tailcfg.Debug // accumulated opt.Bool values
	lastTKAInfo            *tailcfg.TKAInfo
	lastNetmapSummary      string // from NetworkMap.VeryConcise
}
|
|
|
|
|
2023-08-21 02:52:52 +00:00
|
|
|
// newMapSession returns a mostly unconfigured new mapSession.
|
|
|
|
//
|
|
|
|
// Modify its optional fields on the returned value before use.
|
|
|
|
//
|
|
|
|
// It must have its Close method called to release resources.
|
2023-09-11 19:03:39 +00:00
|
|
|
func newMapSession(privateNodeKey key.NodePrivate, nu NetmapUpdater, controlKnobs *controlknobs.Knobs) *mapSession {
|
2021-04-19 02:49:25 +00:00
|
|
|
ms := &mapSession{
|
2023-09-02 02:28:00 +00:00
|
|
|
netmapUpdater: nu,
|
2023-09-11 19:03:39 +00:00
|
|
|
controlKnobs: controlKnobs,
|
2023-01-11 20:59:08 +00:00
|
|
|
privateNodeKey: privateNodeKey,
|
2023-08-22 15:07:26 +00:00
|
|
|
publicNodeKey: privateNodeKey.Public(),
|
2023-01-11 20:59:08 +00:00
|
|
|
lastDNSConfig: new(tailcfg.DNSConfig),
|
|
|
|
lastUserProfile: map[tailcfg.UserID]tailcfg.UserProfile{},
|
2023-08-21 02:52:52 +00:00
|
|
|
watchdogReset: make(chan struct{}),
|
|
|
|
|
|
|
|
// Non-nil no-op defaults, to be optionally overridden by the caller.
|
|
|
|
logf: logger.Discard,
|
|
|
|
vlogf: logger.Discard,
|
|
|
|
cancel: func() {},
|
|
|
|
onDebug: func(context.Context, *tailcfg.Debug, chan<- struct{}) error { return nil },
|
|
|
|
onConciseNetMapSummary: func(string) {},
|
|
|
|
onSelfNodeChanged: func(*netmap.NetworkMap) {},
|
2021-04-19 02:49:25 +00:00
|
|
|
}
|
2023-08-21 02:52:52 +00:00
|
|
|
ms.sessionAliveCtx, ms.sessionAliveCtxClose = context.WithCancel(context.Background())
|
2021-04-19 02:49:25 +00:00
|
|
|
return ms
|
|
|
|
}
|
|
|
|
|
2023-08-21 02:52:52 +00:00
|
|
|
func (ms *mapSession) clock() tstime.Clock {
|
|
|
|
return cmpx.Or[tstime.Clock](ms.altClock, tstime.StdClock{})
|
|
|
|
}
|
|
|
|
|
|
|
|
// StartWatchdog starts the session's watchdog timer.
// If there's no activity in too long, it tears down the connection.
// Call Close to release these resources.
func (ms *mapSession) StartWatchdog() {
	timer, timedOutChan := ms.clock().NewTimer(watchdogTimeout)
	go func() {
		defer timer.Stop()
		for {
			select {
			case <-ms.sessionAliveCtx.Done():
				// Session is shutting down normally; stop watching.
				ms.vlogf("netmap: ending timeout goroutine")
				return
			case <-timedOutChan:
				// No activity for watchdogTimeout: tear down the long poll.
				ms.logf("map response long-poll timed out!")
				ms.cancel()
				return
			case <-ms.watchdogReset:
				// Activity seen; rearm the timer. If Stop reports the timer
				// already fired, drain its channel first so Reset doesn't
				// race with a stale expiry (also bail if the session dies
				// while we wait for the drain).
				if !timer.Stop() {
					select {
					case <-timedOutChan:
					case <-ms.sessionAliveCtx.Done():
						ms.vlogf("netmap: ending timeout goroutine")
						return
					}
				}
				ms.vlogf("netmap: reset timeout timer")
				timer.Reset(watchdogTimeout)
			}
		}
	}()
}
|
|
|
|
|
|
|
|
// Close releases the session's resources by closing sessionAliveCtx,
// which stops the watchdog goroutine started by StartWatchdog.
func (ms *mapSession) Close() {
	ms.sessionAliveCtxClose()
}
|
|
|
|
|
|
|
|
// HandleNonKeepAliveMapResponse handles a non-KeepAlive MapResponse (full or
// incremental).
//
// All fields that are valid on a KeepAlive MapResponse have already been
// handled.
//
// TODO(bradfitz): make this handle all fields later. For now (2023-08-20) this
// is [re]factoring progress enough.
func (ms *mapSession) HandleNonKeepAliveMapResponse(ctx context.Context, resp *tailcfg.MapResponse) error {
	if debug := resp.Debug; debug != nil {
		if err := ms.onDebug(ctx, debug, ms.watchdogReset); err != nil {
			return err
		}
	}

	// Dev/test knob: drop all peer endpoints before processing.
	if DevKnob.StripEndpoints() {
		for _, p := range resp.Peers {
			p.Endpoints = nil
		}
		for _, p := range resp.PeersChanged {
			p.Endpoints = nil
		}
	}

	// For responses that mutate the self node, check for updated nodeAttrs.
	if resp.Node != nil {
		if DevKnob.StripCaps() {
			resp.Node.Capabilities = nil
		}
		// NOTE(review): ms.controlKnobs may be nil (see field comment);
		// presumably UpdateFromNodeAttributes tolerates a nil receiver — confirm.
		ms.controlKnobs.UpdateFromNodeAttributes(resp.Node.Capabilities)
	}

	// Call Node.InitDisplayNames on any changed nodes.
	// (resp.Node.View() of a nil Node yields an invalid view, so this
	// falls back to ms.lastNode when the response has no self node.)
	initDisplayNames(cmpx.Or(resp.Node.View(), ms.lastNode), resp)

	// Convert PeersChanged entries into PeersChangedPatch where possible,
	// to maximize the chance of the delta path below.
	ms.patchifyPeersChanged(resp)

	// Fold the response into the session's accumulated state.
	ms.updateStateFromResponse(resp)

	if ms.tryHandleIncrementally(resp) {
		ms.onConciseNetMapSummary(ms.lastNetmapSummary) // every 5s log
		return nil
	}

	// We have to rebuild the whole netmap (lots of garbage & work downstream of
	// our UpdateFullNetmap call). This is the part we tried to avoid but
	// some field mutations (especially rare ones) aren't yet handled.

	nm := ms.netmap()
	ms.lastNetmapSummary = nm.VeryConcise()
	ms.onConciseNetMapSummary(ms.lastNetmapSummary)

	// If the self node changed, we might need to update persist.
	if resp.Node != nil {
		ms.onSelfNodeChanged(nm)
	}

	ms.netmapUpdater.UpdateFullNetmap(nm)
	return nil
}
|
|
|
|
|
2023-09-02 02:28:00 +00:00
|
|
|
func (ms *mapSession) tryHandleIncrementally(res *tailcfg.MapResponse) bool {
|
|
|
|
if ms.controlKnobs != nil && ms.controlKnobs.DisableDeltaUpdates.Load() {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
nud, ok := ms.netmapUpdater.(NetmapDeltaUpdater)
|
|
|
|
if !ok {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
mutations, ok := netmap.MutationsFromMapResponse(res, time.Now())
|
|
|
|
if ok && len(mutations) > 0 {
|
|
|
|
return nud.UpdateNetmapDelta(mutations)
|
|
|
|
}
|
|
|
|
return ok
|
|
|
|
}
|
|
|
|
|
2023-08-22 15:07:26 +00:00
|
|
|
// updateStats are some stats from updateStateFromResponse, primarily for
// testing. It's meant to be cheap enough to always compute, though. It doesn't
// allocate.
type updateStats struct {
	allNew  bool // whether the response carried a full (non-delta) Peers list
	added   int  // peers newly added to ms.peers
	removed int  // peers deleted from ms.peers
	changed int  // peers updated in place
}
|
|
|
|
|
2023-08-22 15:07:26 +00:00
|
|
|
// updateStateFromResponse updates ms from resp. It takes ownership of resp.
func (ms *mapSession) updateStateFromResponse(resp *tailcfg.MapResponse) {
	ms.updatePeersStateFromResponse(resp)

	if resp.Node != nil {
		ms.lastNode = resp.Node.View()
	}

	for _, up := range resp.UserProfiles {
		ms.lastUserProfile[up.ID] = up
	}
	// TODO(bradfitz): clean up old user profiles? maybe not worth it.

	if dm := resp.DERPMap; dm != nil {
		ms.vlogf("netmap: new map contains DERP map")

		// Zero-valued fields in a DERPMap mean that we're not changing
		// anything and are using the previous value(s).
		if ldm := ms.lastDERPMap; ldm != nil {
			if dm.Regions == nil {
				dm.Regions = ldm.Regions
				dm.OmitDefaultRegions = ldm.OmitDefaultRegions
			}
			if dm.HomeParams == nil {
				dm.HomeParams = ldm.HomeParams
			} else if oldhh := ldm.HomeParams; oldhh != nil {
				// Propagate sub-fields of HomeParams
				hh := dm.HomeParams
				if hh.RegionScore == nil {
					hh.RegionScore = oldhh.RegionScore
				}
			}
		}

		ms.lastDERPMap = dm
	}

	if pf := resp.PacketFilter; pf != nil {
		var err error
		ms.lastPacketFilterRules = views.SliceOf(pf)
		ms.lastParsedPacketFilter, err = filter.MatchesFromFilterRules(pf)
		if err != nil {
			// Keep going with whatever parsed; just log the failure.
			ms.logf("parsePacketFilter: %v", err)
		}
	}
	if c := resp.DNSConfig; c != nil {
		ms.lastDNSConfig = c
	}
	if p := resp.SSHPolicy; p != nil {
		ms.lastSSHPolicy = p
	}

	// CollectServices is an opt.Bool; only apply it when explicitly set.
	if v, ok := resp.CollectServices.Get(); ok {
		ms.collectServices = v
	}
	if resp.Domain != "" {
		ms.lastDomain = resp.Domain
	}
	if resp.DomainDataPlaneAuditLogID != "" {
		ms.lastDomainAuditLogID = resp.DomainDataPlaneAuditLogID
	}
	if resp.Health != nil {
		ms.lastHealth = resp.Health
	}
	if resp.TKAInfo != nil {
		ms.lastTKAInfo = resp.TKAInfo
	}
}
|
|
|
|
|
2023-08-22 20:32:04 +00:00
|
|
|
// Clientmetric counters: the patch* counters record which PeerChange fields
// get applied in updatePeersStateFromResponse; patchifiedPeer and
// patchifiedPeerEqual count how often patchifyPeersChanged converted a full
// PeersChanged entry into a patch (or found it entirely unchanged).
var (
	patchDERPRegion   = clientmetric.NewCounter("controlclient_patch_derp")
	patchEndpoints    = clientmetric.NewCounter("controlclient_patch_endpoints")
	patchCap          = clientmetric.NewCounter("controlclient_patch_capver")
	patchKey          = clientmetric.NewCounter("controlclient_patch_key")
	patchDiscoKey     = clientmetric.NewCounter("controlclient_patch_discokey")
	patchOnline       = clientmetric.NewCounter("controlclient_patch_online")
	patchLastSeen     = clientmetric.NewCounter("controlclient_patch_lastseen")
	patchKeyExpiry    = clientmetric.NewCounter("controlclient_patch_keyexpiry")
	patchCapabilities = clientmetric.NewCounter("controlclient_patch_capabilities")
	patchKeySignature = clientmetric.NewCounter("controlclient_patch_keysig")

	patchifiedPeer      = clientmetric.NewCounter("controlclient_patchified_peer")
	patchifiedPeerEqual = clientmetric.NewCounter("controlclient_patchified_peer_equal")
)
|
|
|
|
|
2023-08-22 15:07:26 +00:00
|
|
|
// updatePeersStateFromResponseres updates ms.peers and ms.sortedPeers from res. It takes ownership of res.
|
|
|
|
func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (stats updateStats) {
|
|
|
|
defer func() {
|
|
|
|
if stats.removed > 0 || stats.added > 0 {
|
|
|
|
ms.rebuildSorted()
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
if ms.peers == nil {
|
|
|
|
ms.peers = make(map[tailcfg.NodeID]*tailcfg.NodeView)
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(resp.Peers) > 0 {
|
|
|
|
// Not delta encoded.
|
|
|
|
stats.allNew = true
|
|
|
|
keep := make(map[tailcfg.NodeID]bool, len(resp.Peers))
|
|
|
|
for _, n := range resp.Peers {
|
|
|
|
keep[n.ID] = true
|
|
|
|
if vp, ok := ms.peers[n.ID]; ok {
|
|
|
|
stats.changed++
|
|
|
|
*vp = n.View()
|
|
|
|
} else {
|
|
|
|
stats.added++
|
|
|
|
ms.peers[n.ID] = ptr.To(n.View())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for id := range ms.peers {
|
|
|
|
if !keep[id] {
|
|
|
|
stats.removed++
|
|
|
|
delete(ms.peers, id)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Peers precludes all other delta operations so just return.
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, id := range resp.PeersRemoved {
|
|
|
|
if _, ok := ms.peers[id]; ok {
|
|
|
|
delete(ms.peers, id)
|
|
|
|
stats.removed++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, n := range resp.PeersChanged {
|
|
|
|
if vp, ok := ms.peers[n.ID]; ok {
|
|
|
|
stats.changed++
|
|
|
|
*vp = n.View()
|
|
|
|
} else {
|
|
|
|
stats.added++
|
|
|
|
ms.peers[n.ID] = ptr.To(n.View())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for nodeID, seen := range resp.PeerSeenChange {
|
|
|
|
if vp, ok := ms.peers[nodeID]; ok {
|
|
|
|
mut := vp.AsStruct()
|
|
|
|
if seen {
|
|
|
|
mut.LastSeen = ptr.To(clock.Now())
|
|
|
|
} else {
|
|
|
|
mut.LastSeen = nil
|
|
|
|
}
|
|
|
|
*vp = mut.View()
|
|
|
|
stats.changed++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for nodeID, online := range resp.OnlineChange {
|
|
|
|
if vp, ok := ms.peers[nodeID]; ok {
|
|
|
|
mut := vp.AsStruct()
|
|
|
|
mut.Online = ptr.To(online)
|
|
|
|
*vp = mut.View()
|
|
|
|
stats.changed++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, pc := range resp.PeersChangedPatch {
|
|
|
|
vp, ok := ms.peers[pc.NodeID]
|
|
|
|
if !ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
stats.changed++
|
|
|
|
mut := vp.AsStruct()
|
|
|
|
if pc.DERPRegion != 0 {
|
|
|
|
mut.DERP = fmt.Sprintf("%s:%v", tailcfg.DerpMagicIP, pc.DERPRegion)
|
2023-08-22 20:32:04 +00:00
|
|
|
patchDERPRegion.Add(1)
|
2023-08-22 15:07:26 +00:00
|
|
|
}
|
|
|
|
if pc.Cap != 0 {
|
|
|
|
mut.Cap = pc.Cap
|
2023-08-22 20:32:04 +00:00
|
|
|
patchCap.Add(1)
|
2023-08-22 15:07:26 +00:00
|
|
|
}
|
|
|
|
if pc.Endpoints != nil {
|
|
|
|
mut.Endpoints = pc.Endpoints
|
2023-08-22 20:32:04 +00:00
|
|
|
patchEndpoints.Add(1)
|
2023-08-22 15:07:26 +00:00
|
|
|
}
|
|
|
|
if pc.Key != nil {
|
|
|
|
mut.Key = *pc.Key
|
2023-08-22 20:32:04 +00:00
|
|
|
patchKey.Add(1)
|
2023-08-22 15:07:26 +00:00
|
|
|
}
|
|
|
|
if pc.DiscoKey != nil {
|
|
|
|
mut.DiscoKey = *pc.DiscoKey
|
2023-08-22 20:32:04 +00:00
|
|
|
patchDiscoKey.Add(1)
|
2023-08-22 15:07:26 +00:00
|
|
|
}
|
|
|
|
if v := pc.Online; v != nil {
|
|
|
|
mut.Online = ptr.To(*v)
|
2023-08-22 20:32:04 +00:00
|
|
|
patchOnline.Add(1)
|
2023-08-22 15:07:26 +00:00
|
|
|
}
|
|
|
|
if v := pc.LastSeen; v != nil {
|
|
|
|
mut.LastSeen = ptr.To(*v)
|
2023-08-22 20:32:04 +00:00
|
|
|
patchLastSeen.Add(1)
|
2023-08-22 15:07:26 +00:00
|
|
|
}
|
|
|
|
if v := pc.KeyExpiry; v != nil {
|
|
|
|
mut.KeyExpiry = *v
|
2023-08-22 20:32:04 +00:00
|
|
|
patchKeyExpiry.Add(1)
|
2023-08-22 15:07:26 +00:00
|
|
|
}
|
|
|
|
if v := pc.Capabilities; v != nil {
|
|
|
|
mut.Capabilities = *v
|
2023-08-22 20:32:04 +00:00
|
|
|
patchCapabilities.Add(1)
|
2023-08-22 15:07:26 +00:00
|
|
|
}
|
|
|
|
if v := pc.KeySignature; v != nil {
|
|
|
|
mut.KeySignature = v
|
2023-08-22 20:32:04 +00:00
|
|
|
patchKeySignature.Add(1)
|
2023-08-22 15:07:26 +00:00
|
|
|
}
|
|
|
|
*vp = mut.View()
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// rebuildSorted rebuilds ms.sortedPeers from ms.peers. It should be called
|
|
|
|
// after any additions or removals from peers.
|
|
|
|
func (ms *mapSession) rebuildSorted() {
|
|
|
|
if ms.sortedPeers == nil {
|
|
|
|
ms.sortedPeers = make([]*tailcfg.NodeView, 0, len(ms.peers))
|
|
|
|
} else {
|
|
|
|
if len(ms.sortedPeers) > len(ms.peers) {
|
|
|
|
clear(ms.sortedPeers[len(ms.peers):])
|
|
|
|
}
|
|
|
|
ms.sortedPeers = ms.sortedPeers[:0]
|
|
|
|
}
|
|
|
|
for _, p := range ms.peers {
|
|
|
|
ms.sortedPeers = append(ms.sortedPeers, p)
|
|
|
|
}
|
|
|
|
sort.Slice(ms.sortedPeers, func(i, j int) bool {
|
|
|
|
return ms.sortedPeers[i].ID() < ms.sortedPeers[j].ID()
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func (ms *mapSession) addUserProfile(nm *netmap.NetworkMap, userID tailcfg.UserID) {
|
|
|
|
if userID == 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if _, dup := nm.UserProfiles[userID]; dup {
|
|
|
|
// Already populated it from a previous peer.
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if up, ok := ms.lastUserProfile[userID]; ok {
|
|
|
|
nm.UserProfiles[userID] = up
|
|
|
|
}
|
|
|
|
}
|
2021-04-19 02:49:25 +00:00
|
|
|
|
2023-08-22 20:32:04 +00:00
|
|
|
// debugPatchifyPeer, when set, logs the JSON of each PeerChange produced by
// patchifyPeersChanged.
var debugPatchifyPeer = envknob.RegisterBool("TS_DEBUG_PATCHIFY_PEER")
|
|
|
|
|
|
|
|
// patchifyPeersChanged mutates resp to promote PeersChanged entries to PeersChangedPatch
|
|
|
|
// when possible.
|
|
|
|
func (ms *mapSession) patchifyPeersChanged(resp *tailcfg.MapResponse) {
|
|
|
|
filtered := resp.PeersChanged[:0]
|
|
|
|
for _, n := range resp.PeersChanged {
|
|
|
|
if p, ok := ms.patchifyPeer(n); ok {
|
|
|
|
patchifiedPeer.Add(1)
|
|
|
|
if debugPatchifyPeer() {
|
|
|
|
patchj, _ := json.Marshal(p)
|
|
|
|
ms.logf("debug: patchifyPeer[ID=%v]: %s", n.ID, patchj)
|
|
|
|
}
|
|
|
|
if p != nil {
|
|
|
|
resp.PeersChangedPatch = append(resp.PeersChangedPatch, p)
|
|
|
|
} else {
|
|
|
|
patchifiedPeerEqual.Add(1)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
filtered = append(filtered, n)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
resp.PeersChanged = filtered
|
|
|
|
if len(resp.PeersChanged) == 0 {
|
|
|
|
resp.PeersChanged = nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// nodeFields is the lazily-computed list of tailcfg.Node's field names.
var nodeFields = sync.OnceValue(getNodeFields)

// getNodeFields returns the fields of tailcfg.Node.
func getNodeFields() []string {
	rt := reflect.TypeOf((*tailcfg.Node)(nil)).Elem()
	ret := make([]string, rt.NumField())
	for i := 0; i < rt.NumField(); i++ {
		ret[i] = rt.Field(i).Name
	}
	return ret
}
|
|
|
|
|
|
|
|
// patchifyPeer returns a *tailcfg.PeerChange of the session's existing copy of
|
|
|
|
// the n.ID Node to n.
|
|
|
|
//
|
|
|
|
// It returns ok=false if a patch can't be made, (V, ok) on a delta, or (nil,
|
|
|
|
// true) if all the fields were identical (a zero change).
|
|
|
|
func (ms *mapSession) patchifyPeer(n *tailcfg.Node) (_ *tailcfg.PeerChange, ok bool) {
|
|
|
|
was, ok := ms.peers[n.ID]
|
|
|
|
if !ok {
|
|
|
|
return nil, false
|
|
|
|
}
|
|
|
|
return peerChangeDiff(*was, n)
|
|
|
|
}
|
|
|
|
|
|
|
|
// peerChangeDiff returns the difference from 'was' to 'n', if possible.
//
// It returns (nil, true) if the fields were identical.
//
// Fields fall into three buckets: fields expressible in a PeerChange
// (recorded into ret), fields that force a full (non-patch) update when they
// differ (return nil, false), and fields intentionally ignored. The field
// list comes from reflection (nodeFields) so the switch panics in tests if
// tailcfg.Node grows a field this function doesn't handle.
func peerChangeDiff(was tailcfg.NodeView, n *tailcfg.Node) (_ *tailcfg.PeerChange, ok bool) {
	var ret *tailcfg.PeerChange
	// pc lazily allocates the PeerChange the first time a patchable
	// difference is found; ret stays nil for identical nodes.
	pc := func() *tailcfg.PeerChange {
		if ret == nil {
			ret = new(tailcfg.PeerChange)
		}
		return ret
	}
	for _, field := range nodeFields() {
		switch field {
		default:
			// The whole point of using reflect in this function is to panic
			// here in tests if we forget to handle a new field.
			panic("unhandled field: " + field)
		case "computedHostIfDifferent", "ComputedName", "ComputedNameWithHost":
			// Caller's responsibility to have populated these.
			continue
		case "DataPlaneAuditLogID":
			// Not sent for peers.
		case "ID":
			if was.ID() != n.ID {
				return nil, false
			}
		case "StableID":
			if was.StableID() != n.StableID {
				return nil, false
			}
		case "Name":
			if was.Name() != n.Name {
				return nil, false
			}
		case "User":
			if was.User() != n.User {
				return nil, false
			}
		case "Sharer":
			if was.Sharer() != n.Sharer {
				return nil, false
			}
		case "Key":
			if was.Key() != n.Key {
				pc().Key = ptr.To(n.Key)
			}
		case "KeyExpiry":
			if !was.KeyExpiry().Equal(n.KeyExpiry) {
				pc().KeyExpiry = ptr.To(n.KeyExpiry)
			}
		case "KeySignature":
			if !was.KeySignature().Equal(n.KeySignature) {
				pc().KeySignature = slices.Clone(n.KeySignature)
			}
		case "Machine":
			if was.Machine() != n.Machine {
				return nil, false
			}
		case "DiscoKey":
			if was.DiscoKey() != n.DiscoKey {
				pc().DiscoKey = ptr.To(n.DiscoKey)
			}
		case "Addresses":
			if !views.SliceEqual(was.Addresses(), views.SliceOf(n.Addresses)) {
				return nil, false
			}
		case "AllowedIPs":
			if !views.SliceEqual(was.AllowedIPs(), views.SliceOf(n.AllowedIPs)) {
				return nil, false
			}
		case "Endpoints":
			if !views.SliceEqual(was.Endpoints(), views.SliceOf(n.Endpoints)) {
				pc().Endpoints = slices.Clone(n.Endpoints)
			}
		case "DERP":
			// A DERP change is only patchable when the new value is the
			// standard "127.3.3.40:region" form (tailcfg.DerpMagicIP),
			// since PeerChange carries just the region number.
			if was.DERP() != n.DERP {
				ip, portStr, err := net.SplitHostPort(n.DERP)
				if err != nil || ip != "127.3.3.40" {
					return nil, false
				}
				port, err := strconv.Atoi(portStr)
				if err != nil || port < 1 || port > 65535 {
					return nil, false
				}
				pc().DERPRegion = port
			}
		case "Hostinfo":
			if !was.Hostinfo().Valid() && !n.Hostinfo.Valid() {
				continue
			}
			if !was.Hostinfo().Valid() || !n.Hostinfo.Valid() {
				return nil, false
			}
			if !was.Hostinfo().Equal(n.Hostinfo) {
				return nil, false
			}
		case "Created":
			if !was.Created().Equal(n.Created) {
				return nil, false
			}
		case "Cap":
			if was.Cap() != n.Cap {
				pc().Cap = n.Cap
			}
		case "Tags":
			if !views.SliceEqual(was.Tags(), views.SliceOf(n.Tags)) {
				return nil, false
			}
		case "PrimaryRoutes":
			if !views.SliceEqual(was.PrimaryRoutes(), views.SliceOf(n.PrimaryRoutes)) {
				return nil, false
			}
		case "Online":
			// Only patch when both sides have an explicit value that differs;
			// nil-vs-set transitions are left unpatched (no field emitted).
			wasOnline := was.Online()
			if n.Online != nil && wasOnline != nil && *n.Online != *wasOnline {
				pc().Online = ptr.To(*n.Online)
			}
		case "LastSeen":
			wasSeen := was.LastSeen()
			if n.LastSeen != nil && wasSeen != nil && !wasSeen.Equal(*n.LastSeen) {
				pc().LastSeen = ptr.To(*n.LastSeen)
			}
		case "MachineAuthorized":
			if was.MachineAuthorized() != n.MachineAuthorized {
				return nil, false
			}
		case "Capabilities":
			if !views.SliceEqual(was.Capabilities(), views.SliceOf(n.Capabilities)) {
				pc().Capabilities = ptr.To(n.Capabilities)
			}
		case "UnsignedPeerAPIOnly":
			if was.UnsignedPeerAPIOnly() != n.UnsignedPeerAPIOnly {
				return nil, false
			}
		case "IsWireGuardOnly":
			if was.IsWireGuardOnly() != n.IsWireGuardOnly {
				return nil, false
			}
		case "Expired":
			if was.Expired() != n.Expired {
				return nil, false
			}
		case "SelfNodeV4MasqAddrForThisPeer":
			va, vb := was.SelfNodeV4MasqAddrForThisPeer(), n.SelfNodeV4MasqAddrForThisPeer
			if va == nil && vb == nil {
				continue
			}
			if va == nil || vb == nil || *va != *vb {
				return nil, false
			}
		case "ExitNodeDNSResolvers":
			va, vb := was.ExitNodeDNSResolvers(), views.SliceOfViews(n.ExitNodeDNSResolvers)

			if va.Len() != vb.Len() {
				return nil, false
			}

			for i := range va.LenIter() {
				if !va.At(i).Equal(vb.At(i)) {
					return nil, false
				}
			}

		}
	}
	if ret != nil {
		ret.NodeID = n.ID
	}
	return ret, true
}
|
|
|
|
|
2023-08-22 15:07:26 +00:00
|
|
|
// netmap returns a fully populated NetworkMap from the last state seen from
// a call to updateStateFromResponse, filling in omitted
// information from prior MapResponse values.
func (ms *mapSession) netmap() *netmap.NetworkMap {
	// Copy the views out of sortedPeers (which holds pointers-to-views)
	// into a plain slice for the NetworkMap.
	peerViews := make([]tailcfg.NodeView, len(ms.sortedPeers))
	for i, vp := range ms.sortedPeers {
		peerViews[i] = *vp
	}

	nm := &netmap.NetworkMap{
		NodeKey:           ms.publicNodeKey,
		PrivateKey:        ms.privateNodeKey,
		MachineKey:        ms.machinePubKey,
		Peers:             peerViews,
		UserProfiles:      make(map[tailcfg.UserID]tailcfg.UserProfile),
		Domain:            ms.lastDomain,
		DomainAuditLogID:  ms.lastDomainAuditLogID,
		DNS:               *ms.lastDNSConfig,
		PacketFilter:      ms.lastParsedPacketFilter,
		PacketFilterRules: ms.lastPacketFilterRules,
		SSHPolicy:         ms.lastSSHPolicy,
		CollectServices:   ms.collectServices,
		DERPMap:           ms.lastDERPMap,
		ControlHealth:     ms.lastHealth,
		TKAEnabled:        ms.lastTKAInfo != nil && !ms.lastTKAInfo.Disabled,
	}

	if ms.lastTKAInfo != nil && ms.lastTKAInfo.Head != "" {
		// An unparseable TKA head disables TKA rather than failing the netmap.
		if err := nm.TKAHead.UnmarshalText([]byte(ms.lastTKAInfo.Head)); err != nil {
			ms.logf("error unmarshalling TKAHead: %v", err)
			nm.TKAEnabled = false
		}
	}

	// Populate self-node-derived fields, if we've seen a self node.
	if node := ms.lastNode; node.Valid() {
		nm.SelfNode = node
		nm.Expiry = node.KeyExpiry()
		nm.Name = node.Name()
		nm.Addresses = filterSelfAddresses(node.Addresses().AsSlice())
		if node.MachineAuthorized() {
			nm.MachineStatus = tailcfg.MachineAuthorized
		} else {
			nm.MachineStatus = tailcfg.MachineUnauthorized
		}
	}

	// Fill in user profiles for the self node's owner and each
	// peer's user/sharer, from the session's profile cache.
	ms.addUserProfile(nm, nm.User())
	for _, peer := range peerViews {
		ms.addUserProfile(nm, peer.Sharer())
		ms.addUserProfile(nm, peer.User())
	}
	if DevKnob.ForceProxyDNS() {
		nm.DNS.Proxied = true
	}
	return nm
}
|
|
|
|
|
|
|
|
func nodesSorted(v []*tailcfg.Node) bool {
|
|
|
|
for i, n := range v {
|
|
|
|
if i > 0 && n.ID <= v[i-1].ID {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
func sortNodes(v []*tailcfg.Node) {
|
|
|
|
sort.Slice(v, func(i, j int) bool { return v[i].ID < v[j].ID })
|
|
|
|
}
|
|
|
|
|
|
|
|
func cloneNodes(v1 []*tailcfg.Node) []*tailcfg.Node {
|
|
|
|
if v1 == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
v2 := make([]*tailcfg.Node, len(v1))
|
|
|
|
for i, n := range v1 {
|
|
|
|
v2[i] = n.Clone()
|
|
|
|
}
|
|
|
|
return v2
|
|
|
|
}
|
2021-06-28 22:25:58 +00:00
|
|
|
|
2022-09-14 19:49:39 +00:00
|
|
|
// debugSelfIPv6Only, when set, restricts the self node's addresses to IPv6
// ones. See filterSelfAddresses.
var debugSelfIPv6Only = envknob.RegisterBool("TS_DEBUG_SELF_V6_ONLY")
|
2021-06-28 22:25:58 +00:00
|
|
|
|
all: convert more code to use net/netip directly
perl -i -npe 's,netaddr.IPPrefixFrom,netip.PrefixFrom,' $(git grep -l -F netaddr.)
perl -i -npe 's,netaddr.IPPortFrom,netip.AddrPortFrom,' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPrefix,netip.Prefix,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPPort,netip.AddrPort,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IP\b,netip.Addr,g' $(git grep -l -F netaddr. )
perl -i -npe 's,netaddr.IPv6Raw\b,netip.AddrFrom16,g' $(git grep -l -F netaddr. )
goimports -w .
Then delete some stuff from the net/netaddr shim package which is no
longer neeed.
Updates #5162
Change-Id: Ia7a86893fe21c7e3ee1ec823e8aba288d4566cd8
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2022-07-26 04:14:09 +00:00
|
|
|
func filterSelfAddresses(in []netip.Prefix) (ret []netip.Prefix) {
|
2021-06-28 22:25:58 +00:00
|
|
|
switch {
|
|
|
|
default:
|
|
|
|
return in
|
2022-09-14 19:49:39 +00:00
|
|
|
case debugSelfIPv6Only():
|
2021-06-28 22:25:58 +00:00
|
|
|
for _, a := range in {
|
2022-07-25 03:08:42 +00:00
|
|
|
if a.Addr().Is6() {
|
2021-06-28 22:25:58 +00:00
|
|
|
ret = append(ret, a)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ret
|
|
|
|
}
|
|
|
|
}
|