Remove variables and leftovers of pregenerated ACL content

Prior to the code reorg, we would generate rules from the Policy and
store them on the global object. Now we generate them on the fly for each
node, and this commit cleans up the old variables to make sure we have no
unexpected side effects.

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
Kristoffer Dalby 2023-05-31 18:45:04 +02:00 committed by Kristoffer Dalby
parent 084d1d5d6e
commit 725bbd7408
11 changed files with 410 additions and 292 deletions
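In broad strokes, the change moves rule generation from a cached field on the shared Headscale object to an on-demand computation wherever a map response is built. A minimal, self-contained Go sketch of the two shapes (the types and functions below are simplified stand-ins, not the real headscale or tailcfg API):

package main

import "fmt"

// Simplified stand-ins for types.Machine and tailcfg.FilterRule.
type Machine struct{ Hostname string }
type FilterRule struct{ Src string }

// generateFilterRules mirrors the shape of policy.GenerateFilterRules:
// the rules are a pure function of the policy inputs and the machine set.
func generateFilterRules(machines []Machine) []FilterRule {
    rules := make([]FilterRule, 0, len(machines))
    for _, m := range machines {
        rules = append(rules, FilterRule{Src: m.Hostname})
    }
    return rules
}

func main() {
    machines := []Machine{{Hostname: "mini"}, {Hostname: "peer1"}}

    // Old shape: generate once, cache on a global, invalidate via channels.
    cached := generateFilterRules(machines)
    _ = cached // readers of this cache can observe stale rules

    // New shape: recompute at the point of use, per node, so there is no
    // shared cache to invalidate and no channel plumbing to maintain.
    for _, m := range machines {
        fmt.Println(m.Hostname, generateFilterRules(machines))
    }
}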


@@ -87,8 +87,6 @@ type Headscale struct {
DERPServer *DERPServer
ACLPolicy *policy.ACLPolicy
aclRules []tailcfg.FilterRule
sshPolicy *tailcfg.SSHPolicy
lastStateChange *xsync.MapOf[string, time.Time]
@@ -102,12 +100,6 @@ type Headscale struct {
stateUpdateChan chan struct{}
cancelStateUpdateChan chan struct{}
// TODO(kradalby): Temporary measure to make sure we can update policy
// across modules, will be removed when aclRules are no longer stored
// globally but generated per node basis.
policyUpdateChan chan struct{}
cancelPolicyUpdateChan chan struct{}
}
func NewHeadscale(cfg *Config) (*Headscale, error) {
@@ -168,20 +160,15 @@ func NewHeadscale(cfg *Config) (*Headscale, error) {
dbString: dbString,
privateKey2019: privateKey,
noisePrivateKey: noisePrivateKey,
aclRules: tailcfg.FilterAllowAll, // default allowall
registrationCache: registrationCache,
pollNetMapStreamWG: sync.WaitGroup{},
lastStateChange: xsync.NewMapOf[time.Time](),
stateUpdateChan: make(chan struct{}),
cancelStateUpdateChan: make(chan struct{}),
policyUpdateChan: make(chan struct{}),
cancelPolicyUpdateChan: make(chan struct{}),
}
go app.watchStateChannel()
go app.watchPolicyChannel()
database, err := db.NewHeadscaleDatabase(
cfg.DBtype,
@@ -189,7 +176,6 @@ func NewHeadscale(cfg *Config) (*Headscale, error) {
cfg.OIDC.StripEmaildomain,
app.dbDebug,
app.stateUpdateChan,
app.policyUpdateChan,
cfg.IPPrefixes,
cfg.BaseDomain)
if err != nil {
@@ -750,10 +736,6 @@ func (h *Headscale) Serve() error {
close(h.stateUpdateChan)
close(h.cancelStateUpdateChan)
<-h.cancelPolicyUpdateChan
close(h.policyUpdateChan)
close(h.cancelPolicyUpdateChan)
// Close db connections
err = h.db.Close()
if err != nil {
@@ -862,30 +844,6 @@ func (h *Headscale) watchStateChannel() {
}
}
// TODO(kradalby): baby steps, make this more robust.
func (h *Headscale) watchPolicyChannel() {
for {
select {
case <-h.policyUpdateChan:
machines, err := h.db.ListMachines()
if err != nil {
log.Error().Err(err).Msg("failed to fetch machines during policy update")
}
rules, sshPolicy, err := policy.GenerateFilterRules(h.ACLPolicy, machines, h.cfg.OIDC.StripEmaildomain)
if err != nil {
log.Error().Err(err).Msg("failed to update ACL rules")
}
h.aclRules = rules
h.sshPolicy = sshPolicy
case <-h.cancelPolicyUpdateChan:
return
}
}
}
func (h *Headscale) setLastStateChangeToNow() {
var err error
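The watchPolicyChannel goroutine deleted above follows a common Go shape: a background worker selecting on an update channel and a cancel channel, refreshing cached state on each signal. A self-contained sketch of the now-retired pattern (names are illustrative, not headscale's):

package main

import "fmt"

// watch mirrors the shape of the removed watchPolicyChannel: refresh some
// cached state on every update signal until cancel is closed.
func watch(updates, cancel <-chan struct{}, done chan<- struct{}) {
    defer close(done)
    for {
        select {
        case <-updates:
            fmt.Println("recomputing cached ACL rules") // the step made obsolete
        case <-cancel:
            return
        }
    }
}

func main() {
    updates := make(chan struct{})
    cancel := make(chan struct{})
    done := make(chan struct{})
    go watch(updates, cancel, done)

    updates <- struct{}{} // e.g. SetTags signalling a policy-relevant change
    close(cancel)
    <-done
}

With rules generated per node on demand, there is no cached state left to refresh, so the goroutine and both of its channels can be removed outright.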


@@ -36,9 +36,8 @@ type KV struct {
}
type HSDatabase struct {
db *gorm.DB
notifyStateChan chan<- struct{}
notifyPolicyChan chan<- struct{}
db *gorm.DB
notifyStateChan chan<- struct{}
ipAllocationMutex sync.Mutex
@@ -53,7 +52,6 @@ func NewHeadscaleDatabase(
dbType, connectionAddr string,
stripEmailDomain, debug bool,
notifyStateChan chan<- struct{},
notifyPolicyChan chan<- struct{},
ipPrefixes []netip.Prefix,
baseDomain string,
) (*HSDatabase, error) {
@@ -63,9 +61,8 @@
}
db := HSDatabase{
db: dbConn,
notifyStateChan: notifyStateChan,
notifyPolicyChan: notifyPolicyChan,
db: dbConn,
notifyStateChan: notifyStateChan,
ipPrefixes: ipPrefixes,
baseDomain: baseDomain,


@@ -33,7 +33,7 @@ var (
)
)
// ListPeers returns all peers of machine, regardless of any Policy.
// ListPeers returns all peers of machine, regardless of any Policy or if the node is expired.
func (hsdb *HSDatabase) ListPeers(machine *types.Machine) (types.Machines, error) {
log.Trace().
Caller().
@@ -218,7 +218,6 @@ func (hsdb *HSDatabase) SetTags(
}
machine.ForcedTags = newTags
hsdb.notifyPolicyChan <- struct{}{}
hsdb.notifyStateChange()
if err := hsdb.db.Save(machine).Error; err != nil {
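Because ListPeers now explicitly promises no filtering, callers that care about expiry have to filter for themselves, which the mapper change later in this commit does with samber/lo. A minimal sketch of that caller-side filter (Machine and IsExpired here are simplified stand-ins for the hscontrol/types versions):

package main

import (
    "fmt"
    "time"

    "github.com/samber/lo"
)

type Machine struct {
    Hostname string
    Expiry   *time.Time
}

// IsExpired stands in for types.Machine.IsExpired: a machine whose expiry
// lies in the past is expired.
func (m Machine) IsExpired() bool {
    return m.Expiry != nil && m.Expiry.Before(time.Now())
}

func main() {
    past := time.Now().Add(-time.Hour)
    peers := []Machine{{Hostname: "fresh"}, {Hostname: "stale", Expiry: &past}}

    // The same shape as the filter added in fullMapResponse.
    alive := lo.Filter(peers, func(item Machine, _ int) bool {
        return !item.IsExpired()
    })
    fmt.Println(alive) // [{fresh <nil>}]
}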


@@ -459,7 +459,7 @@ func (s *Suite) TestSetTags(c *check.C) {
types.StringList([]string{"tag:bar", "tag:test", "tag:unknown"}),
)
c.Assert(channelUpdates, check.Equals, int32(4))
c.Assert(channelUpdates, check.Equals, int32(2))
}
func TestHeadscale_generateGivenName(t *testing.T) {


@@ -62,7 +62,6 @@ func (s *Suite) ResetDB(c *check.C) {
false,
false,
sink,
sink,
[]netip.Prefix{
netip.MustParsePrefix("10.27.0.0/23"),
},


@@ -16,6 +16,7 @@ import (
"github.com/juanfont/headscale/hscontrol/util"
"github.com/klauspost/compress/zstd"
"github.com/rs/zerolog/log"
"github.com/samber/lo"
"tailscale.com/smallzstd"
"tailscale.com/tailcfg"
"tailscale.com/types/dnstype"
@@ -69,33 +70,18 @@ func NewMapper(
}
}
func (m *Mapper) tempWrap(
machine *types.Machine,
pol *policy.ACLPolicy,
) (*tailcfg.MapResponse, error) {
peers, err := m.db.ListPeers(machine)
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Cannot fetch peers")
return nil, err
}
return fullMapResponse(
pol,
machine,
peers,
m.stripEmailDomain,
m.baseDomain,
m.dnsCfg,
m.derpMap,
m.logtail,
m.randomClientPort,
)
}
// TODO: Optimise
// As this work continues, the idea is that there will be one Mapper instance
// per node, attached to the open stream between the control and client.
// This means that this can hold a state per machine and we can use that to
// improve the mapresponses sent.
// We could:
// - Keep information about the previous mapresponse so we can send a diff
// - Store hashes
// - Create a "minifier" that removes info not needed for the node
// fullMapResponse is the internal function for generating a MapResponse
// for a machine.
func fullMapResponse(
pol *policy.ACLPolicy,
machine *types.Machine,
@@ -113,11 +99,23 @@ func fullMapResponse(
return nil, err
}
rules, sshPolicy, err := policy.GenerateFilterRules(pol, peers, stripEmailDomain)
rules, sshPolicy, err := policy.GenerateFilterRules(
pol,
// The policy is currently calculated for the entire Headscale network
append(peers, *machine),
stripEmailDomain,
)
if err != nil {
return nil, err
}
// Filter out peers that have expired.
peers = lo.Filter(peers, func(item types.Machine, index int) bool {
return !item.IsExpired()
})
// If there are filter rules present, see if there are any machines that cannot
// access each other at all and remove them from the peers.
if len(rules) > 0 {
peers = policy.FilterMachinesByACL(machine, peers, rules)
}
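Taken together, the hunk above gives the new per-node flow: generate rules over the whole network (peers plus the node itself), drop expired peers, then prune peers the node cannot reach at all. A self-contained sketch of that flow, with stubs standing in for the policy package (they are not the real implementations):

package main

import "fmt"

type Machine struct {
    Hostname string
    Expired  bool
}

type FilterRule struct{}

// Stubs standing in for policy.GenerateFilterRules and
// policy.FilterMachinesByACL.
func generateFilterRules(all []Machine) []FilterRule { return []FilterRule{{}} }

func filterMachinesByACL(node Machine, peers []Machine, rules []FilterRule) []Machine {
    return peers // the real version drops peers node cannot communicate with
}

func buildPeers(node Machine, peers []Machine) []Machine {
    // 1. The policy is evaluated for the entire network, including the
    //    node itself (the append in the hunk above).
    rules := generateFilterRules(append(append([]Machine{}, peers...), node))

    // 2. Expired peers never make it into a map response.
    alive := make([]Machine, 0, len(peers))
    for _, p := range peers {
        if !p.Expired {
            alive = append(alive, p)
        }
    }

    // 3. Only when rules are present is the ACL-based pruning applied.
    if len(rules) > 0 {
        alive = filterMachinesByACL(node, alive, rules)
    }

    return alive
}

func main() {
    peers := []Machine{{Hostname: "peer1"}, {Hostname: "peer2", Expired: true}}
    fmt.Println(buildPeers(Machine{Hostname: "mini"}, peers))
}

Note the design choice in step 1: the filter rules are still computed for the whole network rather than tailored to the node, which the in-line comment in the hunk flags as a current limitation.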
@@ -278,12 +276,33 @@ func addNextDNSMetadata(resolvers []*dnstype.Resolver, machine types.Machine) {
}
}
// CreateMapResponse returns a MapResponse for the given machine.
func (m Mapper) CreateMapResponse(
mapRequest tailcfg.MapRequest,
machine *types.Machine,
pol *policy.ACLPolicy,
) ([]byte, error) {
mapResponse, err := m.tempWrap(machine, pol)
peers, err := m.db.ListPeers(machine)
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Cannot fetch peers")
return nil, err
}
mapResponse, err := fullMapResponse(
pol,
machine,
peers,
m.stripEmailDomain,
m.baseDomain,
m.dnsCfg,
m.derpMap,
m.logtail,
m.randomClientPort,
)
if err != nil {
return nil, err
}


@@ -6,6 +6,7 @@ import (
"testing"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/juanfont/headscale/hscontrol/policy"
@@ -164,6 +165,155 @@ func Test_fullMapResponse(t *testing.T) {
lastSeen := time.Date(2009, time.November, 10, 23, 9, 0, 0, time.UTC)
expire := time.Date(2500, time.November, 11, 23, 0, 0, 0, time.UTC)
mini := &types.Machine{
ID: 0,
MachineKey: "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
NodeKey: "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
DiscoKey: "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
Hostname: "mini",
GivenName: "mini",
UserID: 0,
User: types.User{Name: "mini"},
ForcedTags: []string{},
AuthKeyID: 0,
AuthKey: &types.PreAuthKey{},
LastSeen: &lastSeen,
Expiry: &expire,
HostInfo: types.HostInfo{},
Endpoints: []string{},
Routes: []types.Route{
{
Prefix: types.IPPrefix(netip.MustParsePrefix("0.0.0.0/0")),
Advertised: true,
Enabled: true,
IsPrimary: false,
},
{
Prefix: types.IPPrefix(netip.MustParsePrefix("192.168.0.0/24")),
Advertised: true,
Enabled: true,
IsPrimary: true,
},
{
Prefix: types.IPPrefix(netip.MustParsePrefix("172.0.0.0/10")),
Advertised: true,
Enabled: false,
IsPrimary: true,
},
},
CreatedAt: created,
}
tailMini := &tailcfg.Node{
ID: 0,
StableID: "0",
Name: "mini",
User: 0,
Key: mustNK(
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
),
KeyExpiry: expire,
Machine: mustMK(
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
),
DiscoKey: mustDK(
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
),
Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")},
AllowedIPs: []netip.Prefix{
netip.MustParsePrefix("100.64.0.1/32"),
netip.MustParsePrefix("0.0.0.0/0"),
netip.MustParsePrefix("192.168.0.0/24"),
},
Endpoints: []string{},
DERP: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{}),
Created: created,
Tags: []string{},
PrimaryRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
LastSeen: &lastSeen,
Online: new(bool),
KeepAlive: true,
MachineAuthorized: true,
Capabilities: []string{
tailcfg.CapabilityFileSharing,
tailcfg.CapabilityAdmin,
tailcfg.CapabilitySSH,
},
}
peer1 := types.Machine{
ID: 1,
MachineKey: "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
NodeKey: "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
DiscoKey: "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")},
Hostname: "peer1",
GivenName: "peer1",
UserID: 0,
User: types.User{Name: "mini"},
ForcedTags: []string{},
LastSeen: &lastSeen,
Expiry: &expire,
HostInfo: types.HostInfo{},
Endpoints: []string{},
Routes: []types.Route{},
CreatedAt: created,
}
tailPeer1 := &tailcfg.Node{
ID: 1,
StableID: "1",
Name: "peer1",
Key: mustNK(
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
),
KeyExpiry: expire,
Machine: mustMK(
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
),
DiscoKey: mustDK(
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
),
Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.2/32")},
AllowedIPs: []netip.Prefix{netip.MustParsePrefix("100.64.0.2/32")},
Endpoints: []string{},
DERP: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{}),
Created: created,
Tags: []string{},
PrimaryRoutes: []netip.Prefix{},
LastSeen: &lastSeen,
Online: new(bool),
KeepAlive: true,
MachineAuthorized: true,
Capabilities: []string{
tailcfg.CapabilityFileSharing,
tailcfg.CapabilityAdmin,
tailcfg.CapabilitySSH,
},
}
peer2 := types.Machine{
ID: 2,
MachineKey: "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
NodeKey: "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
DiscoKey: "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
Hostname: "peer2",
GivenName: "peer2",
UserID: 1,
User: types.User{Name: "peer2"},
ForcedTags: []string{},
LastSeen: &lastSeen,
Expiry: &expire,
HostInfo: types.HostInfo{},
Endpoints: []string{},
Routes: []types.Route{},
CreatedAt: created,
}
tests := []struct {
name string
pol *policy.ACLPolicy
@@ -190,47 +340,9 @@ func Test_fullMapResponse(t *testing.T) {
// wantErr: true,
// },
{
name: "no-pol-no-peers-map-response",
pol: &policy.ACLPolicy{},
machine: &types.Machine{
ID: 0,
MachineKey: "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
NodeKey: "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
DiscoKey: "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
Hostname: "mini",
GivenName: "mini",
UserID: 0,
User: types.User{Name: "mini"},
ForcedTags: []string{},
AuthKeyID: 0,
AuthKey: &types.PreAuthKey{},
LastSeen: &lastSeen,
Expiry: &expire,
HostInfo: types.HostInfo{},
Endpoints: []string{},
Routes: []types.Route{
{
Prefix: types.IPPrefix(netip.MustParsePrefix("0.0.0.0/0")),
Advertised: true,
Enabled: true,
IsPrimary: false,
},
{
Prefix: types.IPPrefix(netip.MustParsePrefix("192.168.0.0/24")),
Advertised: true,
Enabled: true,
IsPrimary: true,
},
{
Prefix: types.IPPrefix(netip.MustParsePrefix("172.0.0.0/10")),
Advertised: true,
Enabled: false,
IsPrimary: true,
},
},
CreatedAt: created,
},
name: "no-pol-no-peers-map-response",
pol: &policy.ACLPolicy{},
machine: mini,
peers: []types.Machine{},
stripEmailDomain: false,
baseDomain: "",
@@ -239,44 +351,8 @@ func Test_fullMapResponse(t *testing.T) {
logtail: false,
randomClientPort: false,
want: &tailcfg.MapResponse{
KeepAlive: false,
Node: &tailcfg.Node{
ID: 0,
StableID: "0",
Name: "mini",
User: 0,
Key: mustNK(
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
),
KeyExpiry: expire,
Machine: mustMK(
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
),
DiscoKey: mustDK(
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
),
Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")},
AllowedIPs: []netip.Prefix{
netip.MustParsePrefix("100.64.0.1/32"),
netip.MustParsePrefix("0.0.0.0/0"),
netip.MustParsePrefix("192.168.0.0/24"),
},
Endpoints: []string{},
DERP: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{}),
Created: created,
Tags: []string{},
PrimaryRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
LastSeen: &lastSeen,
Online: new(bool),
KeepAlive: true,
MachineAuthorized: true,
Capabilities: []string{
tailcfg.CapabilityFileSharing,
tailcfg.CapabilityAdmin,
tailcfg.CapabilitySSH,
},
},
Node: tailMini,
KeepAlive: false,
DERPMap: &tailcfg.DERPMap{},
Peers: []*tailcfg.Node{},
DNSConfig: &tailcfg.DNSConfig{},
@@ -293,64 +369,11 @@ func Test_fullMapResponse(t *testing.T) {
wantErr: false,
},
{
name: "no-pol-map-response",
pol: &policy.ACLPolicy{},
machine: &types.Machine{
ID: 0,
MachineKey: "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
NodeKey: "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
DiscoKey: "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
Hostname: "mini",
GivenName: "mini",
UserID: 0,
User: types.User{Name: "mini"},
ForcedTags: []string{},
LastSeen: &lastSeen,
Expiry: &expire,
HostInfo: types.HostInfo{},
Endpoints: []string{},
Routes: []types.Route{
{
Prefix: types.IPPrefix(netip.MustParsePrefix("0.0.0.0/0")),
Advertised: true,
Enabled: true,
IsPrimary: false,
},
{
Prefix: types.IPPrefix(netip.MustParsePrefix("192.168.0.0/24")),
Advertised: true,
Enabled: true,
IsPrimary: true,
},
{
Prefix: types.IPPrefix(netip.MustParsePrefix("172.0.0.0/10")),
Advertised: true,
Enabled: false,
IsPrimary: true,
},
},
CreatedAt: created,
},
name: "no-pol-with-peer-map-response",
pol: &policy.ACLPolicy{},
machine: mini,
peers: []types.Machine{
{
ID: 1,
MachineKey: "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
NodeKey: "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
DiscoKey: "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")},
Hostname: "peer1",
GivenName: "peer1",
UserID: 0,
User: types.User{Name: "mini"},
ForcedTags: []string{},
LastSeen: &lastSeen,
Expiry: &expire,
HostInfo: types.HostInfo{},
Endpoints: []string{},
Routes: []types.Route{},
CreatedAt: created,
},
peer1,
},
stripEmailDomain: false,
baseDomain: "",
@@ -360,77 +383,10 @@ func Test_fullMapResponse(t *testing.T) {
randomClientPort: false,
want: &tailcfg.MapResponse{
KeepAlive: false,
Node: &tailcfg.Node{
ID: 0,
StableID: "0",
Name: "mini",
User: 0,
Key: mustNK(
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
),
KeyExpiry: expire,
Machine: mustMK(
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
),
DiscoKey: mustDK(
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
),
Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")},
AllowedIPs: []netip.Prefix{
netip.MustParsePrefix("100.64.0.1/32"),
netip.MustParsePrefix("0.0.0.0/0"),
netip.MustParsePrefix("192.168.0.0/24"),
},
Endpoints: []string{},
DERP: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{}),
Created: created,
Tags: []string{},
PrimaryRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
LastSeen: &lastSeen,
Online: new(bool),
KeepAlive: true,
MachineAuthorized: true,
Capabilities: []string{
tailcfg.CapabilityFileSharing,
tailcfg.CapabilityAdmin,
tailcfg.CapabilitySSH,
},
},
DERPMap: &tailcfg.DERPMap{},
Node: tailMini,
DERPMap: &tailcfg.DERPMap{},
Peers: []*tailcfg.Node{
{
ID: 1,
StableID: "1",
Name: "peer1",
Key: mustNK(
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
),
KeyExpiry: expire,
Machine: mustMK(
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
),
DiscoKey: mustDK(
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
),
Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.2/32")},
AllowedIPs: []netip.Prefix{netip.MustParsePrefix("100.64.0.2/32")},
Endpoints: []string{},
DERP: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{}),
Created: created,
Tags: []string{},
PrimaryRoutes: []netip.Prefix{},
LastSeen: &lastSeen,
Online: new(bool),
KeepAlive: true,
MachineAuthorized: true,
Capabilities: []string{
tailcfg.CapabilityFileSharing,
tailcfg.CapabilityAdmin,
tailcfg.CapabilitySSH,
},
},
tailPeer1,
},
DNSConfig: &tailcfg.DNSConfig{},
Domain: "",
@@ -445,6 +401,55 @@
},
wantErr: false,
},
{
name: "with-pol-map-response",
pol: &policy.ACLPolicy{
ACLs: []policy.ACL{
{
Action: "accept",
Sources: []string{"mini"},
Destinations: []string{"100.64.0.2:*"},
},
},
},
machine: mini,
peers: []types.Machine{
peer1,
peer2,
},
stripEmailDomain: false,
baseDomain: "",
dnsConfig: &tailcfg.DNSConfig{},
derpMap: &tailcfg.DERPMap{},
logtail: false,
randomClientPort: false,
want: &tailcfg.MapResponse{
KeepAlive: false,
Node: tailMini,
DERPMap: &tailcfg.DERPMap{},
Peers: []*tailcfg.Node{
tailPeer1,
},
DNSConfig: &tailcfg.DNSConfig{},
Domain: "",
CollectServices: "false",
PacketFilter: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32"},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.64.0.2/32", Ports: tailcfg.PortRangeAny},
},
},
},
UserProfiles: []tailcfg.UserProfile{{LoginName: "mini", DisplayName: "mini"}},
SSHPolicy: nil,
ControlTime: &time.Time{},
Debug: &tailcfg.Debug{
DisableLogTail: true,
},
},
wantErr: false,
},
}
for _, tt := range tests {
@@ -467,6 +472,8 @@ func Test_fullMapResponse(t *testing.T) {
return
}
spew.Dump(got)
if diff := cmp.Diff(
tt.want,
got,


@@ -125,8 +125,9 @@ func GenerateFilterRules(
machines types.Machines,
stripEmailDomain bool,
) ([]tailcfg.FilterRule, *tailcfg.SSHPolicy, error) {
// If there is no policy defined, we default to allow all
if policy == nil {
return []tailcfg.FilterRule{}, &tailcfg.SSHPolicy{}, nil
return tailcfg.FilterAllowAll, &tailcfg.SSHPolicy{}, nil
}
rules, err := policy.generateFilterRules(machines, stripEmailDomain)
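The nil-policy default changes from an empty rule set to tailcfg.FilterAllowAll. The distinction matters downstream: callers such as fullMapResponse gate extra peer filtering on len(rules) > 0, and a one-rule allow-all keeps that path uniform without restricting anyone. A small sketch, assuming only the tailscale.com/tailcfg module:

package main

import (
    "fmt"

    "tailscale.com/tailcfg"
)

func main() {
    // No policy defined: default to allow-all rather than "no rules".
    rules := tailcfg.FilterAllowAll

    // Gates like fullMapResponse's len(rules) > 0 check now fire in the
    // no-policy case too, and the allow-all rule admits every peer.
    if len(rules) > 0 {
        fmt.Printf("filtering enabled with %d rule(s): %+v\n",
            len(rules), rules[0].SrcIPs)
    }
}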


@@ -2319,6 +2319,48 @@ func Test_getFilteredByACLPeers(t *testing.T) {
},
},
},
{
name: "failing-edge-case-during-p3-refactor",
args: args{
machines: []types.Machine{
{
ID: 1,
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")},
Hostname: "peer1",
User: types.User{Name: "mini"},
},
{
ID: 2,
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
Hostname: "peer2",
User: types.User{Name: "peer2"},
},
},
rules: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32"},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny},
{IP: "::/0", Ports: tailcfg.PortRangeAny},
},
},
},
machine: &types.Machine{
ID: 0,
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
Hostname: "mini",
User: types.User{Name: "mini"},
},
},
want: []types.Machine{
{
ID: 2,
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
Hostname: "peer2",
User: types.User{Name: "peer2"},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {


@@ -59,10 +59,6 @@ func (h *Headscale) handlePollCommon(
// update ACLRules with peer information (to update server tags if necessary)
if h.ACLPolicy != nil {
// TODO(kradalby): Since this is not blocking, I might have introduced a bug here.
// It will be resolved later as we change up the policy stuff.
h.policyUpdateChan <- struct{}{}
// update routes with peer information
err = h.db.EnableAutoApprovedRoutes(h.ACLPolicy, machine)
if err != nil {


@@ -1 +1,101 @@
package types
import (
"net/netip"
"testing"
"tailscale.com/tailcfg"
)
func Test_MachineCanAccess(t *testing.T) {
tests := []struct {
name string
machine1 Machine
machine2 Machine
rules []tailcfg.FilterRule
want bool
}{
{
name: "other-cant-access-src",
machine1: Machine{
ID: 0,
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
Hostname: "mini",
User: User{Name: "mini"},
},
machine2: Machine{
ID: 2,
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
Hostname: "peer2",
User: User{Name: "peer2"},
},
rules: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.2/32"},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny},
},
},
},
want: false,
},
{
name: "dest-cant-access-src",
machine1: Machine{
ID: 2,
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
Hostname: "peer2",
User: User{Name: "peer2"},
},
machine2: Machine{
ID: 0,
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")},
Hostname: "mini",
User: User{Name: "mini"},
},
rules: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.2/32"},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny},
},
},
},
want: false,
},
{
name: "src-can-access-dest",
machine1: Machine{
ID: 0,
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")},
Hostname: "mini",
User: User{Name: "mini"},
},
machine2: Machine{
ID: 2,
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
Hostname: "peer2",
User: User{Name: "peer2"},
},
rules: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.2/32"},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny},
},
},
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := tt.machine1.CanAccess(tt.rules, &tt.machine2)
if got != tt.want {
t.Errorf("canAccess() failed: want (%t), got (%t)", tt.want, got)
}
})
}
}
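The table above pins down the semantics of Machine.CanAccess: the source machine must match a rule's SrcIPs, and the destination must match the IP of one of that rule's DstPorts. A simplified, self-contained model consistent with those three cases (toy string matching, not headscale's real prefix handling):

package main

import "fmt"

type NetPortRange struct{ IP string }

type FilterRule struct {
    SrcIPs   []string
    DstPorts []NetPortRange
}

type Machine struct{ IPs []string }

// canAccess models the behaviour the test asserts: m must match a rule's
// sources and dst one of its destinations for access to be granted.
func (m Machine) canAccess(rules []FilterRule, dst Machine) bool {
    for _, rule := range rules {
        if !matches(rule.SrcIPs, m.IPs) {
            continue
        }
        for _, dp := range rule.DstPorts {
            if matches([]string{dp.IP}, dst.IPs) {
                return true
            }
        }
    }
    return false
}

// matches does a toy textual comparison; the real code parses prefixes.
func matches(cidrs, ips []string) bool {
    for _, c := range cidrs {
        for _, ip := range ips {
            if c == ip+"/32" {
                return true
            }
        }
    }
    return false
}

func main() {
    rules := []FilterRule{{
        SrcIPs:   []string{"100.64.0.2/32"},
        DstPorts: []NetPortRange{{IP: "100.64.0.3/32"}},
    }}
    src := Machine{IPs: []string{"100.64.0.2"}}
    dst := Machine{IPs: []string{"100.64.0.3"}}
    fmt.Println(src.canAccess(rules, dst)) // true: src-can-access-dest
    fmt.Println(dst.canAccess(rules, src)) // false: dest-cant-access-src
}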