Mirror of https://github.com/juanfont/headscale.git (synced 2025-08-11 16:47:35 +00:00)
db: add sqlite "source of truth" schema

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

Committed by: Kristoffer Dalby
Parent: 855c48aec2
Commit: c6736dd6d6
@@ -11,9 +11,11 @@ import (
 	"github.com/stretchr/testify/assert"
 )
 
-const fiveHundredMillis = 500 * time.Millisecond
-const oneHundredMillis = 100 * time.Millisecond
-const fiftyMillis = 50 * time.Millisecond
+const (
+	fiveHundred = 500 * time.Millisecond
+	oneHundred  = 100 * time.Millisecond
+	fifty       = 50 * time.Millisecond
+)
 
 // TestEphemeralGarbageCollectorGoRoutineLeak is a test for a goroutine leak in EphemeralGarbageCollector().
 // It creates a new EphemeralGarbageCollector, schedules several nodes for deletion with a short expiry,
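Note: the comment above describes the leak check shared by these tests: record runtime.NumGoroutine() before starting the collector, then compare the count after shutdown. A minimal sketch of that pattern follows; the helper name and sleep duration are illustrative, not part of this change:

func checkGoroutineLeak(t *testing.T, startGoroutines int) {
	t.Helper()
	// Give straggling goroutines a chance to exit before counting.
	time.Sleep(100 * time.Millisecond)
	if finalGoroutines := runtime.NumGoroutine(); finalGoroutines > startGoroutines {
		t.Errorf("possible goroutine leak: %d at start, %d at end", startGoroutines, finalGoroutines)
	}
}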
@@ -41,7 +43,7 @@ func TestEphemeralGarbageCollectorGoRoutineLeak(t *testing.T) {
 	go gc.Start()
 
 	// Schedule several nodes for deletion with short expiry
-	const expiry = fiftyMillis
+	const expiry = fifty
 	const numNodes = 100
 
 	// Set up wait group for expected deletions
@@ -56,7 +58,7 @@ func TestEphemeralGarbageCollectorGoRoutineLeak(t *testing.T) {
 
 	// Check nodes are deleted
 	deleteMutex.Lock()
-	assert.Equal(t, numNodes, len(deletedIDs), "Not all nodes were deleted")
+	assert.Len(t, deletedIDs, numNodes, "Not all nodes were deleted")
 	deleteMutex.Unlock()
 
 	// Schedule and immediately cancel to test that part of the code
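This hunk (and several below) swaps assert.Equal over len(...) for testify's dedicated helpers, which print the slice contents on failure rather than two bare integers. For reference, the idioms side by side, sketched with github.com/stretchr/testify/assert:

// Length check: assert.Len reports the value of deletedIDs when it fails.
assert.Len(t, deletedIDs, numNodes, "Not all nodes were deleted")
// Zero-length check: assert.Empty is the clearer form of Equal(t, 0, len(...)).
assert.Empty(t, deletedIDs, "Node should not be deleted after cancellation")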
@@ -76,7 +78,7 @@ func TestEphemeralGarbageCollectorGoRoutineLeak(t *testing.T) {
 
 	// Give any potential leaked goroutines a chance to exit
 	// Still need a small sleep here as we're checking for absence of goroutines
-	time.Sleep(oneHundredMillis)
+	time.Sleep(oneHundred)
 
 	// Check for leaked goroutines
 	finalGoroutines := runtime.NumGoroutine()
@@ -112,7 +114,7 @@ func TestEphemeralGarbageCollectorReschedule(t *testing.T) {
 	go gc.Start()
 	defer gc.Close()
 
-	const shortExpiry = fiftyMillis
+	const shortExpiry = fifty
 	const longExpiry = 1 * time.Hour
 
 	nodeID := types.NodeID(1)
@@ -128,7 +130,7 @@ func TestEphemeralGarbageCollectorReschedule(t *testing.T) {
 
 	// Verify that the node was deleted once
 	deleteMutex.Lock()
-	assert.Equal(t, 1, len(deletedIDs), "Node should be deleted exactly once")
+	assert.Len(t, deletedIDs, 1, "Node should be deleted exactly once")
 	assert.Equal(t, nodeID, deletedIDs[0], "The correct node should be deleted")
 	deleteMutex.Unlock()
 }
@@ -155,7 +157,7 @@ func TestEphemeralGarbageCollectorCancelAndReschedule(t *testing.T) {
 	defer gc.Close()
 
 	nodeID := types.NodeID(1)
-	const expiry = fiftyMillis
+	const expiry = fifty
 
 	// Schedule node for deletion
 	gc.Schedule(nodeID, expiry)
@@ -172,7 +174,7 @@ func TestEphemeralGarbageCollectorCancelAndReschedule(t *testing.T) {
 	}
 
 	deleteMutex.Lock()
-	assert.Equal(t, 0, len(deletedIDs), "Node should not be deleted after cancellation")
+	assert.Empty(t, deletedIDs, "Node should not be deleted after cancellation")
 	deleteMutex.Unlock()
 
 	// Reschedule the node
@@ -189,7 +191,7 @@ func TestEphemeralGarbageCollectorCancelAndReschedule(t *testing.T) {
 
 	// Verify final state
 	deleteMutex.Lock()
-	assert.Equal(t, 1, len(deletedIDs), "Node should be deleted after rescheduling")
+	assert.Len(t, deletedIDs, 1, "Node should be deleted after rescheduling")
 	assert.Equal(t, nodeID, deletedIDs[0], "The correct node should be deleted")
 	deleteMutex.Unlock()
 }
@@ -212,7 +214,7 @@ func TestEphemeralGarbageCollectorCloseBeforeTimerFires(t *testing.T) {
 	go gc.Start()
 
 	const longExpiry = 1 * time.Hour
-	const shortExpiry = fiftyMillis
+	const shortExpiry = fifty
 
 	// Schedule node deletion with a long expiry
 	gc.Schedule(types.NodeID(1), longExpiry)
@@ -225,7 +227,7 @@ func TestEphemeralGarbageCollectorCloseBeforeTimerFires(t *testing.T) {
 
 	// Verify that no deletion occurred
 	deleteMutex.Lock()
-	assert.Equal(t, 0, len(deletedIDs), "No node should be deleted when GC is closed before timer fires")
+	assert.Empty(t, deletedIDs, "No node should be deleted when GC is closed before timer fires")
 	deleteMutex.Unlock()
 }
 
@@ -269,7 +271,7 @@ func TestEphemeralGarbageCollectorScheduleAfterClose(t *testing.T) {
 		// Give the GC time to fully close and clean up resources
 		// This is still time-based but only affects when we check the goroutine count,
 		// not the actual test logic
-		time.Sleep(oneHundredMillis)
+		time.Sleep(oneHundred)
 		close(gcClosedCheck)
 	}()
 
@@ -279,7 +281,7 @@ func TestEphemeralGarbageCollectorScheduleAfterClose(t *testing.T) {
 	gc.Schedule(nodeID, 1*time.Millisecond)
 
 	// Set up a timeout channel for our test
-	timeout := time.After(fiveHundredMillis)
+	timeout := time.After(fiveHundred)
 
 	// Check if any node was deleted (which shouldn't happen)
 	select {
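The select this hunk feeds into is the standard Go pattern for asserting that an event does not occur within a window: race a result channel against time.After. A condensed sketch of the pattern, with an illustrative channel name not taken from this diff:

select {
case id := <-deletedCh:
	t.Errorf("node %d was deleted even though the GC was closed", id)
case <-time.After(fiveHundred):
	// Timeout reached with no deletion: the expected outcome.
}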
@@ -329,7 +331,7 @@ func TestEphemeralGarbageCollectorConcurrentScheduleAndClose(t *testing.T) {
 	// Number of concurrent scheduling goroutines
 	const numSchedulers = 10
 	const nodesPerScheduler = 50
-	const schedulingDuration = fiveHundredMillis
+	const schedulingDuration = fiveHundred
 
 	// Use WaitGroup to wait for all scheduling goroutines to finish
 	var wg sync.WaitGroup
@@ -339,14 +341,14 @@
 	stopScheduling := make(chan struct{})
 
 	// Launch goroutines that continuously schedule nodes
-	for i := 0; i < numSchedulers; i++ {
+	for schedulerIndex := range numSchedulers {
 		go func(schedulerID int) {
 			defer wg.Done()
 
 			baseNodeID := schedulerID * nodesPerScheduler
 
 			// Keep scheduling nodes until signaled to stop
-			for j := 0; j < nodesPerScheduler; j++ {
+			for j := range nodesPerScheduler {
 				select {
 				case <-stopScheduling:
 					return
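The loop rewrites in this hunk use Go 1.22's range-over-integer form: for j := range nodesPerScheduler counts j from 0 through nodesPerScheduler-1, exactly like the three-clause loop it replaces. A minimal illustration (requires go 1.22 or newer in go.mod):

// Both loops print 0, 1, 2: range over an int yields 0..n-1.
for i := 0; i < 3; i++ {
	fmt.Println(i)
}
for i := range 3 {
	fmt.Println(i)
}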
@@ -358,7 +360,7 @@ func TestEphemeralGarbageCollectorConcurrentScheduleAndClose(t *testing.T) {
 					time.Sleep(time.Duration(rand.Intn(5)) * time.Millisecond)
 				}
 			}
-		}(i)
+		}(schedulerIndex)
 	}
 
 	// After a short delay, close the garbage collector while schedulers are still running
@@ -377,7 +379,7 @@ func TestEphemeralGarbageCollectorConcurrentScheduleAndClose(t *testing.T) {
 	wg.Wait()
 
 	// Wait a bit longer to allow any leaked goroutines to do their work
-	time.Sleep(oneHundredMillis)
+	time.Sleep(oneHundred)
 
 	// Check for leaks
 	finalGoroutines := runtime.NumGoroutine()