2024-08-27 18:54:28 +02:00
|
|
|
|
package db
|
|
|
|
|
|
|
|
|
|
|
|
import (
	"database/sql"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"gorm.io/gorm"

	"zgo.at/zcache/v2"
)
|
|
|
|
|
|
|
2025-05-21 11:08:33 +02:00
|
|
|
|
// TestSQLiteMigrationAndDataValidation tests specific SQLite migration scenarios
// and validates data integrity after migration. All migrations that require data validation
// should be added here.
//
// Each case names a plain-text SQL dump under testdata/sqlite; the dump is
// loaded into a fresh database, migrations run via dbForTestWithPath, and
// wantFunc asserts that the migrated data survived intact.
func TestSQLiteMigrationAndDataValidation(t *testing.T) {
	tests := []struct {
		dbPath   string // path to a .sql dump used to seed the pre-migration database
		wantFunc func(*testing.T, *HSDatabase) // post-migration data-integrity assertions
	}{
		// at 14:15:06 ❯ go run ./cmd/headscale preauthkeys list
		// ID | Key | Reusable | Ephemeral | Used | Expiration | Created | Tags
		// 1 | 09b28f.. | false | false | false | 2024-09-27 | 2024-09-27 | tag:derp
		// 2 | 3112b9.. | false | false | false | 2024-09-27 | 2024-09-27 | tag:derp
		{
			dbPath: "testdata/sqlite/failing-node-preauth-constraint_dump.sql",
			wantFunc: func(t *testing.T, hsdb *HSDatabase) {
				t.Helper()
				// Comprehensive data preservation validation for node-preauth constraint issue
				// Expected data from dump: 1 user, 2 api_keys, 6 nodes

				// Verify users data preservation
				users, err := Read(hsdb.DB, func(rx *gorm.DB) ([]types.User, error) {
					return ListUsers(rx)
				})
				require.NoError(t, err)
				assert.Len(t, users, 1, "should preserve all 1 user from original schema")

				// Verify api_keys data preservation.
				// Raw SQL is used here because there is no list helper for api keys.
				var apiKeyCount int
				err = hsdb.DB.Raw("SELECT COUNT(*) FROM api_keys").Scan(&apiKeyCount).Error
				require.NoError(t, err)
				assert.Equal(t, 2, apiKeyCount, "should preserve all 2 api_keys from original schema")

				// Verify nodes data preservation and field validation
				nodes, err := Read(hsdb.DB, func(rx *gorm.DB) (types.Nodes, error) {
					return ListNodes(rx)
				})
				require.NoError(t, err)
				assert.Len(t, nodes, 6, "should preserve all 6 nodes from original schema")

				// Every node's key material must have survived the migration
				// with its textual prefix intact; none of the dumped nodes
				// were registered via a pre-auth key, so AuthKey/AuthKeyID
				// must remain unset.
				for _, node := range nodes {
					assert.Falsef(t, node.MachineKey.IsZero(), "expected non zero machinekey")
					assert.Contains(t, node.MachineKey.String(), "mkey:")
					assert.Falsef(t, node.NodeKey.IsZero(), "expected non zero nodekey")
					assert.Contains(t, node.NodeKey.String(), "nodekey:")
					assert.Falsef(t, node.DiscoKey.IsZero(), "expected non zero discokey")
					assert.Contains(t, node.DiscoKey.String(), "discokey:")
					assert.Nil(t, node.AuthKey)
					assert.Nil(t, node.AuthKeyID)
				}
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.dbPath, func(t *testing.T) {
			// Only plain-text SQL dumps are supported; binary formats
			// cannot be replayed by createSQLiteFromSQLFile.
			if !strings.HasSuffix(tt.dbPath, ".sql") {
				t.Fatalf("TestSQLiteMigrationAndDataValidation only supports .sql files, got: %s", tt.dbPath)
			}

			// Seeds the database from the dump and runs all migrations.
			hsdb := dbForTestWithPath(t, tt.dbPath)
			if tt.wantFunc != nil {
				tt.wantFunc(t, hsdb)
			}
		})
	}
}
|
|
|
|
|
|
|
2025-05-21 11:08:33 +02:00
|
|
|
|
// emptyCache returns a fresh, unpopulated node-registration cache with the
// default test expiry (one-minute TTL, hourly cleanup sweep), suitable for
// passing to NewHeadscaleDatabase in tests.
func emptyCache() *zcache.Cache[types.RegistrationID, types.RegisterNode] {
	return zcache.New[types.RegistrationID, types.RegisterNode](time.Minute, time.Hour)
}
|
2024-08-27 18:54:28 +02:00
|
|
|
|
|
2025-05-21 11:08:33 +02:00
|
|
|
|
// createSQLiteFromSQLFile creates (or opens) a SQLite database at dbPath and
// executes the SQL statements from the plain-text dump at sqlFilePath into it.
// The dump is expected to be `sqlite3 <db> .dump` output.
//
// Errors are wrapped with the operation and the path involved so callers can
// tell which step failed.
func createSQLiteFromSQLFile(sqlFilePath, dbPath string) error {
	db, err := sql.Open("sqlite", dbPath)
	if err != nil {
		return fmt.Errorf("opening sqlite database %q: %w", dbPath, err)
	}
	defer db.Close()

	schemaContent, err := os.ReadFile(sqlFilePath)
	if err != nil {
		return fmt.Errorf("reading SQL dump %q: %w", sqlFilePath, err)
	}

	// The whole dump is executed as a single batch; sqlite drivers accept
	// multiple semicolon-separated statements in one Exec call.
	if _, err := db.Exec(string(schemaContent)); err != nil {
		return fmt.Errorf("executing SQL dump %q: %w", sqlFilePath, err)
	}

	return nil
}
|
2024-11-18 17:33:46 +01:00
|
|
|
|
|
2024-11-23 11:19:52 +01:00
|
|
|
|
// requireConstraintFailed checks if the error is a constraint failure with
|
|
|
|
|
|
// either SQLite and PostgreSQL error messages.
|
|
|
|
|
|
func requireConstraintFailed(t *testing.T, err error) {
|
|
|
|
|
|
t.Helper()
|
|
|
|
|
|
require.Error(t, err)
|
|
|
|
|
|
if !strings.Contains(err.Error(), "UNIQUE constraint failed:") && !strings.Contains(err.Error(), "violates unique constraint") {
|
|
|
|
|
|
require.Failf(t, "expected error to contain a constraint failure, got: %s", err.Error())
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2024-11-18 17:33:46 +01:00
|
|
|
|
// TestConstraints verifies the user-table uniqueness rules on both SQLite and
// PostgreSQL: usernames must be unique among non-OIDC (CLI-created) users,
// provider identifiers must be unique among OIDC users, but a CLI user and an
// OIDC user may share the same name.
func TestConstraints(t *testing.T) {
	tests := []struct {
		name string
		run  func(*testing.T, *gorm.DB)
	}{
		{
			// Two CLI users (no provider identifier) may not share a name.
			name: "no-duplicate-username-if-no-oidc",
			run: func(t *testing.T, db *gorm.DB) {
				_, err := CreateUser(db, types.User{Name: "user1"})
				require.NoError(t, err)
				_, err = CreateUser(db, types.User{Name: "user1"})
				requireConstraintFailed(t, err)
			},
		},
		{
			// Two OIDC users may not share the same provider identifier,
			// even when their names also collide.
			name: "no-oidc-duplicate-username-and-id",
			run: func(t *testing.T, db *gorm.DB) {
				user := types.User{
					Model: gorm.Model{ID: 1},
					Name:  "user1",
				}
				user.ProviderIdentifier = sql.NullString{String: "http://test.com/user1", Valid: true}

				err := db.Save(&user).Error
				require.NoError(t, err)

				user = types.User{
					Model: gorm.Model{ID: 2},
					Name:  "user1",
				}
				user.ProviderIdentifier = sql.NullString{String: "http://test.com/user1", Valid: true}

				err = db.Save(&user).Error
				requireConstraintFailed(t, err)
			},
		},
		{
			// Duplicate provider identifier is rejected even when the
			// usernames differ.
			name: "no-oidc-duplicate-id",
			run: func(t *testing.T, db *gorm.DB) {
				user := types.User{
					Model: gorm.Model{ID: 1},
					Name:  "user1",
				}
				user.ProviderIdentifier = sql.NullString{String: "http://test.com/user1", Valid: true}

				err := db.Save(&user).Error
				require.NoError(t, err)

				user = types.User{
					Model: gorm.Model{ID: 2},
					Name:  "user1.1",
				}
				user.ProviderIdentifier = sql.NullString{String: "http://test.com/user1", Valid: true}

				err = db.Save(&user).Error
				requireConstraintFailed(t, err)
			},
		},
		{
			// A CLI user and an OIDC user may share the same name.
			name: "allow-duplicate-username-cli-then-oidc",
			run: func(t *testing.T, db *gorm.DB) {
				_, err := CreateUser(db, types.User{Name: "user1"}) // Create CLI username
				require.NoError(t, err)

				user := types.User{
					Name:               "user1",
					ProviderIdentifier: sql.NullString{String: "http://test.com/user1", Valid: true},
				}

				err = db.Save(&user).Error
				require.NoError(t, err)
			},
		},
		{
			// Same as above, but with the OIDC user created first.
			name: "allow-duplicate-username-oidc-then-cli",
			run: func(t *testing.T, db *gorm.DB) {
				user := types.User{
					Name:               "user1",
					ProviderIdentifier: sql.NullString{String: "http://test.com/user1", Valid: true},
				}

				err := db.Save(&user).Error
				require.NoError(t, err)

				_, err = CreateUser(db, types.User{Name: "user1"}) // Create CLI username
				require.NoError(t, err)
			},
		},
	}

	// Every case runs against both backends so constraint behavior stays
	// identical between SQLite and PostgreSQL.
	for _, tt := range tests {
		t.Run(tt.name+"-postgres", func(t *testing.T) {
			db := newPostgresTestDB(t)
			tt.run(t, db.DB.Debug())
		})
		t.Run(tt.name+"-sqlite", func(t *testing.T) {
			db, err := newSQLiteTestDB()
			if err != nil {
				t.Fatalf("creating database: %s", err)
			}

			tt.run(t, db.DB.Debug())
		})
	}
}
|
2025-01-23 14:58:42 +01:00
|
|
|
|
|
2025-05-21 11:08:33 +02:00
|
|
|
|
// TestPostgresMigrationAndDataValidation tests specific PostgreSQL migration scenarios
|
|
|
|
|
|
// and validates data integrity after migration. All migrations that require data validation
|
|
|
|
|
|
// should be added here.
|
|
|
|
|
|
//
|
|
|
|
|
|
// TODO(kradalby): Convert to use plain text SQL dumps instead of binary .pssql dumps for consistency
|
|
|
|
|
|
// with SQLite tests and easier version control.
|
|
|
|
|
|
func TestPostgresMigrationAndDataValidation(t *testing.T) {
|
2025-01-23 14:58:42 +01:00
|
|
|
|
tests := []struct {
|
|
|
|
|
|
name string
|
|
|
|
|
|
dbPath string
|
|
|
|
|
|
wantFunc func(*testing.T, *HSDatabase)
|
2025-12-02 12:01:25 +01:00
|
|
|
|
}{}
|
2025-01-23 14:58:42 +01:00
|
|
|
|
|
|
|
|
|
|
for _, tt := range tests {
|
|
|
|
|
|
t.Run(tt.name, func(t *testing.T) {
|
|
|
|
|
|
u := newPostgresDBForTest(t)
|
|
|
|
|
|
|
|
|
|
|
|
pgRestorePath, err := exec.LookPath("pg_restore")
|
|
|
|
|
|
if err != nil {
|
|
|
|
|
|
t.Fatal("pg_restore not found in PATH. Please install it and ensure it is accessible.")
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Construct the pg_restore command
|
|
|
|
|
|
cmd := exec.Command(pgRestorePath, "--verbose", "--if-exists", "--clean", "--no-owner", "--dbname", u.String(), tt.dbPath)
|
|
|
|
|
|
|
|
|
|
|
|
// Set the output streams
|
|
|
|
|
|
cmd.Stdout = os.Stdout
|
|
|
|
|
|
cmd.Stderr = os.Stderr
|
|
|
|
|
|
|
|
|
|
|
|
// Execute the command
|
|
|
|
|
|
err = cmd.Run()
|
|
|
|
|
|
if err != nil {
|
|
|
|
|
|
t.Fatalf("failed to restore postgres database: %s", err)
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
db = newHeadscaleDBFromPostgresURL(t, u)
|
|
|
|
|
|
|
|
|
|
|
|
if tt.wantFunc != nil {
|
|
|
|
|
|
tt.wantFunc(t, db)
|
|
|
|
|
|
}
|
|
|
|
|
|
})
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
2025-02-26 07:22:55 -08:00
|
|
|
|
|
|
|
|
|
|
// dbForTest returns a fresh, fully migrated SQLite-backed test database
// created in a temporary directory (no SQL dump seeding).
func dbForTest(t *testing.T) *HSDatabase {
	t.Helper()
	return dbForTestWithPath(t, "")
}
|
|
|
|
|
|
|
|
|
|
|
|
func dbForTestWithPath(t *testing.T, sqlFilePath string) *HSDatabase {
|
|
|
|
|
|
t.Helper()
|
2025-02-26 07:22:55 -08:00
|
|
|
|
|
|
|
|
|
|
dbPath := t.TempDir() + "/headscale_test.db"
|
|
|
|
|
|
|
2025-05-21 11:08:33 +02:00
|
|
|
|
// If SQL file path provided, validate and create database from it
|
|
|
|
|
|
if sqlFilePath != "" {
|
|
|
|
|
|
// Validate that the file is a SQL text file
|
|
|
|
|
|
if !strings.HasSuffix(sqlFilePath, ".sql") {
|
|
|
|
|
|
t.Fatalf("dbForTestWithPath only accepts .sql files, got: %s", sqlFilePath)
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
err := createSQLiteFromSQLFile(sqlFilePath, dbPath)
|
|
|
|
|
|
if err != nil {
|
|
|
|
|
|
t.Fatalf("setting up database from SQL file %s: %s", sqlFilePath, err)
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2025-02-26 07:22:55 -08:00
|
|
|
|
db, err := NewHeadscaleDatabase(
|
|
|
|
|
|
types.DatabaseConfig{
|
|
|
|
|
|
Type: "sqlite3",
|
|
|
|
|
|
Sqlite: types.SqliteConfig{
|
|
|
|
|
|
Path: dbPath,
|
|
|
|
|
|
},
|
|
|
|
|
|
},
|
|
|
|
|
|
"",
|
|
|
|
|
|
emptyCache(),
|
|
|
|
|
|
)
|
|
|
|
|
|
if err != nil {
|
|
|
|
|
|
t.Fatalf("setting up database: %s", err)
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2025-05-21 11:08:33 +02:00
|
|
|
|
if sqlFilePath != "" {
|
|
|
|
|
|
t.Logf("database set up from %s at: %s", sqlFilePath, dbPath)
|
|
|
|
|
|
} else {
|
|
|
|
|
|
t.Logf("database set up at: %s", dbPath)
|
|
|
|
|
|
}
|
2025-02-26 07:22:55 -08:00
|
|
|
|
|
|
|
|
|
|
return db
|
|
|
|
|
|
}
|
2025-05-21 11:08:33 +02:00
|
|
|
|
|
|
|
|
|
|
// TestSQLiteAllTestdataMigrations tests migration compatibility across all SQLite schemas
|
|
|
|
|
|
// in the testdata directory. It verifies they can be successfully migrated to the current
|
|
|
|
|
|
// schema version. This test only validates migration success, not data integrity.
|
|
|
|
|
|
//
|
2025-11-13 15:49:27 +01:00
|
|
|
|
// All test database files are SQL dumps (created with `sqlite3 headscale.db .dump`) generated
|
|
|
|
|
|
// with old Headscale binaries on empty databases (no user/node data). These dumps include the
|
|
|
|
|
|
// migration history in the `migrations` table, which allows the migration system to correctly
|
|
|
|
|
|
// skip already-applied migrations and only run new ones.
|
2025-05-21 11:08:33 +02:00
|
|
|
|
func TestSQLiteAllTestdataMigrations(t *testing.T) {
|
|
|
|
|
|
t.Parallel()
|
|
|
|
|
|
schemas, err := os.ReadDir("testdata/sqlite")
|
|
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
|
|
|
|
t.Logf("loaded %d schemas", len(schemas))
|
|
|
|
|
|
|
|
|
|
|
|
for _, schema := range schemas {
|
|
|
|
|
|
if schema.IsDir() {
|
|
|
|
|
|
continue
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
t.Logf("validating: %s", schema.Name())
|
|
|
|
|
|
|
|
|
|
|
|
t.Run(schema.Name(), func(t *testing.T) {
|
|
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
|
|
|
|
dbPath := t.TempDir() + "/headscale_test.db"
|
|
|
|
|
|
|
|
|
|
|
|
// Setup a database with the old schema
|
|
|
|
|
|
schemaPath := filepath.Join("testdata/sqlite", schema.Name())
|
|
|
|
|
|
err := createSQLiteFromSQLFile(schemaPath, dbPath)
|
|
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
|
|
|
|
_, err = NewHeadscaleDatabase(
|
|
|
|
|
|
types.DatabaseConfig{
|
|
|
|
|
|
Type: "sqlite3",
|
|
|
|
|
|
Sqlite: types.SqliteConfig{
|
|
|
|
|
|
Path: dbPath,
|
|
|
|
|
|
},
|
|
|
|
|
|
},
|
|
|
|
|
|
"",
|
|
|
|
|
|
emptyCache(),
|
|
|
|
|
|
)
|
|
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
})
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|