2023-05-21 19:37:59 +03:00
|
|
|
package db
|
2020-06-21 12:32:08 +02:00
|
|
|
|
|
|
|
import (
|
2022-07-06 13:39:10 +02:00
|
|
|
"context"
|
2023-11-19 22:37:04 +01:00
|
|
|
"database/sql"
|
2025-05-21 11:08:33 +02:00
|
|
|
_ "embed"
|
2024-09-29 13:00:27 +02:00
|
|
|
"encoding/json"
|
2020-06-21 12:32:08 +02:00
|
|
|
"errors"
|
2022-03-01 16:31:25 +00:00
|
|
|
"fmt"
|
2024-04-17 07:03:06 +02:00
|
|
|
"net/netip"
|
2024-02-17 13:36:19 +01:00
|
|
|
"path/filepath"
|
2025-02-26 07:22:55 -08:00
|
|
|
"slices"
|
2024-02-09 07:27:00 +01:00
|
|
|
"strconv"
|
2023-11-16 17:55:29 +01:00
|
|
|
"strings"
|
2022-02-23 16:15:20 +00:00
|
|
|
"time"
|
2020-06-21 12:32:08 +02:00
|
|
|
|
2022-02-22 16:18:25 +00:00
|
|
|
"github.com/glebarez/sqlite"
|
2023-12-10 15:46:14 +01:00
|
|
|
"github.com/go-gormigrate/gormigrate/v2"
|
2025-05-21 11:08:33 +02:00
|
|
|
"github.com/juanfont/headscale/hscontrol/db/sqliteconfig"
|
2024-07-22 08:56:00 +02:00
|
|
|
"github.com/juanfont/headscale/hscontrol/types"
|
|
|
|
"github.com/juanfont/headscale/hscontrol/util"
|
2022-02-28 18:05:03 +00:00
|
|
|
"github.com/rs/zerolog/log"
|
2025-05-21 11:08:33 +02:00
|
|
|
"github.com/tailscale/squibble"
|
2021-06-24 15:44:19 +02:00
|
|
|
"gorm.io/driver/postgres"
|
|
|
|
"gorm.io/gorm"
|
2021-07-11 13:13:36 +02:00
|
|
|
"gorm.io/gorm/logger"
|
2024-10-02 11:41:58 +02:00
|
|
|
"gorm.io/gorm/schema"
|
2025-03-10 16:20:29 +01:00
|
|
|
"tailscale.com/net/tsaddr"
|
2024-09-29 13:00:27 +02:00
|
|
|
"tailscale.com/util/set"
|
Redo OIDC configuration (#2020)
expand user, add claims to user
This commit expands the user table with additional fields that
can be retrieved from OIDC providers (and other places) and
uses this data in various tailscale response objects if it is
available.
This is the beginning of implementing
https://docs.google.com/document/d/1X85PMxIaVWDF6T_UPji3OeeUqVBcGj_uHRM5CI-AwlY/edit
trying to make OIDC more coherant and maintainable in addition
to giving the user a better experience and integration with a
provider.
remove usernames in magic dns, normalisation of emails
this commit removes the option to have usernames as part of MagicDNS
domains and headscale will now align with Tailscale, where there is a
root domain, and the machine name.
In addition, the various normalisation functions for dns names has been
made lighter not caring about username and special character that wont
occur.
Email are no longer normalised as part of the policy processing.
untagle oidc and regcache, use typed cache
This commits stops reusing the registration cache for oidc
purposes and switches the cache to be types and not use any
allowing the removal of a bunch of casting.
try to make reauth/register branches clearer in oidc
Currently there was a function that did a bunch of stuff,
finding the machine key, trying to find the node, reauthing
the node, returning some status, and it was called validate
which was very confusing.
This commit tries to split this into what to do if the node
exists, if it needs to register etc.
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-10-02 14:50:17 +02:00
|
|
|
"zgo.at/zcache/v2"
|
2020-06-21 12:32:08 +02:00
|
|
|
)
|
|
|
|
|
2025-05-21 11:08:33 +02:00
|
|
|
// dbSchema holds the contents of schema.sql, embedded into the binary at
// build time via the go:embed directive below.
//
//go:embed schema.sql
var dbSchema string
|
|
|
|
|
2024-10-02 11:41:58 +02:00
|
|
|
// init registers the "text" serializer with GORM's schema package so that
// model fields tagged with `serializer:text` are (de)serialized through
// TextSerialiser instead of GORM's default handling.
func init() {
	schema.RegisterSerializer("text", TextSerialiser{})
}
|
|
|
|
|
2023-12-10 15:46:14 +01:00
|
|
|
// errDatabaseNotSupported is returned when the configured database type is
// not one of the supported backends.
var errDatabaseNotSupported = errors.New("database type not supported")
|
2020-06-21 12:32:08 +02:00
|
|
|
|
2025-05-21 11:08:33 +02:00
|
|
|
// errForeignKeyConstraintsViolated is returned when a database operation
// would violate a foreign key constraint.
var errForeignKeyConstraintsViolated = errors.New("foreign key constraints violated")
|
|
|
|
|
|
|
|
const (
	// maxIdleConns and maxOpenConns bound the database/sql connection pool.
	maxIdleConns = 100
	maxOpenConns = 100

	// contextTimeoutSecs is a timeout in seconds, presumably applied to
	// database operation contexts — confirm at the call sites.
	contextTimeoutSecs = 10
)
|
|
|
|
|
2021-02-23 20:11:30 +01:00
|
|
|
// KV is a key-value store in a psql table. For future use...
// TODO(kradalby): Is this used for anything?
// NOTE(review): migration "202312101430" below drops the "kvs" table as
// unused, so this type is likely vestigial — confirm before relying on it.
type KV struct {
	Key   string
	Value string
}
|
|
|
|
|
2023-05-11 09:09:18 +02:00
|
|
|
// HSDatabase bundles the GORM database handle with the database
// configuration and caches used by headscale's storage layer.
type HSDatabase struct {
	// DB is the underlying GORM connection (SQLite or Postgres).
	DB *gorm.DB

	// cfg is the database configuration this handle was opened with.
	cfg *types.DatabaseConfig

	// regCache maps registration IDs to pending node registrations;
	// presumably used to track in-flight registrations — confirm at callers.
	regCache *zcache.Cache[types.RegistrationID, types.RegisterNode]

	// baseDomain is the MagicDNS root domain — assumed; TODO confirm usage.
	baseDomain string
}
|
|
|
|
|
|
|
|
// TODO(kradalby): assemble this struct from toptions or something typed
|
|
|
|
// rather than arguments.
|
|
|
|
func NewHeadscaleDatabase(
|
2024-02-09 07:27:00 +01:00
|
|
|
cfg types.DatabaseConfig,
|
2023-05-11 09:09:18 +02:00
|
|
|
baseDomain string,
|
2025-01-26 22:20:11 +01:00
|
|
|
regCache *zcache.Cache[types.RegistrationID, types.RegisterNode],
|
2023-05-11 09:09:18 +02:00
|
|
|
) (*HSDatabase, error) {
|
2024-02-09 07:27:00 +01:00
|
|
|
dbConn, err := openDB(cfg)
|
2020-06-21 12:32:08 +02:00
|
|
|
if err != nil {
|
2023-05-11 09:09:18 +02:00
|
|
|
return nil, err
|
2020-06-21 12:32:08 +02:00
|
|
|
}
|
2021-07-04 21:40:46 +02:00
|
|
|
|
2024-02-09 22:04:28 +05:30
|
|
|
migrations := gormigrate.New(
|
|
|
|
dbConn,
|
|
|
|
gormigrate.DefaultOptions,
|
|
|
|
[]*gormigrate.Migration{
|
2024-08-27 18:54:28 +02:00
|
|
|
// New migrations must be added as transactions at the end of this list.
|
|
|
|
// The initial migration here is quite messy, completely out of order and
|
2024-02-09 22:04:28 +05:30
|
|
|
// has no versioning and is the tech debt of not having versioned migrations
|
|
|
|
// prior to this point. This first migration is all DB changes to bring a DB
|
|
|
|
// up to 0.23.0.
|
|
|
|
{
|
|
|
|
ID: "202312101416",
|
|
|
|
Migrate: func(tx *gorm.DB) error {
|
|
|
|
if cfg.Type == types.DatabasePostgres {
|
|
|
|
tx.Exec(`create extension if not exists "uuid-ossp";`)
|
|
|
|
}
|
|
|
|
|
|
|
|
_ = tx.Migrator().RenameTable("namespaces", "users")
|
|
|
|
|
|
|
|
// the big rename from Machine to Node
|
|
|
|
_ = tx.Migrator().RenameTable("machines", "nodes")
|
|
|
|
_ = tx.Migrator().
|
|
|
|
RenameColumn(&types.Route{}, "machine_id", "node_id")
|
|
|
|
|
|
|
|
err = tx.AutoMigrate(types.User{})
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2023-12-10 15:46:14 +01:00
|
|
|
}
|
|
|
|
|
2024-02-09 22:04:28 +05:30
|
|
|
_ = tx.Migrator().
|
|
|
|
RenameColumn(&types.Node{}, "namespace_id", "user_id")
|
|
|
|
_ = tx.Migrator().
|
|
|
|
RenameColumn(&types.PreAuthKey{}, "namespace_id", "user_id")
|
|
|
|
|
|
|
|
_ = tx.Migrator().
|
|
|
|
RenameColumn(&types.Node{}, "ip_address", "ip_addresses")
|
|
|
|
_ = tx.Migrator().RenameColumn(&types.Node{}, "name", "hostname")
|
|
|
|
|
|
|
|
// GivenName is used as the primary source of DNS names, make sure
|
|
|
|
// the field is populated and normalized if it was not when the
|
|
|
|
// node was registered.
|
|
|
|
_ = tx.Migrator().
|
|
|
|
RenameColumn(&types.Node{}, "nickname", "given_name")
|
|
|
|
|
2024-05-16 02:40:14 +02:00
|
|
|
dbConn.Model(&types.Node{}).Where("auth_key_id = ?", 0).Update("auth_key_id", nil)
|
2024-05-27 11:53:37 +01:00
|
|
|
// If the Node table has a column for registered,
|
2025-02-05 16:10:18 +01:00
|
|
|
// find all occurrences of "false" and drop them. Then
|
2024-02-09 22:04:28 +05:30
|
|
|
// remove the column.
|
|
|
|
if tx.Migrator().HasColumn(&types.Node{}, "registered") {
|
2023-12-10 15:46:14 +01:00
|
|
|
log.Info().
|
2024-02-09 22:04:28 +05:30
|
|
|
Msg(`Database has legacy "registered" column in node, removing...`)
|
|
|
|
|
|
|
|
nodes := types.Nodes{}
|
|
|
|
if err := tx.Not("registered").Find(&nodes).Error; err != nil {
|
|
|
|
log.Error().Err(err).Msg("Error accessing db")
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, node := range nodes {
|
|
|
|
log.Info().
|
2023-12-10 15:46:14 +01:00
|
|
|
Str("node", node.Hostname).
|
|
|
|
Str("machine_key", node.MachineKey.ShortString()).
|
2024-02-09 22:04:28 +05:30
|
|
|
Msg("Deleting unregistered node")
|
|
|
|
if err := tx.Delete(&types.Node{}, node.ID).Error; err != nil {
|
|
|
|
log.Error().
|
|
|
|
Err(err).
|
|
|
|
Str("node", node.Hostname).
|
|
|
|
Str("machine_key", node.MachineKey.ShortString()).
|
|
|
|
Msg("Error deleting unregistered node")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
err := tx.Migrator().DropColumn(&types.Node{}, "registered")
|
|
|
|
if err != nil {
|
|
|
|
log.Error().Err(err).Msg("Error dropping registered column")
|
2023-12-10 15:46:14 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-08-29 23:08:54 -07:00
|
|
|
// Remove any invalid routes associated with a node that does not exist.
|
2024-08-27 18:54:28 +02:00
|
|
|
if tx.Migrator().HasTable(&types.Route{}) && tx.Migrator().HasTable(&types.Node{}) {
|
|
|
|
err := tx.Exec("delete from routes where node_id not in (select id from nodes)").Error
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2024-08-29 23:08:54 -07:00
|
|
|
err = tx.AutoMigrate(&types.Route{})
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2023-12-10 15:46:14 +01:00
|
|
|
}
|
|
|
|
|
2024-02-09 22:04:28 +05:30
|
|
|
err = tx.AutoMigrate(&types.Node{})
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2023-12-10 15:46:14 +01:00
|
|
|
}
|
|
|
|
|
2024-02-09 22:04:28 +05:30
|
|
|
// Ensure all keys have correct prefixes
|
|
|
|
// https://github.com/tailscale/tailscale/blob/main/types/key/node.go#L35
|
|
|
|
type result struct {
|
|
|
|
ID uint64
|
|
|
|
MachineKey string
|
|
|
|
NodeKey string
|
|
|
|
DiscoKey string
|
|
|
|
}
|
|
|
|
var results []result
|
|
|
|
err = tx.Raw("SELECT id, node_key, machine_key, disco_key FROM nodes").
|
|
|
|
Find(&results).
|
|
|
|
Error
|
2023-12-10 15:46:14 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2022-11-23 18:50:30 +00:00
|
|
|
|
2024-02-09 22:04:28 +05:30
|
|
|
for _, node := range results {
|
|
|
|
mKey := node.MachineKey
|
|
|
|
if !strings.HasPrefix(node.MachineKey, "mkey:") {
|
|
|
|
mKey = "mkey:" + node.MachineKey
|
|
|
|
}
|
|
|
|
nKey := node.NodeKey
|
|
|
|
if !strings.HasPrefix(node.NodeKey, "nodekey:") {
|
|
|
|
nKey = "nodekey:" + node.NodeKey
|
|
|
|
}
|
2023-12-10 15:46:14 +01:00
|
|
|
|
2024-02-09 22:04:28 +05:30
|
|
|
dKey := node.DiscoKey
|
|
|
|
if !strings.HasPrefix(node.DiscoKey, "discokey:") {
|
|
|
|
dKey = "discokey:" + node.DiscoKey
|
|
|
|
}
|
2023-12-10 15:46:14 +01:00
|
|
|
|
2024-02-09 22:04:28 +05:30
|
|
|
err := tx.Exec(
|
|
|
|
"UPDATE nodes SET machine_key = @mKey, node_key = @nKey, disco_key = @dKey WHERE ID = @id",
|
|
|
|
sql.Named("mKey", mKey),
|
|
|
|
sql.Named("nKey", nKey),
|
|
|
|
sql.Named("dKey", dKey),
|
|
|
|
sql.Named("id", node.ID),
|
|
|
|
).Error
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2023-12-10 15:46:14 +01:00
|
|
|
}
|
|
|
|
|
2024-02-09 22:04:28 +05:30
|
|
|
if tx.Migrator().HasColumn(&types.Node{}, "enabled_routes") {
|
|
|
|
log.Info().
|
|
|
|
Msgf("Database has legacy enabled_routes column in node, migrating...")
|
2023-12-10 15:46:14 +01:00
|
|
|
|
2024-02-09 22:04:28 +05:30
|
|
|
type NodeAux struct {
|
|
|
|
ID uint64
|
2024-10-02 11:41:58 +02:00
|
|
|
EnabledRoutes []netip.Prefix `gorm:"serializer:json"`
|
2024-02-09 22:04:28 +05:30
|
|
|
}
|
2023-12-10 15:46:14 +01:00
|
|
|
|
2024-02-09 22:04:28 +05:30
|
|
|
nodesAux := []NodeAux{}
|
|
|
|
err := tx.Table("nodes").
|
|
|
|
Select("id, enabled_routes").
|
|
|
|
Scan(&nodesAux).
|
|
|
|
Error
|
|
|
|
if err != nil {
|
|
|
|
log.Fatal().Err(err).Msg("Error accessing db")
|
|
|
|
}
|
|
|
|
for _, node := range nodesAux {
|
|
|
|
for _, prefix := range node.EnabledRoutes {
|
|
|
|
if err != nil {
|
|
|
|
log.Error().
|
|
|
|
Err(err).
|
|
|
|
Str("enabled_route", prefix.String()).
|
|
|
|
Msg("Error parsing enabled_route")
|
|
|
|
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
err = tx.Preload("Node").
|
2024-10-02 11:41:58 +02:00
|
|
|
Where("node_id = ? AND prefix = ?", node.ID, prefix).
|
2024-02-09 22:04:28 +05:30
|
|
|
First(&types.Route{}).
|
|
|
|
Error
|
|
|
|
if err == nil {
|
|
|
|
log.Info().
|
|
|
|
Str("enabled_route", prefix.String()).
|
|
|
|
Msg("Route already migrated to new table, skipping")
|
|
|
|
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
route := types.Route{
|
|
|
|
NodeID: node.ID,
|
|
|
|
Advertised: true,
|
|
|
|
Enabled: true,
|
2024-10-02 11:41:58 +02:00
|
|
|
Prefix: prefix,
|
2024-02-09 22:04:28 +05:30
|
|
|
}
|
|
|
|
if err := tx.Create(&route).Error; err != nil {
|
|
|
|
log.Error().Err(err).Msg("Error creating route")
|
|
|
|
} else {
|
|
|
|
log.Info().
|
|
|
|
Uint64("node_id", route.NodeID).
|
|
|
|
Str("prefix", prefix.String()).
|
|
|
|
Msg("Route migrated")
|
|
|
|
}
|
2023-12-10 15:46:14 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-02-09 22:04:28 +05:30
|
|
|
err = tx.Migrator().DropColumn(&types.Node{}, "enabled_routes")
|
|
|
|
if err != nil {
|
|
|
|
log.Error().
|
|
|
|
Err(err).
|
|
|
|
Msg("Error dropping enabled_routes column")
|
|
|
|
}
|
2023-12-10 15:46:14 +01:00
|
|
|
}
|
|
|
|
|
2024-02-09 22:04:28 +05:30
|
|
|
if tx.Migrator().HasColumn(&types.Node{}, "given_name") {
|
|
|
|
nodes := types.Nodes{}
|
|
|
|
if err := tx.Find(&nodes).Error; err != nil {
|
|
|
|
log.Error().Err(err).Msg("Error accessing db")
|
|
|
|
}
|
2023-12-10 15:46:14 +01:00
|
|
|
|
2024-02-09 22:04:28 +05:30
|
|
|
for item, node := range nodes {
|
|
|
|
if node.GivenName == "" {
|
|
|
|
if err != nil {
|
|
|
|
log.Error().
|
|
|
|
Caller().
|
|
|
|
Str("hostname", node.Hostname).
|
|
|
|
Err(err).
|
|
|
|
Msg("Failed to normalize node hostname in DB migration")
|
|
|
|
}
|
|
|
|
|
|
|
|
err = tx.Model(nodes[item]).Updates(types.Node{
|
Redo OIDC configuration (#2020)
expand user, add claims to user
This commit expands the user table with additional fields that
can be retrieved from OIDC providers (and other places) and
uses this data in various tailscale response objects if it is
available.
This is the beginning of implementing
https://docs.google.com/document/d/1X85PMxIaVWDF6T_UPji3OeeUqVBcGj_uHRM5CI-AwlY/edit
trying to make OIDC more coherant and maintainable in addition
to giving the user a better experience and integration with a
provider.
remove usernames in magic dns, normalisation of emails
this commit removes the option to have usernames as part of MagicDNS
domains and headscale will now align with Tailscale, where there is a
root domain, and the machine name.
In addition, the various normalisation functions for dns names has been
made lighter not caring about username and special character that wont
occur.
Email are no longer normalised as part of the policy processing.
untagle oidc and regcache, use typed cache
This commits stops reusing the registration cache for oidc
purposes and switches the cache to be types and not use any
allowing the removal of a bunch of casting.
try to make reauth/register branches clearer in oidc
Currently there was a function that did a bunch of stuff,
finding the machine key, trying to find the node, reauthing
the node, returning some status, and it was called validate
which was very confusing.
This commit tries to split this into what to do if the node
exists, if it needs to register etc.
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-10-02 14:50:17 +02:00
|
|
|
GivenName: node.Hostname,
|
2024-02-09 22:04:28 +05:30
|
|
|
}).Error
|
|
|
|
if err != nil {
|
|
|
|
log.Error().
|
|
|
|
Caller().
|
|
|
|
Str("hostname", node.Hostname).
|
|
|
|
Err(err).
|
|
|
|
Msg("Failed to save normalized node name in DB migration")
|
|
|
|
}
|
2023-12-10 15:46:14 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2022-11-23 18:50:30 +00:00
|
|
|
|
2024-02-09 22:04:28 +05:30
|
|
|
err = tx.AutoMigrate(&KV{})
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2022-11-23 18:50:30 +00:00
|
|
|
|
2024-02-09 22:04:28 +05:30
|
|
|
err = tx.AutoMigrate(&types.PreAuthKey{})
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2022-05-23 17:33:07 +01:00
|
|
|
|
2024-09-29 13:00:27 +02:00
|
|
|
type preAuthKeyACLTag struct {
|
|
|
|
ID uint64 `gorm:"primary_key"`
|
|
|
|
PreAuthKeyID uint64
|
|
|
|
Tag string
|
|
|
|
}
|
|
|
|
err = tx.AutoMigrate(&preAuthKeyACLTag{})
|
2024-02-09 22:04:28 +05:30
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2022-05-23 17:33:07 +01:00
|
|
|
|
2024-02-09 22:04:28 +05:30
|
|
|
_ = tx.Migrator().DropTable("shared_machines")
|
2022-01-30 13:06:49 +00:00
|
|
|
|
2024-02-09 22:04:28 +05:30
|
|
|
err = tx.AutoMigrate(&types.APIKey{})
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2020-06-21 12:32:08 +02:00
|
|
|
|
2024-02-09 22:04:28 +05:30
|
|
|
return nil
|
|
|
|
},
|
|
|
|
Rollback: func(tx *gorm.DB) error {
|
|
|
|
return nil
|
|
|
|
},
|
2023-12-10 15:46:14 +01:00
|
|
|
},
|
2024-02-09 22:04:28 +05:30
|
|
|
{
|
|
|
|
// drop key-value table, it is not used, and has not contained
|
|
|
|
// useful data for a long time or ever.
|
|
|
|
ID: "202312101430",
|
|
|
|
Migrate: func(tx *gorm.DB) error {
|
|
|
|
return tx.Migrator().DropTable("kvs")
|
|
|
|
},
|
|
|
|
Rollback: func(tx *gorm.DB) error {
|
|
|
|
return nil
|
|
|
|
},
|
2023-12-10 15:46:14 +01:00
|
|
|
},
|
2024-02-18 23:22:07 +01:00
|
|
|
{
|
|
|
|
// remove last_successful_update from node table,
|
|
|
|
// no longer used.
|
|
|
|
ID: "202402151347",
|
|
|
|
Migrate: func(tx *gorm.DB) error {
|
2024-05-27 11:53:37 +01:00
|
|
|
_ = tx.Migrator().DropColumn(&types.Node{}, "last_successful_update")
|
|
|
|
return nil
|
2024-02-18 23:22:07 +01:00
|
|
|
},
|
|
|
|
Rollback: func(tx *gorm.DB) error {
|
|
|
|
return nil
|
|
|
|
},
|
|
|
|
},
|
2024-04-17 07:03:06 +02:00
|
|
|
{
|
|
|
|
// Replace column with IP address list with dedicated
|
|
|
|
// IP v4 and v6 column.
|
|
|
|
// Note that previously, the list _could_ contain more
|
|
|
|
// than two addresses, which should not really happen.
|
2024-07-22 08:56:00 +02:00
|
|
|
// In that case, the first occurrence of each type will
|
2024-04-17 07:03:06 +02:00
|
|
|
// be kept.
|
|
|
|
ID: "2024041121742",
|
|
|
|
Migrate: func(tx *gorm.DB) error {
|
|
|
|
_ = tx.Migrator().AddColumn(&types.Node{}, "ipv4")
|
|
|
|
_ = tx.Migrator().AddColumn(&types.Node{}, "ipv6")
|
|
|
|
|
|
|
|
type node struct {
|
|
|
|
ID uint64 `gorm:"column:id"`
|
|
|
|
Addresses string `gorm:"column:ip_addresses"`
|
|
|
|
}
|
|
|
|
|
|
|
|
var nodes []node
|
|
|
|
|
|
|
|
_ = tx.Raw("SELECT id, ip_addresses FROM nodes").Scan(&nodes).Error
|
|
|
|
|
|
|
|
for _, node := range nodes {
|
|
|
|
addrs := strings.Split(node.Addresses, ",")
|
|
|
|
|
|
|
|
if len(addrs) == 0 {
|
2024-04-17 11:09:22 +02:00
|
|
|
return fmt.Errorf("no addresses found for node(%d)", node.ID)
|
2024-04-17 07:03:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
var v4 *netip.Addr
|
|
|
|
var v6 *netip.Addr
|
|
|
|
|
|
|
|
for _, addrStr := range addrs {
|
|
|
|
addr, err := netip.ParseAddr(addrStr)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("parsing IP for node(%d) from database: %w", node.ID, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if addr.Is4() && v4 == nil {
|
|
|
|
v4 = &addr
|
|
|
|
}
|
|
|
|
|
|
|
|
if addr.Is6() && v6 == nil {
|
|
|
|
v6 = &addr
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-04-17 13:23:29 +00:00
|
|
|
if v4 != nil {
|
|
|
|
err = tx.Model(&types.Node{}).Where("id = ?", node.ID).Update("ipv4", v4.String()).Error
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("saving ip addresses to new columns: %w", err)
|
|
|
|
}
|
2024-04-17 11:09:22 +02:00
|
|
|
}
|
|
|
|
|
2024-04-17 13:23:29 +00:00
|
|
|
if v6 != nil {
|
|
|
|
err = tx.Model(&types.Node{}).Where("id = ?", node.ID).Update("ipv6", v6.String()).Error
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("saving ip addresses to new columns: %w", err)
|
|
|
|
}
|
2024-04-17 07:03:06 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
_ = tx.Migrator().DropColumn(&types.Node{}, "ip_addresses")
|
|
|
|
|
|
|
|
return nil
|
|
|
|
},
|
|
|
|
Rollback: func(tx *gorm.DB) error {
|
|
|
|
return nil
|
|
|
|
},
|
|
|
|
},
|
2024-07-18 11:08:25 +05:30
|
|
|
{
|
|
|
|
ID: "202406021630",
|
|
|
|
Migrate: func(tx *gorm.DB) error {
|
|
|
|
err := tx.AutoMigrate(&types.Policy{})
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
},
|
|
|
|
Rollback: func(db *gorm.DB) error { return nil },
|
|
|
|
},
|
2024-09-29 13:00:27 +02:00
|
|
|
// denormalise the ACL tags for preauth keys back onto
|
|
|
|
// the preauth key table. We dont normalise or reuse and
|
|
|
|
// it is just a bunch of work for extra work.
|
|
|
|
{
|
|
|
|
ID: "202409271400",
|
|
|
|
Migrate: func(tx *gorm.DB) error {
|
|
|
|
preauthkeyTags := map[uint64]set.Set[string]{}
|
|
|
|
|
|
|
|
type preAuthKeyACLTag struct {
|
|
|
|
ID uint64 `gorm:"primary_key"`
|
|
|
|
PreAuthKeyID uint64
|
|
|
|
Tag string
|
|
|
|
}
|
|
|
|
|
|
|
|
var aclTags []preAuthKeyACLTag
|
|
|
|
if err := tx.Find(&aclTags).Error; err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Store the current tags.
|
|
|
|
for _, tag := range aclTags {
|
|
|
|
if preauthkeyTags[tag.PreAuthKeyID] == nil {
|
|
|
|
preauthkeyTags[tag.PreAuthKeyID] = set.SetOf([]string{tag.Tag})
|
|
|
|
} else {
|
|
|
|
preauthkeyTags[tag.PreAuthKeyID].Add(tag.Tag)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add tags column and restore the tags.
|
|
|
|
_ = tx.Migrator().AddColumn(&types.PreAuthKey{}, "tags")
|
|
|
|
for keyID, tags := range preauthkeyTags {
|
|
|
|
s := tags.Slice()
|
|
|
|
j, err := json.Marshal(s)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := tx.Model(&types.PreAuthKey{}).Where("id = ?", keyID).Update("tags", string(j)).Error; err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Drop the old table.
|
|
|
|
_ = tx.Migrator().DropTable(&preAuthKeyACLTag{})
|
2025-05-21 11:08:33 +02:00
|
|
|
|
Redo OIDC configuration (#2020)
expand user, add claims to user
This commit expands the user table with additional fields that
can be retrieved from OIDC providers (and other places) and
uses this data in various tailscale response objects if it is
available.
This is the beginning of implementing
https://docs.google.com/document/d/1X85PMxIaVWDF6T_UPji3OeeUqVBcGj_uHRM5CI-AwlY/edit
trying to make OIDC more coherant and maintainable in addition
to giving the user a better experience and integration with a
provider.
remove usernames in magic dns, normalisation of emails
this commit removes the option to have usernames as part of MagicDNS
domains and headscale will now align with Tailscale, where there is a
root domain, and the machine name.
In addition, the various normalisation functions for dns names has been
made lighter not caring about username and special character that wont
occur.
Email are no longer normalised as part of the policy processing.
untagle oidc and regcache, use typed cache
This commits stops reusing the registration cache for oidc
purposes and switches the cache to be types and not use any
allowing the removal of a bunch of casting.
try to make reauth/register branches clearer in oidc
Currently there was a function that did a bunch of stuff,
finding the machine key, trying to find the node, reauthing
the node, returning some status, and it was called validate
which was very confusing.
This commit tries to split this into what to do if the node
exists, if it needs to register etc.
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-10-02 14:50:17 +02:00
|
|
|
return nil
|
|
|
|
},
|
|
|
|
Rollback: func(db *gorm.DB) error { return nil },
|
|
|
|
},
|
|
|
|
{
|
2024-10-17 05:58:44 -06:00
|
|
|
// Pick up new user fields used for OIDC and to
|
|
|
|
// populate the user with more interesting information.
|
Redo OIDC configuration (#2020)
expand user, add claims to user
This commit expands the user table with additional fields that
can be retrieved from OIDC providers (and other places) and
uses this data in various tailscale response objects if it is
available.
This is the beginning of implementing
https://docs.google.com/document/d/1X85PMxIaVWDF6T_UPji3OeeUqVBcGj_uHRM5CI-AwlY/edit
trying to make OIDC more coherant and maintainable in addition
to giving the user a better experience and integration with a
provider.
remove usernames in magic dns, normalisation of emails
this commit removes the option to have usernames as part of MagicDNS
domains and headscale will now align with Tailscale, where there is a
root domain, and the machine name.
In addition, the various normalisation functions for dns names has been
made lighter not caring about username and special character that wont
occur.
Email are no longer normalised as part of the policy processing.
untagle oidc and regcache, use typed cache
This commits stops reusing the registration cache for oidc
purposes and switches the cache to be types and not use any
allowing the removal of a bunch of casting.
try to make reauth/register branches clearer in oidc
Currently there was a function that did a bunch of stuff,
finding the machine key, trying to find the node, reauthing
the node, returning some status, and it was called validate
which was very confusing.
This commit tries to split this into what to do if the node
exists, if it needs to register etc.
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-10-02 14:50:17 +02:00
|
|
|
ID: "202407191627",
|
|
|
|
Migrate: func(tx *gorm.DB) error {
|
2025-01-23 14:58:42 +01:00
|
|
|
// Fix an issue where the automigration in GORM expected a constraint to
|
|
|
|
// exists that didnt, and add the one it wanted.
|
|
|
|
// Fixes https://github.com/juanfont/headscale/issues/2351
|
|
|
|
if cfg.Type == types.DatabasePostgres {
|
|
|
|
err := tx.Exec(`
|
|
|
|
BEGIN;
|
|
|
|
DO $$
|
|
|
|
BEGIN
|
|
|
|
IF NOT EXISTS (
|
|
|
|
SELECT 1 FROM pg_constraint
|
|
|
|
WHERE conname = 'uni_users_name'
|
|
|
|
) THEN
|
|
|
|
ALTER TABLE users ADD CONSTRAINT uni_users_name UNIQUE (name);
|
|
|
|
END IF;
|
|
|
|
END $$;
|
|
|
|
|
|
|
|
DO $$
|
|
|
|
BEGIN
|
|
|
|
IF EXISTS (
|
|
|
|
SELECT 1 FROM pg_constraint
|
|
|
|
WHERE conname = 'users_name_key'
|
|
|
|
) THEN
|
|
|
|
ALTER TABLE users DROP CONSTRAINT users_name_key;
|
|
|
|
END IF;
|
|
|
|
END $$;
|
|
|
|
COMMIT;
|
|
|
|
`).Error
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to rename constraint: %w", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Redo OIDC configuration (#2020)
expand user, add claims to user
This commit expands the user table with additional fields that
can be retrieved from OIDC providers (and other places) and
uses this data in various tailscale response objects if it is
available.
This is the beginning of implementing
https://docs.google.com/document/d/1X85PMxIaVWDF6T_UPji3OeeUqVBcGj_uHRM5CI-AwlY/edit
trying to make OIDC more coherant and maintainable in addition
to giving the user a better experience and integration with a
provider.
remove usernames in magic dns, normalisation of emails
this commit removes the option to have usernames as part of MagicDNS
domains and headscale will now align with Tailscale, where there is a
root domain, and the machine name.
In addition, the various normalisation functions for dns names has been
made lighter not caring about username and special character that wont
occur.
Email are no longer normalised as part of the policy processing.
untagle oidc and regcache, use typed cache
This commits stops reusing the registration cache for oidc
purposes and switches the cache to be types and not use any
allowing the removal of a bunch of casting.
try to make reauth/register branches clearer in oidc
Currently there was a function that did a bunch of stuff,
finding the machine key, trying to find the node, reauthing
the node, returning some status, and it was called validate
which was very confusing.
This commit tries to split this into what to do if the node
exists, if it needs to register etc.
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-10-02 14:50:17 +02:00
|
|
|
err := tx.AutoMigrate(&types.User{})
|
|
|
|
if err != nil {
|
2025-02-07 10:22:23 +01:00
|
|
|
return fmt.Errorf("automigrating types.User: %w", err)
|
Redo OIDC configuration (#2020)
expand user, add claims to user
This commit expands the user table with additional fields that
can be retrieved from OIDC providers (and other places) and
uses this data in various tailscale response objects if it is
available.
This is the beginning of implementing
https://docs.google.com/document/d/1X85PMxIaVWDF6T_UPji3OeeUqVBcGj_uHRM5CI-AwlY/edit
trying to make OIDC more coherant and maintainable in addition
to giving the user a better experience and integration with a
provider.
remove usernames in magic dns, normalisation of emails
this commit removes the option to have usernames as part of MagicDNS
domains and headscale will now align with Tailscale, where there is a
root domain, and the machine name.
In addition, the various normalisation functions for dns names has been
made lighter not caring about username and special character that wont
occur.
Email are no longer normalised as part of the policy processing.
untagle oidc and regcache, use typed cache
This commits stops reusing the registration cache for oidc
purposes and switches the cache to be types and not use any
allowing the removal of a bunch of casting.
try to make reauth/register branches clearer in oidc
Currently there was a function that did a bunch of stuff,
finding the machine key, trying to find the node, reauthing
the node, returning some status, and it was called validate
which was very confusing.
This commit tries to split this into what to do if the node
exists, if it needs to register etc.
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-10-02 14:50:17 +02:00
|
|
|
}
|
2024-09-29 13:00:27 +02:00
|
|
|
|
|
|
|
return nil
|
|
|
|
},
|
|
|
|
Rollback: func(db *gorm.DB) error { return nil },
|
2024-10-17 05:58:44 -06:00
|
|
|
},
|
|
|
|
{
|
|
|
|
// The unique constraint of Name has been dropped
|
|
|
|
// in favour of a unique together of name and
|
|
|
|
// provider identity.
|
|
|
|
ID: "202408181235",
|
|
|
|
Migrate: func(tx *gorm.DB) error {
|
|
|
|
err := tx.AutoMigrate(&types.User{})
|
|
|
|
if err != nil {
|
2025-02-07 10:22:23 +01:00
|
|
|
return fmt.Errorf("automigrating types.User: %w", err)
|
2024-10-17 05:58:44 -06:00
|
|
|
}
|
|
|
|
|
2024-11-22 17:45:46 +01:00
|
|
|
// Set up indexes and unique constraints outside of GORM, it does not support
|
|
|
|
// conditional unique constraints.
|
|
|
|
// This ensures the following:
|
|
|
|
// - A user name and provider_identifier is unique
|
|
|
|
// - A provider_identifier is unique
|
|
|
|
// - A user name is unique if there is no provider_identifier is not set
|
|
|
|
for _, idx := range []string{
|
2024-11-23 11:19:52 +01:00
|
|
|
"DROP INDEX IF EXISTS idx_provider_identifier",
|
|
|
|
"DROP INDEX IF EXISTS idx_name_provider_identifier",
|
|
|
|
"CREATE UNIQUE INDEX IF NOT EXISTS idx_provider_identifier ON users (provider_identifier) WHERE provider_identifier IS NOT NULL;",
|
|
|
|
"CREATE UNIQUE INDEX IF NOT EXISTS idx_name_provider_identifier ON users (name,provider_identifier);",
|
|
|
|
"CREATE UNIQUE INDEX IF NOT EXISTS idx_name_no_provider_identifier ON users (name) WHERE provider_identifier IS NULL;",
|
2024-11-22 17:45:46 +01:00
|
|
|
} {
|
|
|
|
err = tx.Exec(idx).Error
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("creating username index: %w", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-10-17 05:58:44 -06:00
|
|
|
return nil
|
|
|
|
},
|
|
|
|
Rollback: func(db *gorm.DB) error { return nil },
|
2025-01-23 13:40:23 +01:00
|
|
|
},
|
|
|
|
{
|
|
|
|
// Add a constraint to routes ensuring they cannot exist without a node.
|
|
|
|
ID: "202501221827",
|
|
|
|
Migrate: func(tx *gorm.DB) error {
|
|
|
|
// Remove any invalid routes associated with a node that does not exist.
|
|
|
|
if tx.Migrator().HasTable(&types.Route{}) && tx.Migrator().HasTable(&types.Node{}) {
|
|
|
|
err := tx.Exec("delete from routes where node_id not in (select id from nodes)").Error
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2025-01-30 14:57:06 +01:00
|
|
|
// Remove any invalid routes without a node_id.
|
|
|
|
if tx.Migrator().HasTable(&types.Route{}) {
|
|
|
|
err := tx.Exec("delete from routes where node_id is null").Error
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2025-01-23 13:40:23 +01:00
|
|
|
err := tx.AutoMigrate(&types.Route{})
|
|
|
|
if err != nil {
|
2025-02-07 10:22:23 +01:00
|
|
|
return fmt.Errorf("automigrating types.Route: %w", err)
|
2025-01-23 13:40:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
},
|
|
|
|
Rollback: func(db *gorm.DB) error { return nil },
|
2025-02-01 09:31:13 +00:00
|
|
|
},
|
|
|
|
// Add back constraint so you cannot delete preauth keys that
|
|
|
|
// is still used by a node.
|
|
|
|
{
|
|
|
|
ID: "202501311657",
|
|
|
|
Migrate: func(tx *gorm.DB) error {
|
|
|
|
err := tx.AutoMigrate(&types.PreAuthKey{})
|
|
|
|
if err != nil {
|
2025-02-07 10:22:23 +01:00
|
|
|
return fmt.Errorf("automigrating types.PreAuthKey: %w", err)
|
2025-02-01 09:31:13 +00:00
|
|
|
}
|
|
|
|
err = tx.AutoMigrate(&types.Node{})
|
|
|
|
if err != nil {
|
2025-02-07 10:22:23 +01:00
|
|
|
return fmt.Errorf("automigrating types.Node: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
},
|
|
|
|
Rollback: func(db *gorm.DB) error { return nil },
|
|
|
|
},
|
2025-05-21 11:08:33 +02:00
|
|
|
// Ensure there are no nodes referring to a deleted preauthkey.
|
2025-02-07 10:22:23 +01:00
|
|
|
{
|
|
|
|
ID: "202502070949",
|
|
|
|
Migrate: func(tx *gorm.DB) error {
|
|
|
|
if tx.Migrator().HasTable(&types.PreAuthKey{}) {
|
|
|
|
err := tx.Exec(`
|
|
|
|
UPDATE nodes
|
|
|
|
SET auth_key_id = NULL
|
|
|
|
WHERE auth_key_id IS NOT NULL
|
|
|
|
AND auth_key_id NOT IN (
|
|
|
|
SELECT id FROM pre_auth_keys
|
|
|
|
);
|
|
|
|
`).Error
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("setting auth_key to null on nodes with non-existing keys: %w", err)
|
|
|
|
}
|
2025-02-01 09:31:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
},
|
|
|
|
Rollback: func(db *gorm.DB) error { return nil },
|
2025-02-26 07:22:55 -08:00
|
|
|
},
|
|
|
|
// Migrate all routes from the Route table to the new field ApprovedRoutes
|
|
|
|
// in the Node table. Then drop the Route table.
|
|
|
|
{
|
|
|
|
ID: "202502131714",
|
|
|
|
Migrate: func(tx *gorm.DB) error {
|
|
|
|
if !tx.Migrator().HasColumn(&types.Node{}, "approved_routes") {
|
|
|
|
err := tx.Migrator().AddColumn(&types.Node{}, "approved_routes")
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("adding column types.Node: %w", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
nodeRoutes := map[uint64][]netip.Prefix{}
|
|
|
|
|
|
|
|
var routes []types.Route
|
|
|
|
err = tx.Find(&routes).Error
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("fetching routes: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, route := range routes {
|
|
|
|
if route.Enabled {
|
|
|
|
nodeRoutes[route.NodeID] = append(nodeRoutes[route.NodeID], route.Prefix)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for nodeID, routes := range nodeRoutes {
|
2025-03-10 16:20:29 +01:00
|
|
|
tsaddr.SortPrefixes(routes)
|
2025-04-30 12:45:08 +03:00
|
|
|
routes = slices.Compact(routes)
|
2025-02-26 07:22:55 -08:00
|
|
|
|
|
|
|
data, err := json.Marshal(routes)
|
|
|
|
|
|
|
|
err = tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", data).Error
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("saving approved routes to new column: %w", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2025-03-02 10:22:47 +01:00
|
|
|
// Drop the old table.
|
|
|
|
_ = tx.Migrator().DropTable(&types.Route{})
|
|
|
|
|
2025-02-26 07:22:55 -08:00
|
|
|
return nil
|
|
|
|
},
|
|
|
|
Rollback: func(db *gorm.DB) error { return nil },
|
|
|
|
},
|
|
|
|
{
|
|
|
|
ID: "202502171819",
|
|
|
|
Migrate: func(tx *gorm.DB) error {
|
2025-05-10 10:49:08 +03:00
|
|
|
// This migration originally removed the last_seen column
|
|
|
|
// from the node table, but it was added back in
|
|
|
|
// 202505091439.
|
|
|
|
return nil
|
|
|
|
},
|
|
|
|
Rollback: func(db *gorm.DB) error { return nil },
|
|
|
|
},
|
|
|
|
// Add back last_seen column to node table.
|
|
|
|
{
|
|
|
|
ID: "202505091439",
|
|
|
|
Migrate: func(tx *gorm.DB) error {
|
|
|
|
// Add back last_seen column to node table if it does not exist.
|
|
|
|
// This is a workaround for the fact that the last_seen column
|
|
|
|
// was removed in the 202502171819 migration, but only for some
|
|
|
|
// beta testers.
|
|
|
|
if !tx.Migrator().HasColumn(&types.Node{}, "last_seen") {
|
|
|
|
_ = tx.Migrator().AddColumn(&types.Node{}, "last_seen")
|
|
|
|
}
|
2025-02-26 07:22:55 -08:00
|
|
|
|
|
|
|
return nil
|
|
|
|
},
|
|
|
|
Rollback: func(db *gorm.DB) error { return nil },
|
2025-05-14 17:45:14 +03:00
|
|
|
},
|
|
|
|
// Fix the provider identifier for users that have a double slash in the
|
|
|
|
// provider identifier.
|
|
|
|
{
|
|
|
|
ID: "202505141324",
|
|
|
|
Migrate: func(tx *gorm.DB) error {
|
|
|
|
users, err := ListUsers(tx)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("listing users: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, user := range users {
|
|
|
|
user.ProviderIdentifier.String = types.CleanIdentifier(user.ProviderIdentifier.String)
|
|
|
|
|
|
|
|
err := tx.Save(user).Error
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("saving user: %w", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
},
|
|
|
|
Rollback: func(db *gorm.DB) error { return nil },
|
2024-09-29 13:00:27 +02:00
|
|
|
},
|
2025-05-21 11:08:33 +02:00
|
|
|
// Schema migration to ensure all tables match the expected schema.
|
|
|
|
// This migration recreates all tables to match the exact structure in schema.sql,
|
|
|
|
// preserving all data during the process.
|
|
|
|
// Only SQLite will be migrated for consistency.
|
|
|
|
{
|
|
|
|
ID: "202507021200",
|
|
|
|
Migrate: func(tx *gorm.DB) error {
|
|
|
|
// Only run on SQLite
|
|
|
|
if cfg.Type != types.DatabaseSqlite {
|
|
|
|
log.Info().Msg("Skipping schema migration on non-SQLite database")
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
log.Info().Msg("Starting schema recreation with table renaming")
|
|
|
|
|
|
|
|
// Rename existing tables to _old versions
|
|
|
|
tablesToRename := []string{"users", "pre_auth_keys", "api_keys", "nodes", "policies"}
|
|
|
|
|
|
|
|
// Check if routes table exists and drop it (should have been migrated already)
|
|
|
|
var routesExists bool
|
|
|
|
err := tx.Raw("SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='routes'").Row().Scan(&routesExists)
|
|
|
|
if err == nil && routesExists {
|
|
|
|
log.Info().Msg("Dropping leftover routes table")
|
|
|
|
if err := tx.Exec("DROP TABLE routes").Error; err != nil {
|
|
|
|
return fmt.Errorf("dropping routes table: %w", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Drop all indexes first to avoid conflicts
|
|
|
|
indexesToDrop := []string{
|
|
|
|
"idx_users_deleted_at",
|
2025-07-10 23:38:55 +02:00
|
|
|
"idx_provider_identifier",
|
2025-05-21 11:08:33 +02:00
|
|
|
"idx_name_provider_identifier",
|
|
|
|
"idx_name_no_provider_identifier",
|
|
|
|
"idx_api_keys_prefix",
|
|
|
|
"idx_policies_deleted_at",
|
|
|
|
}
|
2025-07-10 23:38:55 +02:00
|
|
|
|
2025-05-21 11:08:33 +02:00
|
|
|
for _, index := range indexesToDrop {
|
|
|
|
_ = tx.Exec("DROP INDEX IF EXISTS " + index).Error
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, table := range tablesToRename {
|
|
|
|
// Check if table exists before renaming
|
|
|
|
var exists bool
|
|
|
|
err := tx.Raw("SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name=?", table).Row().Scan(&exists)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("checking if table %s exists: %w", table, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if exists {
|
|
|
|
// Drop old table if it exists from previous failed migration
|
|
|
|
_ = tx.Exec("DROP TABLE IF EXISTS " + table + "_old").Error
|
|
|
|
|
|
|
|
// Rename current table to _old
|
|
|
|
if err := tx.Exec("ALTER TABLE " + table + " RENAME TO " + table + "_old").Error; err != nil {
|
|
|
|
return fmt.Errorf("renaming table %s to %s_old: %w", table, table, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create new tables with correct schema
|
|
|
|
tableCreationSQL := []string{
|
|
|
|
`CREATE TABLE users(
|
|
|
|
id integer PRIMARY KEY AUTOINCREMENT,
|
|
|
|
name text,
|
|
|
|
display_name text,
|
|
|
|
email text,
|
|
|
|
provider_identifier text,
|
|
|
|
provider text,
|
|
|
|
profile_pic_url text,
|
|
|
|
created_at datetime,
|
|
|
|
updated_at datetime,
|
|
|
|
deleted_at datetime
|
|
|
|
)`,
|
|
|
|
`CREATE TABLE pre_auth_keys(
|
|
|
|
id integer PRIMARY KEY AUTOINCREMENT,
|
|
|
|
key text,
|
|
|
|
user_id integer,
|
|
|
|
reusable numeric,
|
|
|
|
ephemeral numeric DEFAULT false,
|
|
|
|
used numeric DEFAULT false,
|
|
|
|
tags text,
|
|
|
|
expiration datetime,
|
|
|
|
created_at datetime,
|
|
|
|
CONSTRAINT fk_pre_auth_keys_user FOREIGN KEY(user_id) REFERENCES users(id) ON DELETE SET NULL
|
|
|
|
)`,
|
|
|
|
`CREATE TABLE api_keys(
|
|
|
|
id integer PRIMARY KEY AUTOINCREMENT,
|
|
|
|
prefix text,
|
|
|
|
hash blob,
|
|
|
|
expiration datetime,
|
|
|
|
last_seen datetime,
|
|
|
|
created_at datetime
|
|
|
|
)`,
|
|
|
|
`CREATE TABLE nodes(
|
|
|
|
id integer PRIMARY KEY AUTOINCREMENT,
|
|
|
|
machine_key text,
|
|
|
|
node_key text,
|
|
|
|
disco_key text,
|
|
|
|
endpoints text,
|
|
|
|
host_info text,
|
|
|
|
ipv4 text,
|
|
|
|
ipv6 text,
|
|
|
|
hostname text,
|
|
|
|
given_name varchar(63),
|
|
|
|
user_id integer,
|
|
|
|
register_method text,
|
|
|
|
forced_tags text,
|
|
|
|
auth_key_id integer,
|
|
|
|
last_seen datetime,
|
|
|
|
expiry datetime,
|
|
|
|
approved_routes text,
|
|
|
|
created_at datetime,
|
|
|
|
updated_at datetime,
|
|
|
|
deleted_at datetime,
|
|
|
|
CONSTRAINT fk_nodes_user FOREIGN KEY(user_id) REFERENCES users(id) ON DELETE CASCADE,
|
|
|
|
CONSTRAINT fk_nodes_auth_key FOREIGN KEY(auth_key_id) REFERENCES pre_auth_keys(id)
|
|
|
|
)`,
|
|
|
|
`CREATE TABLE policies(
|
|
|
|
id integer PRIMARY KEY AUTOINCREMENT,
|
|
|
|
data text,
|
|
|
|
created_at datetime,
|
|
|
|
updated_at datetime,
|
|
|
|
deleted_at datetime
|
|
|
|
)`,
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, createSQL := range tableCreationSQL {
|
|
|
|
if err := tx.Exec(createSQL).Error; err != nil {
|
|
|
|
return fmt.Errorf("creating new table: %w", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Copy data directly using SQL
|
|
|
|
dataCopySQL := []string{
|
|
|
|
`INSERT INTO users (id, name, display_name, email, provider_identifier, provider, profile_pic_url, created_at, updated_at, deleted_at)
|
|
|
|
SELECT id, name, display_name, email, provider_identifier, provider, profile_pic_url, created_at, updated_at, deleted_at
|
|
|
|
FROM users_old`,
|
|
|
|
|
|
|
|
`INSERT INTO pre_auth_keys (id, key, user_id, reusable, ephemeral, used, tags, expiration, created_at)
|
|
|
|
SELECT id, key, user_id, reusable, ephemeral, used, tags, expiration, created_at
|
|
|
|
FROM pre_auth_keys_old`,
|
|
|
|
|
|
|
|
`INSERT INTO api_keys (id, prefix, hash, expiration, last_seen, created_at)
|
|
|
|
SELECT id, prefix, hash, expiration, last_seen, created_at
|
|
|
|
FROM api_keys_old`,
|
|
|
|
|
|
|
|
`INSERT INTO nodes (id, machine_key, node_key, disco_key, endpoints, host_info, ipv4, ipv6, hostname, given_name, user_id, register_method, forced_tags, auth_key_id, last_seen, expiry, approved_routes, created_at, updated_at, deleted_at)
|
|
|
|
SELECT id, machine_key, node_key, disco_key, endpoints, host_info, ipv4, ipv6, hostname, given_name, user_id, register_method, forced_tags, auth_key_id, last_seen, expiry, approved_routes, created_at, updated_at, deleted_at
|
|
|
|
FROM nodes_old`,
|
|
|
|
|
|
|
|
`INSERT INTO policies (id, data, created_at, updated_at, deleted_at)
|
|
|
|
SELECT id, data, created_at, updated_at, deleted_at
|
|
|
|
FROM policies_old`,
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, copySQL := range dataCopySQL {
|
|
|
|
if err := tx.Exec(copySQL).Error; err != nil {
|
|
|
|
return fmt.Errorf("copying data: %w", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create indexes
|
|
|
|
indexes := []string{
|
|
|
|
"CREATE INDEX idx_users_deleted_at ON users(deleted_at)",
|
|
|
|
`CREATE UNIQUE INDEX idx_provider_identifier ON users(
|
|
|
|
provider_identifier
|
|
|
|
) WHERE provider_identifier IS NOT NULL`,
|
|
|
|
`CREATE UNIQUE INDEX idx_name_provider_identifier ON users(
|
|
|
|
name,
|
|
|
|
provider_identifier
|
|
|
|
)`,
|
|
|
|
`CREATE UNIQUE INDEX idx_name_no_provider_identifier ON users(
|
|
|
|
name
|
|
|
|
) WHERE provider_identifier IS NULL`,
|
|
|
|
"CREATE UNIQUE INDEX idx_api_keys_prefix ON api_keys(prefix)",
|
|
|
|
"CREATE INDEX idx_policies_deleted_at ON policies(deleted_at)",
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, indexSQL := range indexes {
|
|
|
|
if err := tx.Exec(indexSQL).Error; err != nil {
|
|
|
|
return fmt.Errorf("creating index: %w", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Drop old tables only after everything succeeds
|
|
|
|
for _, table := range tablesToRename {
|
|
|
|
if err := tx.Exec("DROP TABLE IF EXISTS " + table + "_old").Error; err != nil {
|
|
|
|
log.Warn().Str("table", table+"_old").Err(err).Msg("Failed to drop old table, but migration succeeded")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
log.Info().Msg("Schema recreation completed successfully")
|
2025-07-10 23:38:55 +02:00
|
|
|
|
2025-05-21 11:08:33 +02:00
|
|
|
return nil
|
|
|
|
},
|
|
|
|
Rollback: func(db *gorm.DB) error { return nil },
|
|
|
|
},
|
|
|
|
// From this point, the following rules must be followed:
|
|
|
|
// - NEVER use gorm.AutoMigrate, write the exact migration steps needed
|
|
|
|
// - AutoMigrate depends on the struct staying exactly the same, which it wont over time.
|
|
|
|
// - Never write migrations that requires foreign keys to be disabled.
|
2023-12-10 15:46:14 +01:00
|
|
|
},
|
2024-02-09 22:04:28 +05:30
|
|
|
)
|
2023-12-10 15:46:14 +01:00
|
|
|
|
2024-08-29 23:08:54 -07:00
|
|
|
if err := runMigrations(cfg, dbConn, migrations); err != nil {
|
2023-12-10 15:46:14 +01:00
|
|
|
log.Fatal().Err(err).Msgf("Migration failed: %v", err)
|
2022-08-25 20:03:38 +10:00
|
|
|
}
|
|
|
|
|
2025-05-21 11:08:33 +02:00
|
|
|
// Validate that the schema ends up in the expected state.
|
|
|
|
// This is currently only done on sqlite as squibble does not
|
|
|
|
// support Postgres and we use our sqlite schema as our source of
|
|
|
|
// truth.
|
|
|
|
if cfg.Type == types.DatabaseSqlite {
|
|
|
|
sqlConn, err := dbConn.DB()
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("getting DB from gorm: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// or else it blocks...
|
|
|
|
sqlConn.SetMaxIdleConns(maxIdleConns)
|
|
|
|
sqlConn.SetMaxOpenConns(maxOpenConns)
|
|
|
|
defer sqlConn.SetMaxIdleConns(1)
|
|
|
|
defer sqlConn.SetMaxOpenConns(1)
|
|
|
|
|
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), contextTimeoutSecs*time.Second)
|
|
|
|
defer cancel()
|
|
|
|
|
|
|
|
if err := squibble.Validate(ctx, sqlConn, dbSchema); err != nil {
|
|
|
|
return nil, fmt.Errorf("validating schema: %w", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-12-10 15:46:14 +01:00
|
|
|
db := HSDatabase{
|
Redo OIDC configuration (#2020)
expand user, add claims to user
This commit expands the user table with additional fields that
can be retrieved from OIDC providers (and other places) and
uses this data in various tailscale response objects if it is
available.
This is the beginning of implementing
https://docs.google.com/document/d/1X85PMxIaVWDF6T_UPji3OeeUqVBcGj_uHRM5CI-AwlY/edit
trying to make OIDC more coherant and maintainable in addition
to giving the user a better experience and integration with a
provider.
remove usernames in magic dns, normalisation of emails
this commit removes the option to have usernames as part of MagicDNS
domains and headscale will now align with Tailscale, where there is a
root domain, and the machine name.
In addition, the various normalisation functions for dns names has been
made lighter not caring about username and special character that wont
occur.
Email are no longer normalised as part of the policy processing.
untagle oidc and regcache, use typed cache
This commits stops reusing the registration cache for oidc
purposes and switches the cache to be types and not use any
allowing the removal of a bunch of casting.
try to make reauth/register branches clearer in oidc
Currently there was a function that did a bunch of stuff,
finding the machine key, trying to find the node, reauthing
the node, returning some status, and it was called validate
which was very confusing.
This commit tries to split this into what to do if the node
exists, if it needs to register etc.
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-10-02 14:50:17 +02:00
|
|
|
DB: dbConn,
|
|
|
|
cfg: &cfg,
|
|
|
|
regCache: regCache,
|
2021-09-02 16:57:26 +02:00
|
|
|
|
2023-12-10 15:46:14 +01:00
|
|
|
baseDomain: baseDomain,
|
2022-01-25 22:11:05 +00:00
|
|
|
}
|
|
|
|
|
2023-05-11 09:09:18 +02:00
|
|
|
return &db, err
|
2020-06-21 12:32:08 +02:00
|
|
|
}
|
|
|
|
|
2024-02-09 07:27:00 +01:00
|
|
|
func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) {
|
|
|
|
// TODO(kradalby): Integrate this with zerolog
|
2023-05-11 09:09:18 +02:00
|
|
|
var dbLogger logger.Interface
|
2024-02-09 07:27:00 +01:00
|
|
|
if cfg.Debug {
|
2024-08-19 18:47:52 +09:00
|
|
|
dbLogger = util.NewDBLogWrapper(&log.Logger, cfg.Gorm.SlowThreshold, cfg.Gorm.SkipErrRecordNotFound, cfg.Gorm.ParameterizedQueries)
|
2021-07-11 13:13:36 +02:00
|
|
|
} else {
|
2023-05-11 09:09:18 +02:00
|
|
|
dbLogger = logger.Default.LogMode(logger.Silent)
|
2021-07-11 13:13:36 +02:00
|
|
|
}
|
|
|
|
|
2024-02-09 07:27:00 +01:00
|
|
|
switch cfg.Type {
|
|
|
|
case types.DatabaseSqlite:
|
2024-02-17 13:36:19 +01:00
|
|
|
dir := filepath.Dir(cfg.Sqlite.Path)
|
|
|
|
err := util.EnsureDir(dir)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("creating directory for sqlite: %w", err)
|
|
|
|
}
|
|
|
|
|
2024-02-18 19:31:29 +01:00
|
|
|
log.Info().
|
|
|
|
Str("database", types.DatabaseSqlite).
|
|
|
|
Str("path", cfg.Sqlite.Path).
|
|
|
|
Msg("Opening database")
|
|
|
|
|
2025-05-21 11:08:33 +02:00
|
|
|
// Build SQLite configuration with pragmas set at connection time
|
|
|
|
sqliteConfig := sqliteconfig.Default(cfg.Sqlite.Path)
|
|
|
|
if cfg.Sqlite.WriteAheadLog {
|
|
|
|
sqliteConfig.JournalMode = sqliteconfig.JournalModeWAL
|
|
|
|
sqliteConfig.WALAutocheckpoint = cfg.Sqlite.WALAutoCheckPoint
|
|
|
|
}
|
|
|
|
|
|
|
|
connectionURL, err := sqliteConfig.ToURL()
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("building sqlite connection URL: %w", err)
|
|
|
|
}
|
|
|
|
|
2023-05-11 09:09:18 +02:00
|
|
|
db, err := gorm.Open(
|
2025-05-21 11:08:33 +02:00
|
|
|
sqlite.Open(connectionURL),
|
2022-02-23 16:15:20 +00:00
|
|
|
&gorm.Config{
|
2024-08-19 18:47:52 +09:00
|
|
|
PrepareStmt: cfg.Gorm.PrepareStmt,
|
|
|
|
Logger: dbLogger,
|
2022-02-23 16:15:20 +00:00
|
|
|
},
|
|
|
|
)
|
|
|
|
|
|
|
|
// The pure Go SQLite library does not handle locking in
|
2025-02-05 16:10:18 +01:00
|
|
|
// the same way as the C based one and we can't use the gorm
|
2022-02-23 16:15:20 +00:00
|
|
|
// connection pool as of 2022/02/23.
|
2022-02-22 19:04:52 +00:00
|
|
|
sqlDB, _ := db.DB()
|
2022-02-23 16:15:20 +00:00
|
|
|
sqlDB.SetMaxIdleConns(1)
|
2022-02-22 19:04:52 +00:00
|
|
|
sqlDB.SetMaxOpenConns(1)
|
2022-02-23 16:15:20 +00:00
|
|
|
sqlDB.SetConnMaxIdleTime(time.Hour)
|
|
|
|
|
2023-05-11 09:09:18 +02:00
|
|
|
return db, err
|
|
|
|
|
2024-02-09 07:27:00 +01:00
|
|
|
case types.DatabasePostgres:
|
|
|
|
dbString := fmt.Sprintf(
|
|
|
|
"host=%s dbname=%s user=%s",
|
|
|
|
cfg.Postgres.Host,
|
|
|
|
cfg.Postgres.Name,
|
|
|
|
cfg.Postgres.User,
|
|
|
|
)
|
|
|
|
|
2024-02-18 19:31:29 +01:00
|
|
|
log.Info().
|
|
|
|
Str("database", types.DatabasePostgres).
|
|
|
|
Str("path", dbString).
|
|
|
|
Msg("Opening database")
|
|
|
|
|
2024-02-09 07:27:00 +01:00
|
|
|
if sslEnabled, err := strconv.ParseBool(cfg.Postgres.Ssl); err == nil {
|
|
|
|
if !sslEnabled {
|
|
|
|
dbString += " sslmode=disable"
|
|
|
|
}
|
|
|
|
} else {
|
2025-05-21 11:08:33 +02:00
|
|
|
dbString += " sslmode=" + cfg.Postgres.Ssl
|
2024-02-09 07:27:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if cfg.Postgres.Port != 0 {
|
|
|
|
dbString += fmt.Sprintf(" port=%d", cfg.Postgres.Port)
|
|
|
|
}
|
|
|
|
|
|
|
|
if cfg.Postgres.Pass != "" {
|
2025-05-21 11:08:33 +02:00
|
|
|
dbString += " password=" + cfg.Postgres.Pass
|
2024-02-09 07:27:00 +01:00
|
|
|
}
|
|
|
|
|
2024-02-09 22:04:28 +05:30
|
|
|
db, err := gorm.Open(postgres.Open(dbString), &gorm.Config{
|
2024-05-16 02:40:14 +02:00
|
|
|
Logger: dbLogger,
|
2021-06-24 23:05:26 +02:00
|
|
|
})
|
2024-02-09 22:04:28 +05:30
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
sqlDB, _ := db.DB()
|
|
|
|
sqlDB.SetMaxIdleConns(cfg.Postgres.MaxIdleConnections)
|
|
|
|
sqlDB.SetMaxOpenConns(cfg.Postgres.MaxOpenConnections)
|
|
|
|
sqlDB.SetConnMaxIdleTime(
|
|
|
|
time.Duration(cfg.Postgres.ConnMaxIdleTimeSecs) * time.Second,
|
|
|
|
)
|
|
|
|
|
|
|
|
return db, nil
|
2021-06-24 15:44:19 +02:00
|
|
|
}
|
|
|
|
|
2023-05-11 09:09:18 +02:00
|
|
|
return nil, fmt.Errorf(
|
|
|
|
"database of type %s is not supported: %w",
|
2024-02-09 07:27:00 +01:00
|
|
|
cfg.Type,
|
2023-05-11 09:09:18 +02:00
|
|
|
errDatabaseNotSupported,
|
|
|
|
)
|
|
|
|
}
|
2021-07-11 13:13:36 +02:00
|
|
|
|
2024-08-29 23:08:54 -07:00
|
|
|
func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormigrate.Gormigrate) error {
|
|
|
|
if cfg.Type == types.DatabaseSqlite {
|
2025-05-21 11:08:33 +02:00
|
|
|
// SQLite: Run migrations step-by-step, only disabling foreign keys when necessary
|
|
|
|
|
|
|
|
// List of migration IDs that require foreign keys to be disabled
|
|
|
|
// These are migrations that perform complex schema changes that GORM cannot handle safely with FK enabled
|
|
|
|
// NO NEW MIGRATIONS SHOULD BE ADDED HERE. ALL NEW MIGRATIONS MUST RUN WITH FOREIGN KEYS ENABLED.
|
|
|
|
migrationsRequiringFKDisabled := map[string]bool{
|
|
|
|
"202312101416": true, // Initial migration with complex table/column renames
|
|
|
|
"202402151347": true, // Migration that removes last_successful_update column
|
|
|
|
"2024041121742": true, // Migration that changes IP address storage format
|
|
|
|
"202407191627": true, // User table automigration with FK constraint issues
|
|
|
|
"202408181235": true, // User table automigration with FK constraint issues
|
|
|
|
"202501221827": true, // Route table automigration with FK constraint issues
|
|
|
|
"202501311657": true, // PreAuthKey table automigration with FK constraint issues
|
|
|
|
// Add other migration IDs here as they are identified to need FK disabled
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get the current foreign key status
|
|
|
|
var fkOriginallyEnabled int
|
|
|
|
if err := dbConn.Raw("PRAGMA foreign_keys").Scan(&fkOriginallyEnabled).Error; err != nil {
|
2024-08-29 23:08:54 -07:00
|
|
|
return fmt.Errorf("checking foreign key status: %w", err)
|
|
|
|
}
|
2025-05-21 11:08:33 +02:00
|
|
|
|
|
|
|
// Get all migration IDs in order from the actual migration definitions
|
|
|
|
// Only IDs that are in the migrationsRequiringFKDisabled map will be processed with FK disabled
|
|
|
|
// any other new migrations are ran after.
|
|
|
|
migrationIDs := []string{
|
|
|
|
"202312101416",
|
|
|
|
"202312101430",
|
|
|
|
"202402151347",
|
|
|
|
"2024041121742",
|
|
|
|
"202406021630",
|
|
|
|
"202407191627",
|
|
|
|
"202408181235",
|
|
|
|
"202409271400",
|
|
|
|
"202501221827",
|
|
|
|
"202501311657",
|
|
|
|
"202502070949",
|
|
|
|
"202502131714",
|
|
|
|
"202502171819",
|
|
|
|
"202505091439",
|
|
|
|
"202505141324",
|
|
|
|
// As of 2025-07-02, no new IDs should be added here.
|
|
|
|
// They will be ran by the migrations.Migrate() call below.
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, migrationID := range migrationIDs {
|
|
|
|
log.Trace().Str("migration_id", migrationID).Msg("Running migration")
|
|
|
|
needsFKDisabled := migrationsRequiringFKDisabled[migrationID]
|
|
|
|
|
|
|
|
if needsFKDisabled {
|
|
|
|
// Disable foreign keys for this migration
|
|
|
|
if err := dbConn.Exec("PRAGMA foreign_keys = OFF").Error; err != nil {
|
|
|
|
return fmt.Errorf("disabling foreign keys for migration %s: %w", migrationID, err)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Ensure foreign keys are enabled for this migration
|
|
|
|
if err := dbConn.Exec("PRAGMA foreign_keys = ON").Error; err != nil {
|
|
|
|
return fmt.Errorf("enabling foreign keys for migration %s: %w", migrationID, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Run up to this specific migration (will only run the next pending migration)
|
|
|
|
if err := migrations.MigrateTo(migrationID); err != nil {
|
|
|
|
return fmt.Errorf("running migration %s: %w", migrationID, err)
|
2024-08-29 23:08:54 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2025-05-21 11:08:33 +02:00
|
|
|
if err := dbConn.Exec("PRAGMA foreign_keys = ON").Error; err != nil {
|
|
|
|
return fmt.Errorf("restoring foreign keys: %w", err)
|
|
|
|
}
|
2024-08-29 23:08:54 -07:00
|
|
|
|
2025-05-21 11:08:33 +02:00
|
|
|
// Run the rest of the migrations
|
|
|
|
if err := migrations.Migrate(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check for constraint violations at the end
|
2024-08-29 23:08:54 -07:00
|
|
|
type constraintViolation struct {
|
|
|
|
Table string
|
|
|
|
RowID int
|
|
|
|
Parent string
|
|
|
|
ConstraintIndex int
|
|
|
|
}
|
|
|
|
|
|
|
|
var violatedConstraints []constraintViolation
|
|
|
|
|
|
|
|
rows, err := dbConn.Raw("PRAGMA foreign_key_check").Rows()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
for rows.Next() {
|
|
|
|
var violation constraintViolation
|
|
|
|
if err := rows.Scan(&violation.Table, &violation.RowID, &violation.Parent, &violation.ConstraintIndex); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
violatedConstraints = append(violatedConstraints, violation)
|
|
|
|
}
|
|
|
|
_ = rows.Close()
|
|
|
|
|
|
|
|
if len(violatedConstraints) > 0 {
|
|
|
|
for _, violation := range violatedConstraints {
|
|
|
|
log.Error().
|
|
|
|
Str("table", violation.Table).
|
|
|
|
Int("row_id", violation.RowID).
|
|
|
|
Str("parent", violation.Parent).
|
|
|
|
Msg("Foreign key constraint violated")
|
|
|
|
}
|
|
|
|
|
2025-05-21 11:08:33 +02:00
|
|
|
return errForeignKeyConstraintsViolated
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// PostgreSQL can run all migrations in one block - no foreign key issues
|
|
|
|
if err := migrations.Migrate(); err != nil {
|
|
|
|
return err
|
2024-08-29 23:08:54 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-05-21 19:37:59 +03:00
|
|
|
func (hsdb *HSDatabase) PingDB(ctx context.Context) error {
|
2022-09-04 11:43:09 +02:00
|
|
|
ctx, cancel := context.WithTimeout(ctx, time.Second)
|
2022-07-06 13:39:10 +02:00
|
|
|
defer cancel()
|
2024-02-08 17:28:19 +01:00
|
|
|
sqlDB, err := hsdb.DB.DB()
|
2022-07-06 13:39:10 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2023-05-11 09:09:18 +02:00
|
|
|
return sqlDB.PingContext(ctx)
|
2022-07-06 13:39:10 +02:00
|
|
|
}
|
|
|
|
|
2023-05-21 19:37:59 +03:00
|
|
|
func (hsdb *HSDatabase) Close() error {
|
2024-02-08 17:28:19 +01:00
|
|
|
db, err := hsdb.DB.DB()
|
2023-05-21 19:37:59 +03:00
|
|
|
if err != nil {
|
|
|
|
return err
|
2022-03-01 16:31:25 +00:00
|
|
|
}
|
|
|
|
|
2024-10-02 11:41:58 +02:00
|
|
|
if hsdb.cfg.Type == types.DatabaseSqlite && hsdb.cfg.Sqlite.WriteAheadLog {
|
|
|
|
db.Exec("VACUUM")
|
|
|
|
}
|
|
|
|
|
2023-05-21 19:37:59 +03:00
|
|
|
return db.Close()
|
2022-03-01 16:31:25 +00:00
|
|
|
}
|
2024-02-08 17:28:19 +01:00
|
|
|
|
|
|
|
func (hsdb *HSDatabase) Read(fn func(rx *gorm.DB) error) error {
|
|
|
|
rx := hsdb.DB.Begin()
|
|
|
|
defer rx.Rollback()
|
|
|
|
return fn(rx)
|
|
|
|
}
|
|
|
|
|
|
|
|
func Read[T any](db *gorm.DB, fn func(rx *gorm.DB) (T, error)) (T, error) {
|
|
|
|
rx := db.Begin()
|
|
|
|
defer rx.Rollback()
|
|
|
|
ret, err := fn(rx)
|
|
|
|
if err != nil {
|
|
|
|
var no T
|
|
|
|
return no, err
|
|
|
|
}
|
2025-05-21 11:08:33 +02:00
|
|
|
|
2024-02-08 17:28:19 +01:00
|
|
|
return ret, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (hsdb *HSDatabase) Write(fn func(tx *gorm.DB) error) error {
|
|
|
|
tx := hsdb.DB.Begin()
|
|
|
|
defer tx.Rollback()
|
|
|
|
if err := fn(tx); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return tx.Commit().Error
|
|
|
|
}
|
|
|
|
|
|
|
|
func Write[T any](db *gorm.DB, fn func(tx *gorm.DB) (T, error)) (T, error) {
|
|
|
|
tx := db.Begin()
|
|
|
|
defer tx.Rollback()
|
|
|
|
ret, err := fn(tx)
|
|
|
|
if err != nil {
|
|
|
|
var no T
|
|
|
|
return no, err
|
|
|
|
}
|
2025-05-21 11:08:33 +02:00
|
|
|
|
2024-02-08 17:28:19 +01:00
|
|
|
return ret, tx.Commit().Error
|
|
|
|
}
|