package db

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"net/netip"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/glebarez/sqlite"
	"github.com/go-gormigrate/gormigrate/v2"
	"github.com/rs/zerolog/log"
	"gorm.io/driver/postgres"
	"gorm.io/gorm"
	"gorm.io/gorm/logger"

	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/juanfont/headscale/hscontrol/util"
)

var errDatabaseNotSupported = errors.New("database type not supported")

// KV is a key-value store in a psql table. For future use...
// TODO(kradalby): Is this used for anything?
type KV struct {
	Key   string
	Value string
}
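
// HSDatabase wraps the gorm database handle together with the
// configured base domain.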
type HSDatabase struct {
	DB *gorm.DB

	baseDomain string
}

// TODO(kradalby): assemble this struct from options or something typed
// rather than arguments.
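
// NewHeadscaleDatabase opens the configured database, applies all schema
// migrations, and returns a handle wrapping the connection and base domain.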
func NewHeadscaleDatabase(
	cfg types.DatabaseConfig,
	baseDomain string,
) (*HSDatabase, error) {
	dbConn, err := openDB(cfg)
	if err != nil {
		return nil, err
	}

	migrations := gormigrate.New(
		dbConn,
		gormigrate.DefaultOptions,
		[]*gormigrate.Migration{
			// New migrations should be added as transactions at the end of this list.
			// The initial migration here is quite messy, completely out of order,
			// has no versioning, and is the tech debt of not having versioned
			// migrations prior to this point. This first migration contains all DB
			// changes needed to bring a database up to 0.23.0.
			{
				ID: "202312101416",
				Migrate: func(tx *gorm.DB) error {
					if cfg.Type == types.DatabasePostgres {
						tx.Exec(`create extension if not exists "uuid-ossp";`)
					}

					_ = tx.Migrator().RenameTable("namespaces", "users")

					// The big rename from Machine to Node.
					_ = tx.Migrator().RenameTable("machines", "nodes")
					_ = tx.Migrator().
						RenameColumn(&types.Route{}, "machine_id", "node_id")

					err = tx.AutoMigrate(types.User{})
					if err != nil {
						return err
					}

					_ = tx.Migrator().
						RenameColumn(&types.Node{}, "namespace_id", "user_id")
					_ = tx.Migrator().
						RenameColumn(&types.PreAuthKey{}, "namespace_id", "user_id")

					_ = tx.Migrator().
						RenameColumn(&types.Node{}, "ip_address", "ip_addresses")
					_ = tx.Migrator().RenameColumn(&types.Node{}, "name", "hostname")

					// GivenName is used as the primary source of DNS names; make sure
					// the field is populated and normalized if it was not when the
					// node was registered.
					_ = tx.Migrator().
						RenameColumn(&types.Node{}, "nickname", "given_name")

					// If the Node table has a column for registered,
					// find all occurrences of "false" and drop them. Then
					// remove the column.
					if tx.Migrator().HasColumn(&types.Node{}, "registered") {
						log.Info().
							Msg(`Database has legacy "registered" column in node, removing...`)

						nodes := types.Nodes{}
						if err := tx.Not("registered").Find(&nodes).Error; err != nil {
							log.Error().Err(err).Msg("Error accessing db")
						}

						for _, node := range nodes {
							log.Info().
								Str("node", node.Hostname).
								Str("machine_key", node.MachineKey.ShortString()).
								Msg("Deleting unregistered node")
							if err := tx.Delete(&types.Node{}, node.ID).Error; err != nil {
								log.Error().
									Err(err).
									Str("node", node.Hostname).
									Str("machine_key", node.MachineKey.ShortString()).
									Msg("Error deleting unregistered node")
							}
						}

						err := tx.Migrator().DropColumn(&types.Node{}, "registered")
						if err != nil {
							log.Error().Err(err).Msg("Error dropping registered column")
						}
					}

					err = tx.AutoMigrate(&types.Route{})
					if err != nil {
						return err
					}

					err = tx.AutoMigrate(&types.Node{})
					if err != nil {
						return err
					}

					// Ensure all keys have correct prefixes
					// https://github.com/tailscale/tailscale/blob/main/types/key/node.go#L35
					type result struct {
						ID         uint64
						MachineKey string
						NodeKey    string
						DiscoKey   string
					}
					var results []result
					err = tx.Raw("SELECT id, node_key, machine_key, disco_key FROM nodes").
						Find(&results).
						Error
					if err != nil {
						return err
					}

					for _, node := range results {
						mKey := node.MachineKey
						if !strings.HasPrefix(node.MachineKey, "mkey:") {
							mKey = "mkey:" + node.MachineKey
						}
						nKey := node.NodeKey
						if !strings.HasPrefix(node.NodeKey, "nodekey:") {
							nKey = "nodekey:" + node.NodeKey
						}

						dKey := node.DiscoKey
						if !strings.HasPrefix(node.DiscoKey, "discokey:") {
							dKey = "discokey:" + node.DiscoKey
						}

						err := tx.Exec(
							"UPDATE nodes SET machine_key = @mKey, node_key = @nKey, disco_key = @dKey WHERE ID = @id",
							sql.Named("mKey", mKey),
							sql.Named("nKey", nKey),
							sql.Named("dKey", dKey),
							sql.Named("id", node.ID),
						).Error
						if err != nil {
							return err
						}
					}

					if tx.Migrator().HasColumn(&types.Node{}, "enabled_routes") {
						log.Info().
							Msgf("Database has legacy enabled_routes column in node, migrating...")

						type NodeAux struct {
							ID            uint64
							EnabledRoutes types.IPPrefixes
						}

						nodesAux := []NodeAux{}
						err := tx.Table("nodes").
							Select("id, enabled_routes").
							Scan(&nodesAux).
							Error
						if err != nil {
							log.Fatal().Err(err).Msg("Error accessing db")
						}
						for _, node := range nodesAux {
							for _, prefix := range node.EnabledRoutes {
								err = tx.Preload("Node").
									Where("node_id = ? AND prefix = ?", node.ID, types.IPPrefix(prefix)).
									First(&types.Route{}).
									Error
								if err == nil {
									log.Info().
										Str("enabled_route", prefix.String()).
										Msg("Route already migrated to new table, skipping")

									continue
								}

								route := types.Route{
									NodeID:     node.ID,
									Advertised: true,
									Enabled:    true,
									Prefix:     types.IPPrefix(prefix),
								}
								if err := tx.Create(&route).Error; err != nil {
									log.Error().Err(err).Msg("Error creating route")
								} else {
									log.Info().
										Uint64("node_id", route.NodeID).
										Str("prefix", prefix.String()).
										Msg("Route migrated")
								}
							}
						}

						err = tx.Migrator().DropColumn(&types.Node{}, "enabled_routes")
						if err != nil {
							log.Error().
								Err(err).
								Msg("Error dropping enabled_routes column")
						}
					}

					if tx.Migrator().HasColumn(&types.Node{}, "given_name") {
						nodes := types.Nodes{}
						if err := tx.Find(&nodes).Error; err != nil {
							log.Error().Err(err).Msg("Error accessing db")
						}

						for item, node := range nodes {
							if node.GivenName == "" {
								normalizedHostname, err := util.NormalizeToFQDNRulesConfigFromViper(
									node.Hostname,
								)
								if err != nil {
									log.Error().
										Caller().
										Str("hostname", node.Hostname).
										Err(err).
										Msg("Failed to normalize node hostname in DB migration")
								}

								err = tx.Model(nodes[item]).Updates(types.Node{
									GivenName: normalizedHostname,
								}).Error
								if err != nil {
									log.Error().
										Caller().
										Str("hostname", node.Hostname).
										Err(err).
										Msg("Failed to save normalized node name in DB migration")
								}
							}
						}
					}

					err = tx.AutoMigrate(&KV{})
					if err != nil {
						return err
					}

					err = tx.AutoMigrate(&types.PreAuthKey{})
					if err != nil {
						return err
					}

					err = tx.AutoMigrate(&types.PreAuthKeyACLTag{})
					if err != nil {
						return err
					}

					_ = tx.Migrator().DropTable("shared_machines")

					err = tx.AutoMigrate(&types.APIKey{})
					if err != nil {
						return err
					}

					return nil
				},
				Rollback: func(tx *gorm.DB) error {
					return nil
				},
			},
			{
				// Drop the key-value table; it is not used and has not
				// contained useful data for a long time, or ever.
				ID: "202312101430",
				Migrate: func(tx *gorm.DB) error {
					return tx.Migrator().DropTable("kvs")
				},
				Rollback: func(tx *gorm.DB) error {
					return nil
				},
			},
			{
				// Remove last_successful_update from the node table;
				// it is no longer used.
				ID: "202402151347",
				Migrate: func(tx *gorm.DB) error {
					err := tx.Migrator().DropColumn(&types.Node{}, "last_successful_update")
					if err != nil && strings.Contains(err.Error(), `of relation "nodes" does not exist`) {
						// The column never existed in this database; nothing to do.
						return nil
					}

					return err
				},
				Rollback: func(tx *gorm.DB) error {
					return nil
				},
			},
			{
				// Replace the column holding a list of IP addresses with
				// dedicated IPv4 and IPv6 columns.
				// Note that previously the list _could_ contain more
				// than two addresses, which should not really happen.
				// In that case, the first occurrence of each type is kept.
				ID: "2024041121742",
				Migrate: func(tx *gorm.DB) error {
					_ = tx.Migrator().AddColumn(&types.Node{}, "ipv4")
					_ = tx.Migrator().AddColumn(&types.Node{}, "ipv6")

					type node struct {
						ID        uint64 `gorm:"column:id"`
						Addresses string `gorm:"column:ip_addresses"`
					}

					var nodes []node

					_ = tx.Raw("SELECT id, ip_addresses FROM nodes").Scan(&nodes).Error

					for _, node := range nodes {
						addrs := strings.Split(node.Addresses, ",")

						if len(addrs) == 0 {
							return fmt.Errorf("no addresses found for node(%d)", node.ID)
						}

						var v4 *netip.Addr
						var v6 *netip.Addr

						for _, addrStr := range addrs {
							addr, err := netip.ParseAddr(addrStr)
							if err != nil {
								return fmt.Errorf("parsing IP for node(%d) from database: %w", node.ID, err)
							}

							if addr.Is4() && v4 == nil {
								v4 = &addr
							}

							if addr.Is6() && v6 == nil {
								v6 = &addr
							}
						}

						if v4 != nil {
							err = tx.Model(&types.Node{}).Where("id = ?", node.ID).Update("ipv4", v4.String()).Error
							if err != nil {
								return fmt.Errorf("saving ip addresses to new columns: %w", err)
							}
						}

						if v6 != nil {
							err = tx.Model(&types.Node{}).Where("id = ?", node.ID).Update("ipv6", v6.String()).Error
							if err != nil {
								return fmt.Errorf("saving ip addresses to new columns: %w", err)
							}
						}
					}

					_ = tx.Migrator().DropColumn(&types.Node{}, "ip_addresses")

					return nil
				},
				Rollback: func(tx *gorm.DB) error {
					return nil
				},
			},
		},
	)

	if err = migrations.Migrate(); err != nil {
		log.Fatal().Err(err).Msgf("Migration failed: %v", err)
	}

	db := HSDatabase{
		DB: dbConn,

		baseDomain: baseDomain,
	}

	return &db, err
}
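
// openDB opens a connection to the configured database backend
// (SQLite or Postgres) and returns the gorm handle.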
func openDB(cfg types.DatabaseConfig) (*gorm.DB, error) {
	// TODO(kradalby): Integrate this with zerolog
	var dbLogger logger.Interface
	if cfg.Debug {
		dbLogger = logger.Default
	} else {
		dbLogger = logger.Default.LogMode(logger.Silent)
	}

	switch cfg.Type {
	case types.DatabaseSqlite:
		dir := filepath.Dir(cfg.Sqlite.Path)
		err := util.EnsureDir(dir)
		if err != nil {
			return nil, fmt.Errorf("creating directory for sqlite: %w", err)
		}

		log.Info().
			Str("database", types.DatabaseSqlite).
			Str("path", cfg.Sqlite.Path).
			Msg("Opening database")

		db, err := gorm.Open(
			sqlite.Open(cfg.Sqlite.Path+"?_synchronous=1&_journal_mode=WAL"),
			&gorm.Config{
				DisableForeignKeyConstraintWhenMigrating: true,
				Logger:                                   dbLogger,
			},
		)
		if err != nil {
			return nil, err
		}

		db.Exec("PRAGMA foreign_keys=ON")

		// The pure Go SQLite library does not handle locking in
		// the same way as the C-based one, and we can't use the gorm
		// connection pool as of 2022/02/23.
		sqlDB, _ := db.DB()
		sqlDB.SetMaxIdleConns(1)
		sqlDB.SetMaxOpenConns(1)
		sqlDB.SetConnMaxIdleTime(time.Hour)

		return db, err

	case types.DatabasePostgres:
		dbString := fmt.Sprintf(
			"host=%s dbname=%s user=%s",
			cfg.Postgres.Host,
			cfg.Postgres.Name,
			cfg.Postgres.User,
		)

		log.Info().
			Str("database", types.DatabasePostgres).
			Str("path", dbString).
			Msg("Opening database")

		if sslEnabled, err := strconv.ParseBool(cfg.Postgres.Ssl); err == nil {
			if !sslEnabled {
				dbString += " sslmode=disable"
			}
		} else {
			dbString += fmt.Sprintf(" sslmode=%s", cfg.Postgres.Ssl)
		}

		if cfg.Postgres.Port != 0 {
			dbString += fmt.Sprintf(" port=%d", cfg.Postgres.Port)
		}

		if cfg.Postgres.Pass != "" {
			dbString += fmt.Sprintf(" password=%s", cfg.Postgres.Pass)
		}

		db, err := gorm.Open(postgres.Open(dbString), &gorm.Config{
			DisableForeignKeyConstraintWhenMigrating: true,
			Logger:                                   dbLogger,
		})
		if err != nil {
			return nil, err
		}

		sqlDB, _ := db.DB()
		sqlDB.SetMaxIdleConns(cfg.Postgres.MaxIdleConnections)
		sqlDB.SetMaxOpenConns(cfg.Postgres.MaxOpenConnections)
		sqlDB.SetConnMaxIdleTime(
			time.Duration(cfg.Postgres.ConnMaxIdleTimeSecs) * time.Second,
		)

		return db, nil
	}

	return nil, fmt.Errorf(
		"database of type %s is not supported: %w",
		cfg.Type,
		errDatabaseNotSupported,
	)
}
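
// PingDB verifies that the underlying database connection is alive,
// bounded by a one-second timeout.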
func (hsdb *HSDatabase) PingDB(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	sqlDB, err := hsdb.DB.DB()
	if err != nil {
		return err
	}

	return sqlDB.PingContext(ctx)
}
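
// Close closes the underlying database connection.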
func (hsdb *HSDatabase) Close() error {
	db, err := hsdb.DB.DB()
	if err != nil {
		return err
	}

	return db.Close()
}
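
// Read runs fn inside a transaction that is always rolled back, so any
// changes fn makes are never persisted.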
func (hsdb *HSDatabase) Read(fn func(rx *gorm.DB) error) error {
	rx := hsdb.DB.Begin()
	defer rx.Rollback()

	return fn(rx)
}
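
// Read is the generic counterpart of HSDatabase.Read: it runs fn in a
// transaction that is always rolled back and returns fn's result.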
func Read[T any](db *gorm.DB, fn func(rx *gorm.DB) (T, error)) (T, error) {
	rx := db.Begin()
	defer rx.Rollback()
	ret, err := fn(rx)
	if err != nil {
		var no T
		return no, err
	}
	return ret, nil
}
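
// Write runs fn inside a transaction and commits it if fn returns nil;
// otherwise the transaction is rolled back.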
func (hsdb *HSDatabase) Write(fn func(tx *gorm.DB) error) error {
	tx := hsdb.DB.Begin()
	defer tx.Rollback()
	if err := fn(tx); err != nil {
		return err
	}

	return tx.Commit().Error
}
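
// Write is the generic counterpart of HSDatabase.Write: it runs fn in a
// transaction and commits only when fn returns a nil error. A sketch of a
// typical call (the SQL statement here is illustrative only):
//
//	count, err := Write(db, func(tx *gorm.DB) (int64, error) {
//		res := tx.Exec("UPDATE nodes SET given_name = hostname WHERE given_name = ''")
//		return res.RowsAffected, res.Error
//	})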
func Write[T any](db *gorm.DB, fn func(tx *gorm.DB) (T, error)) (T, error) {
	tx := db.Begin()
	defer tx.Rollback()
	ret, err := fn(tx)
	if err != nil {
		var no T
		return no, err
	}
	return ret, tx.Commit().Error
}