zitadel/internal/migration/trigger.go
Livio Spring 0753ed2d6b feat(service ping): add additional resource counts (#10621)
# Which Problems Are Solved

Using the service ping, we want to gain some additional insight into how
zitadel is configured. The current resource count report already contains
counts for some of the configured policies, such as the login_policy.
But it does not tell us whether, for example, MFA is enforced.

# How the Problems Are Solved

- Added the following counts to the report:
  - service users per organization
  - MFA enforcements (through the login policy)
  - Notification policies with the password change option enabled
  - SCIM provisioned users (using user metadata)
- Since all of the above depend on the value of at least one column inside
a projection, a new `migration.CountTriggerConditional` has been added,
which takes a condition (column values) and an option to also track updates
of those columns for the count (see the example after this list).
- For this to be possible, the following changes had to be made to the
existing SQL resources:
  - the `resource_name` has been added to the unique constraint on the
`projection.resource_counts` table
  - triggers have been added / changed to individually track `INSERT`,
`UPDATE`(s) and `DELETE` and to be able to handle conditions
  - an optional argument has been added to the
`projections.count_resource()` function to allow providing the information
whether to count the resource `UP` or `DOWN` on an update.
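
As an illustration, registering such a conditional count could look roughly
like the sketch below. The projection table, column values and the
`domain.CountParentType` constant are placeholders and not necessarily the
exact ones used in this PR:

```go
import (
	"github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/domain"
	"github.com/zitadel/zitadel/internal/migration"
)

// A minimal sketch, assuming a *database.DB handle and placeholder
// projection/column names; the domain.CountParentType constant name is
// an assumption as well.
func serviceUserCountMigration(db *database.DB) migration.RepeatableMigration {
	return migration.CountTriggerConditional(
		db,
		"projections.users",                // source projection table (placeholder)
		domain.CountParentTypeOrganization, // assumed parent type constant
		"instance_id",                      // instance ID column (placeholder)
		"resource_owner",                   // parent ID column (placeholder)
		"service_user",                     // reported resource name (placeholder)
		true,                               // trackChange: adjust the count when the condition column is updated
		migration.TriggerCondition{Column: "type", Value: 1}, // placeholder condition
	)
}
```

The returned `RepeatableMigration` re-runs whenever any of its arguments
change, e.g. when a projection table is renamed.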

# Additional Changes

None

# Additional Context

- partially solves #10244 (reporting the audit log retention limit will be
handled directly in #10245)
- backport to v4.x

(cherry picked from commit 2dbe21fb30)
2025-09-15 08:30:02 +02:00


package migration

import (
	"context"
	"embed"
	"fmt"
	"reflect"
	"strings"
	"text/template"

	"github.com/mitchellh/mapstructure"

	"github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/domain"
	"github.com/zitadel/zitadel/internal/eventstore"
)

const (
	countTriggerTmpl       = "count_trigger"
	deleteParentCountsTmpl = "delete_parent_counts_trigger"
)

var (
	//go:embed *.sql
	templateFS embed.FS
	templates  = template.Must(template.ParseFS(templateFS, "*.sql"))
)

// CountTrigger registers the existing projections.count_trigger function.
// The trigger then takes care of keeping count of existing
// rows in the source table.
// It also pre-populates the projections.resource_counts table with
// the counts for the given table.
//
// During the population of the resource_counts table,
// the source table is share-locked to prevent concurrent modifications.
// Projection handlers will be halted until the lock is released.
// SELECT statements are not blocked by the lock.
//
// This migration repeats when any of the arguments are changed,
// such as renaming of a projection table.
func CountTrigger(
	db *database.DB,
	table string,
	parentType domain.CountParentType,
	instanceIDColumn string,
	parentIDColumn string,
	resource string,
) RepeatableMigration {
	return CountTriggerConditional(
		db,
		table,
		parentType,
		instanceIDColumn,
		parentIDColumn,
		resource,
		false,
		nil,
	)
}

// CountTriggerConditional registers the existing projections.count_trigger function
// with conditions for specific column values and will only count rows that meet the conditions.
// The trigger then takes care of keeping count of existing
// rows in the source table.
// Additionally, if trackChange is true, the trigger will also keep track of
// updates to the rows that meet the conditions in case the values of the
// specified columns change.
// It also pre-populates the projections.resource_counts table with
// the counts for the given table.
//
// During the population of the resource_counts table,
// the source table is share-locked to prevent concurrent modifications.
// Projection handlers will be halted until the lock is released.
// SELECT statements are not blocked by the lock.
//
// This migration repeats when any of the arguments are changed,
// such as renaming of a projection table.
func CountTriggerConditional(
	db *database.DB,
	table string,
	parentType domain.CountParentType,
	instanceIDColumn string,
	parentIDColumn string,
	resource string,
	trackChange bool,
	conditions TriggerConditions,
) RepeatableMigration {
	return &triggerMigration{
		triggerConfig: triggerConfig{
			Table:            table,
			ParentType:       parentType.String(),
			InstanceIDColumn: instanceIDColumn,
			ParentIDColumn:   parentIDColumn,
			Resource:         resource,
			Conditions:       conditions,
			TrackChange:      trackChange,
		},
		db:           db,
		templateName: countTriggerTmpl,
	}
}

type TriggerConditions interface {
	ToSQL(table string, conditionsMet bool) string
}

type TriggerCondition struct {
	Column string `json:"column" mapstructure:"column"`
	Value  any    `json:"value" mapstructure:"value"`
}

// ToSQL implements the [TriggerConditions] interface.
// If conditionsMet is true, the SQL will be built to match when the condition is met.
// If conditionsMet is false, the SQL will be built to match when the condition is not met.
// e.g. col='value' vs col<>'value'
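// For illustration (hypothetical table and column names), the calls
//
//	TriggerCondition{Column: "type", Value: 1}.ToSQL("users", true)
//	TriggerCondition{Column: "state", Value: "active"}.ToSQL("users", false)
//
// produce "users.type = 1" and "users.state <> 'active'" respectively.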
func (t TriggerCondition) ToSQL(table string, conditionsMet bool) string {
	value := fmt.Sprintf("%v", t.Value)
	if reflect.TypeOf(t.Value).Kind() == reflect.String {
		value = fmt.Sprintf("'%s'", t.Value)
	}
	operator := "="
	if !conditionsMet {
		operator = "<>"
	}
	return fmt.Sprintf("%s.%s %s %s", table, t.Column, operator, value)
}

type OrCondition struct {
	Conditions []TriggerCondition `json:"orConditions" mapstructure:"orConditions"`
}

// ToSQL implements the [TriggerConditions] interface.
// If conditionsMet is true, the SQL will be built to match when any of the conditions are met (OR).
// If conditionsMet is false, the SQL will be built to match when none of the conditions are met (AND).
// e.g. col1='value' OR col2='value' vs col1<>'value' AND col2<>'value'
func (t OrCondition) ToSQL(table string, conditionsMet bool) string {
	separator := " OR "
	if !conditionsMet {
		separator = " AND "
	}
	return toSQL(t.Conditions, table, separator, conditionsMet)
}

type AndCondition struct {
	Conditions []TriggerCondition `json:"andConditions" mapstructure:"andConditions"`
}

// ToSQL implements the [TriggerConditions] interface.
// If conditionsMet is true, the SQL will be built to check if all conditions are met (AND).
// If conditionsMet is false, the SQL will be built to check if any condition is not met (OR).
// e.g. col1='value' AND col2='value' vs col1<>'value' OR col2<>'value'
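// For illustration (hypothetical table and column names), the call
//
//	AndCondition{Conditions: []TriggerCondition{
//		{Column: "state", Value: "active"},
//		{Column: "type", Value: 1},
//	}}.ToSQL("users", false)
//
// produces "(users.state <> 'active' OR users.type <> 1)".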
func (t AndCondition) ToSQL(table string, conditionsMet bool) string {
	separator := " AND "
	if !conditionsMet {
		separator = " OR "
	}
	return toSQL(t.Conditions, table, separator, conditionsMet)
}

func toSQL(conditions []TriggerCondition, table, separator string, conditionsMet bool) string {
	if len(conditions) == 0 {
		return ""
	}
	parts := make([]string, len(conditions))
	for i, condition := range conditions {
		parts[i] = condition.ToSQL(table, conditionsMet)
	}
	return "(" + strings.Join(parts, separator) + ")"
}

// DeleteParentCountsTrigger
//
// This migration repeats when any of the arguments are changed,
// such as renaming of a projection table.
func DeleteParentCountsTrigger(
	db *database.DB,
	table string,
	parentType domain.CountParentType,
	instanceIDColumn string,
	parentIDColumn string,
	resource string,
) RepeatableMigration {
	return &triggerMigration{
		triggerConfig: triggerConfig{
			Table:            table,
			ParentType:       parentType.String(),
			InstanceIDColumn: instanceIDColumn,
			ParentIDColumn:   parentIDColumn,
			Resource:         resource,
		},
		db:           db,
		templateName: deleteParentCountsTmpl,
	}
}

type triggerMigration struct {
	triggerConfig
	db           *database.DB
	templateName string
}

// String implements [Migration] and [fmt.Stringer].
func (m *triggerMigration) String() string {
	return fmt.Sprintf("repeatable_%s_%s", m.Resource, m.templateName)
}

// Execute implements [Migration]
func (m *triggerMigration) Execute(ctx context.Context, _ eventstore.Event) error {
	var query strings.Builder
	err := templates.ExecuteTemplate(&query, m.templateName, m.triggerConfig)
	if err != nil {
		return fmt.Errorf("%s: execute trigger template: %w", m, err)
	}
	_, err = m.db.ExecContext(ctx, query.String())
	if err != nil {
		return fmt.Errorf("%s: exec trigger query: %w", m, err)
	}
	return nil
}

type triggerConfig struct {
	Table            string            `json:"table,omitempty" mapstructure:"table"`
	ParentType       string            `json:"parent_type,omitempty" mapstructure:"parent_type"`
	InstanceIDColumn string            `json:"instance_id_column,omitempty" mapstructure:"instance_id_column"`
	ParentIDColumn   string            `json:"parent_id_column,omitempty" mapstructure:"parent_id_column"`
	Resource         string            `json:"resource,omitempty" mapstructure:"resource"`
	Conditions       TriggerConditions `json:"conditions,omitempty" mapstructure:"conditions"`
	TrackChange      bool              `json:"track_change,omitempty" mapstructure:"track_change"`
}

// Check implements [RepeatableMigration].
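// It decodes the previously stored configuration (lastRun) back into a
// triggerConfig and compares it to the current one; the migration repeats
// whenever the two differ. For illustration, if a projection table was
// renamed (say from a hypothetical "projections.users13" to
// "projections.users14"), the decoded and current configs no longer match
// and Check returns true.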
func (c *triggerConfig) Check(lastRun map[string]any) bool {
	var dst triggerConfig
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook:       DecodeTriggerConditionsHook(),
		WeaklyTypedInput: true,
		Result:           &dst,
	})
	if err != nil {
		panic(err)
	}
	if err = decoder.Decode(lastRun); err != nil {
		return true
	}
	return !reflect.DeepEqual(dst, *c)
}

// DecodeTriggerConditionsHook returns a mapstructure.DecodeHookFunc that can decode
// a map into the correct concrete type implementing [TriggerConditions].
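// The concrete type is chosen based on the keys present in the map:
// "orConditions" decodes into an *OrCondition, "andConditions" into an
// *AndCondition and "column" into a *TriggerCondition. For illustration,
// a stored value such as
//
//	map[string]any{"column": "type", "value": 1}
//
// is decoded into &TriggerCondition{Column: "type", Value: 1}.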
func DecodeTriggerConditionsHook() mapstructure.DecodeHookFunc {
	return func(
		from reflect.Type,
		to reflect.Type,
		data interface{},
	) (interface{}, error) {
		if to != reflect.TypeOf((*TriggerConditions)(nil)).Elem() {
			return data, nil
		}
		mapData, ok := data.(map[string]interface{})
		if !ok {
			return nil, fmt.Errorf("expected a map for TriggerConditions, but got %T", data)
		}
		var result TriggerConditions
		if _, ok := mapData["orConditions"]; ok {
			result = &OrCondition{}
		} else if _, ok := mapData["andConditions"]; ok {
			result = &AndCondition{}
		} else if _, ok := mapData["column"]; ok {
			result = &TriggerCondition{}
		} else {
			return data, nil
		}
		decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
			Result:     result,
			DecodeHook: DecodeTriggerConditionsHook(),
		})
		if err != nil {
			return nil, err
		}
		if err := decoder.Decode(mapData); err != nil {
			return nil, err
		}
		return result, nil
	}
}