feat(service ping): add additional resource counts (#10621)

# Which Problems Are Solved

Using the service ping, we want to have some additional insights into how
zitadel is configured. The current resource count report contains
already some amount of configured policies, such as the login_policy.
But we do not know if for example MFA is enforced.

# How the Problems Are Solved

- Added the following counts to the report:
  - service users per organization
  - MFA enforcements (through the login policy)
  - Notification policies with password change option enabled
  - SCIM provisioned users (using user metadata)
- Since all of the above are conditional based on at least a column
inside a projection, a new `migration.CountTriggerConditional` has been
added, where a condition (column values) and an option to track updates
on that column should be considered for the count.
- For this to be possible, the following changes had to be made to the
existing sql resources:
- the `resource_name` has been added to unique constraint on the
`projection.resource_counts` table
- triggers have been added / changed to individually track `INSERT`,
`UPDATE`(s) and `DELETE` and be able to handle conditions
- an optional argument has been added to the
`projections.count_resource()` function to allow providing the
information to `UP` or `DOWN` count the resource on an update.

# Additional Changes

None

# Additional Context

- partially solves #10244 (reporting audit log retention limit will be
handled in #10245 directly)
- backport to v4.x
This commit is contained in:
Livio Spring
2025-09-08 18:30:03 +02:00
committed by GitHub
parent ef058c8de6
commit 2dbe21fb30
10 changed files with 691 additions and 28 deletions

27
cmd/setup/63.go Normal file
View File

@@ -0,0 +1,27 @@
package setup
import (
"context"
_ "embed"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/eventstore"
)
var (
//go:embed 63.sql
alterResourceCounts string
)
// AlterResourceCounts is setup step 63. It executes the embedded 63.sql,
// which extends the projections.resource_counts unique constraint with
// resource_name and redefines the projections.count_resource trigger function.
type AlterResourceCounts struct {
dbClient *database.DB
}
// Execute runs the embedded 63.sql statements against the database.
func (mig *AlterResourceCounts) Execute(ctx context.Context, _ eventstore.Event) error {
	if _, err := mig.dbClient.ExecContext(ctx, alterResourceCounts); err != nil {
		return err
	}
	return nil
}
// String returns the unique identifier of this setup step.
func (mig *AlterResourceCounts) String() string {
	const stepName = "63_alter_resource_counts"
	return stepName
}

57
cmd/setup/63.sql Normal file
View File

@@ -0,0 +1,57 @@
-- Replace the unique constraint on resource_counts to also include
-- resource_name, so multiple counted resources can share the same source
-- table (e.g. "user" and "user_machine" are both counted from the users
-- projection).
ALTER TABLE projections.resource_counts
DROP CONSTRAINT resource_counts_instance_id_parent_type_parent_id_table_nam_key;
ALTER TABLE projections.resource_counts
ADD CONSTRAINT unique_resource
UNIQUE (instance_id, parent_type, parent_id, table_name, resource_name);
-- count_resource is a trigger function which increases or decreases the count of a resource.
-- When creating the trigger the following arguments (TG_ARGV) can be passed (1-4 are required):
-- 1. The type of the parent
-- 2. The column name of the instance id
-- 3. The column name of the owner id
-- 4. The name of the resource
-- 5. (optional) 'UP' or 'DOWN' to indicate if an UPDATE should count up or down.
CREATE OR REPLACE FUNCTION projections.count_resource()
RETURNS trigger
LANGUAGE 'plpgsql' VOLATILE
AS $$
DECLARE
-- trigger variables
tg_table_name TEXT := TG_TABLE_SCHEMA || '.' || TG_TABLE_NAME;
tg_parent_type TEXT := TG_ARGV[0];
tg_instance_id_column TEXT := TG_ARGV[1];
tg_parent_id_column TEXT := TG_ARGV[2];
tg_resource_name TEXT := TG_ARGV[3];
tg_update_method TEXT := coalesce(TG_ARGV[4], '');
tg_instance_id TEXT;
tg_parent_id TEXT;
-- dynamic row-access query; %I safely quotes the column identifiers.
select_ids TEXT := format('SELECT ($1).%I, ($1).%I', tg_instance_id_column, tg_parent_id_column);
BEGIN
IF (TG_OP = 'INSERT' OR (TG_OP = 'UPDATE' and tg_update_method = 'UP')) THEN
EXECUTE select_ids INTO tg_instance_id, tg_parent_id USING NEW;
-- upsert: first row for this resource creates the count (default amount),
-- subsequent rows increment it.
INSERT INTO projections.resource_counts(instance_id, table_name, parent_type, parent_id, resource_name)
VALUES (tg_instance_id, tg_table_name, tg_parent_type, tg_parent_id, tg_resource_name)
ON CONFLICT (instance_id, table_name, parent_type, parent_id, resource_name) DO
UPDATE SET updated_at = now(), amount = projections.resource_counts.amount + 1;
RETURN NEW;
ELSEIF (TG_OP = 'DELETE' OR (TG_OP = 'UPDATE' and tg_update_method = 'DOWN')) THEN
EXECUTE select_ids INTO tg_instance_id, tg_parent_id USING OLD;
UPDATE projections.resource_counts
SET updated_at = now(), amount = amount - 1
WHERE instance_id = tg_instance_id
AND table_name = tg_table_name
AND parent_type = tg_parent_type
AND parent_id = tg_parent_id
AND resource_name = tg_resource_name
AND amount > 0; -- prevent check failure on negative amount.
RETURN OLD;
END IF;
-- Fallback for operations not matched above (e.g. an UPDATE trigger created
-- without the 'UP'/'DOWN' argument). Without an explicit RETURN, PL/pgSQL
-- would raise "control reached end of trigger procedure without RETURN".
RETURN NULL;
END
$$;

View File

@@ -159,6 +159,7 @@ type Steps struct {
s60GenerateSystemID *GenerateSystemID
s61IDPTemplate6SAMLSignatureAlgorithm *IDPTemplate6SAMLSignatureAlgorithm
s62HTTPProviderAddSigningKey *HTTPProviderAddSigningKey
s63AlterResourceCounts *AlterResourceCounts
}
func MustNewSteps(v *viper.Viper) *Steps {

View File

@@ -220,6 +220,7 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
steps.s60GenerateSystemID = &GenerateSystemID{eventstore: eventstoreClient}
steps.s61IDPTemplate6SAMLSignatureAlgorithm = &IDPTemplate6SAMLSignatureAlgorithm{dbClient: dbClient}
steps.s62HTTPProviderAddSigningKey = &HTTPProviderAddSigningKey{dbClient: dbClient}
steps.s63AlterResourceCounts = &AlterResourceCounts{dbClient: dbClient}
err = projection.Create(ctx, dbClient, eventstoreClient, config.Projections, nil, nil, nil)
logging.OnError(err).Fatal("unable to start projections")
@@ -270,6 +271,7 @@ func Setup(ctx context.Context, config *Config, steps *Steps, masterKey string)
steps.s60GenerateSystemID,
steps.s61IDPTemplate6SAMLSignatureAlgorithm,
steps.s62HTTPProviderAddSigningKey,
steps.s63AlterResourceCounts,
} {
setupErr = executeMigration(ctx, eventstoreClient, step, "migration failed")
if setupErr != nil {

View File

@@ -3,10 +3,12 @@ package setup
import (
"fmt"
"github.com/zitadel/zitadel/internal/api/scim/metadata"
"github.com/zitadel/zitadel/internal/database"
"github.com/zitadel/zitadel/internal/domain"
"github.com/zitadel/zitadel/internal/migration"
"github.com/zitadel/zitadel/internal/query/projection"
"github.com/zitadel/zitadel/internal/serviceping"
)
// triggerSteps defines the repeatable migrations that set up triggers
@@ -19,14 +21,14 @@ func triggerSteps(db *database.DB) []migration.RepeatableMigration {
domain.CountParentTypeInstance,
projection.InstanceColumnID,
projection.InstanceColumnID,
"instance",
serviceping.ResourceCountInstance,
),
migration.DeleteParentCountsTrigger(db,
projection.OrgProjectionTable,
domain.CountParentTypeOrganization,
projection.OrgColumnInstanceID,
projection.OrgColumnID,
"organization",
serviceping.ResourceCountOrganization,
),
// Count triggers for all the resources
@@ -35,91 +37,143 @@ func triggerSteps(db *database.DB) []migration.RepeatableMigration {
domain.CountParentTypeInstance,
projection.OrgColumnInstanceID,
projection.OrgColumnInstanceID,
"organization",
serviceping.ResourceCountOrganization,
),
migration.CountTrigger(db,
projection.ProjectProjectionTable,
domain.CountParentTypeOrganization,
projection.ProjectColumnInstanceID,
projection.ProjectColumnResourceOwner,
"project",
serviceping.ResourceCountProject,
),
migration.CountTrigger(db,
projection.UserTable,
domain.CountParentTypeOrganization,
projection.UserInstanceIDCol,
projection.UserResourceOwnerCol,
"user",
serviceping.ResourceCountUser,
),
migration.CountTriggerConditional(db,
projection.UserTable,
domain.CountParentTypeOrganization,
projection.UserInstanceIDCol,
projection.UserResourceOwnerCol,
serviceping.ResourceCountUserMachine,
false, // the user type cannot change, so we do not need to track updates
&migration.TriggerCondition{
Column: projection.UserTypeCol,
// since we marshal the value into and from json,
// we directly use the float64 value to prevent issues with the comparison of the previous migration
Value: float64(2),
},
),
migration.CountTrigger(db,
projection.InstanceMemberProjectionTable,
domain.CountParentTypeInstance,
projection.MemberInstanceID,
projection.MemberResourceOwner,
"iam_admin",
serviceping.ResourceCountIAMAdmin,
),
migration.CountTrigger(db,
projection.IDPTable,
domain.CountParentTypeInstance,
projection.IDPInstanceIDCol,
projection.IDPInstanceIDCol,
"identity_provider",
serviceping.ResourceCountIdentityProvider,
),
migration.CountTrigger(db,
projection.IDPTemplateLDAPTable,
domain.CountParentTypeInstance,
projection.LDAPInstanceIDCol,
projection.LDAPInstanceIDCol,
"identity_provider_ldap",
serviceping.ResourceCountIdentityProviderLDAP,
),
migration.CountTrigger(db,
projection.ActionTable,
domain.CountParentTypeInstance,
projection.ActionInstanceIDCol,
projection.ActionInstanceIDCol,
"action_v1",
serviceping.ResourceCountActionV1,
),
migration.CountTrigger(db,
projection.ExecutionTable,
domain.CountParentTypeInstance,
projection.ExecutionInstanceIDCol,
projection.ExecutionInstanceIDCol,
"execution",
serviceping.ResourceCountActionExecution,
),
migration.CountTrigger(db,
fmt.Sprintf("%s_%s", projection.ExecutionTable, projection.ExecutionTargetSuffix),
domain.CountParentTypeInstance,
projection.ExecutionTargetInstanceIDCol,
projection.ExecutionTargetInstanceIDCol,
"execution_target",
serviceping.ResourceCountActionExecutionTarget,
),
migration.CountTrigger(db,
projection.LoginPolicyTable,
domain.CountParentTypeInstance,
projection.LoginPolicyInstanceIDCol,
projection.LoginPolicyInstanceIDCol,
"login_policy",
serviceping.ResourceCountLoginPolicy,
),
migration.CountTriggerConditional(db,
projection.LoginPolicyTable,
domain.CountParentTypeInstance,
projection.LoginPolicyInstanceIDCol,
projection.LoginPolicyInstanceIDCol,
serviceping.ResourceCountEnforceMFA,
true,
&migration.OrCondition{
Conditions: []migration.TriggerCondition{
{Column: projection.LoginPolicyForceMFACol, Value: true},
{Column: projection.LoginPolicyForceMFALocalOnlyCol, Value: true},
},
},
),
migration.CountTrigger(db,
projection.PasswordComplexityTable,
domain.CountParentTypeInstance,
projection.ComplexityPolicyInstanceIDCol,
projection.ComplexityPolicyInstanceIDCol,
"password_complexity_policy",
serviceping.ResourceCountPasswordComplexityPolicy,
),
migration.CountTrigger(db,
projection.PasswordAgeTable,
domain.CountParentTypeInstance,
projection.AgePolicyInstanceIDCol,
projection.AgePolicyInstanceIDCol,
"password_expiry_policy",
serviceping.ResourceCountPasswordExpiryPolicy,
),
migration.CountTrigger(db,
projection.LockoutPolicyTable,
domain.CountParentTypeInstance,
projection.LockoutPolicyInstanceIDCol,
projection.LockoutPolicyInstanceIDCol,
"lockout_policy",
serviceping.ResourceCountLockoutPolicy,
),
migration.CountTriggerConditional(db,
projection.NotificationPolicyProjectionTable,
domain.CountParentTypeInstance,
projection.NotificationPolicyColumnInstanceID,
projection.NotificationPolicyColumnInstanceID,
serviceping.ResourceCountPasswordChangeNotification,
true,
&migration.TriggerCondition{
Column: projection.NotificationPolicyColumnPasswordChange,
Value: true,
},
),
// Count SCIM-provisioned users per organization via the SCIM emails
// metadata key on the user metadata projection.
migration.CountTriggerConditional(db,
projection.UserMetadataProjectionTable,
domain.CountParentTypeOrganization,
projection.UserMetadataColumnInstanceID,
// use the user-metadata projection's own resource owner column;
// LockoutPolicyResourceOwnerCol happened to resolve to the same column
// name, but referenced the wrong projection's constant.
projection.UserMetadataColumnResourceOwner,
serviceping.ResourceCountScimProvisionedUser,
false, // the key cannot change, so we do not need to track updates
&migration.TriggerCondition{
Column: projection.UserMetadataColumnKey,
Value: metadata.KeyEmails,
},
),
}
}

View File

@@ -37,15 +37,19 @@ The following resources are counted:
- Organizations
- Projects per organization
- Users per organization
- Users of type machine per organization
- SCIM provisioned users per organization
- Instance Administrators
- Identity Providers
- LDAP Identity Providers
- Actions (V1)
- Targets and set up executions
- Login Policies
- MFA enforcement (if either MFA is required for local or all users through the login policy)
- Password Complexity Policies
- Password Expiry Policies
- Lockout Policies
- Notification Policies with option "Password change" enabled
The list might be extended in the future to include more resources.

View File

@@ -1,8 +1,13 @@
{{ define "count_trigger" -}}
CREATE OR REPLACE TRIGGER count_{{ .Resource }}
AFTER INSERT OR DELETE
-- In case the old trigger exists, drop it to prevent duplicated counts.
DROP TRIGGER IF EXISTS count_{{ .Resource }} ON {{ .Table }};
CREATE OR REPLACE TRIGGER count_{{ .Resource }}_insert
AFTER INSERT
ON {{ .Table }}
FOR EACH ROW
-- Only count if the conditions are met in the newly added row.
{{if .Conditions}}WHEN ({{.Conditions.ToSQL "NEW" true}}){{end}}
EXECUTE FUNCTION projections.count_resource(
'{{ .ParentType }}',
'{{ .InstanceIDColumn }}',
@@ -10,6 +15,49 @@ CREATE OR REPLACE TRIGGER count_{{ .Resource }}
'{{ .Resource }}'
);
CREATE OR REPLACE TRIGGER count_{{ .Resource }}_delete
AFTER DELETE
ON {{ .Table }}
FOR EACH ROW
-- Only count down if the conditions were met in the old / deleted row.
{{if .Conditions}}WHEN ({{.Conditions.ToSQL "OLD" true}}){{end}}
EXECUTE FUNCTION projections.count_resource(
'{{ .ParentType }}',
'{{ .InstanceIDColumn }}',
'{{ .ParentIDColumn }}',
'{{ .Resource }}'
);
{{if .TrackChange}}
CREATE OR REPLACE TRIGGER count_{{ .Resource }}_update_up
AFTER UPDATE
ON {{ .Table }}
FOR EACH ROW
-- Only count up if the conditions are met in the new state, but were not in the old.
WHEN ({{.Conditions.ToSQL "NEW" true}} AND {{.Conditions.ToSQL "OLD" false}})
EXECUTE FUNCTION projections.count_resource(
'{{ .ParentType }}',
'{{ .InstanceIDColumn }}',
'{{ .ParentIDColumn }}',
'{{ .Resource }}',
'UP'
);
CREATE OR REPLACE TRIGGER count_{{ .Resource }}_update_down
AFTER UPDATE
ON {{ .Table }}
FOR EACH ROW
-- Only count down if the conditions are not met in the new state, but were in the old.
WHEN ({{.Conditions.ToSQL "NEW" false}} AND {{.Conditions.ToSQL "OLD" true}})
EXECUTE FUNCTION projections.count_resource(
'{{ .ParentType }}',
'{{ .InstanceIDColumn }}',
'{{ .ParentIDColumn }}',
'{{ .Resource }}',
'DOWN'
);
{{end}}
CREATE OR REPLACE TRIGGER truncate_{{ .Resource }}_counts
AFTER TRUNCATE
ON {{ .Table }}
@@ -36,8 +84,9 @@ SELECT
'{{ .Resource }}',
COUNT(*) AS amount
FROM {{ .Table }}
{{if .Conditions}}WHERE {{.Conditions.ToSQL .Table true}}{{end}}
GROUP BY ({{ .InstanceIDColumn }}, {{ .ParentIDColumn }})
ON CONFLICT (instance_id, table_name, parent_type, parent_id) DO
ON CONFLICT (instance_id, table_name, parent_type, parent_id, resource_name) DO
UPDATE SET updated_at = now(), amount = EXCLUDED.amount;
{{- end -}}

View File

@@ -4,6 +4,7 @@ import (
"context"
"embed"
"fmt"
"reflect"
"strings"
"text/template"
@@ -45,6 +46,45 @@ func CountTrigger(
instanceIDColumn string,
parentIDColumn string,
resource string,
) RepeatableMigration {
return CountTriggerConditional(
db,
table,
parentType,
instanceIDColumn,
parentIDColumn,
resource,
false,
nil,
)
}
// CountTriggerConditional registers the existing projections.count_trigger function
// with conditions for specific column values and will only count rows that meet the conditions.
// The trigger then takes care of keeping count of existing
// rows in the source table.
// Additionally, if trackChange is true, the trigger will also keep track of
// updates to the rows that meet the conditions in case the values of the
// specified columns change.
// It also pre-populates the projections.resource_counts table with
// the counts for the given table.
//
// During the population of the resource_counts table,
// the source table is share-locked to prevent concurrent modifications.
// Projection handlers will be halted until the lock is released.
// SELECT statements are not blocked by the lock.
//
// This migration repeats when any of the arguments are changed,
// such as renaming of a projection table.
func CountTriggerConditional(
db *database.DB,
table string,
parentType domain.CountParentType,
instanceIDColumn string,
parentIDColumn string,
resource string,
trackChange bool,
conditions TriggerConditions,
) RepeatableMigration {
return &triggerMigration{
triggerConfig: triggerConfig{
@@ -53,12 +93,86 @@ func CountTrigger(
InstanceIDColumn: instanceIDColumn,
ParentIDColumn: parentIDColumn,
Resource: resource,
Conditions: conditions,
TrackChange: trackChange,
},
db: db,
templateName: countTriggerTmpl,
}
}
// TriggerConditions renders a boolean SQL expression for conditional count
// triggers. table is the table name or row reference (e.g. "NEW" / "OLD")
// the columns are qualified with. If conditionsMet is true the expression
// matches rows fulfilling the condition(s); if false it matches the negation.
type TriggerConditions interface {
	ToSQL(table string, conditionsMet bool) string
}

// TriggerCondition compares a single column against a fixed value.
type TriggerCondition struct {
	Column string `json:"column" mapstructure:"column"`
	Value  any    `json:"value" mapstructure:"value"`
}

// ToSQL implements the [TriggerConditions] interface.
// If conditionsMet is true, the SQL will be built to match when the condition is met.
// If conditionsMet is false, the SQL will be built to match when the condition is not met.
// e.g. col='value' vs col<>'value'
func (t TriggerCondition) ToSQL(table string, conditionsMet bool) string {
	value := fmt.Sprintf("%v", t.Value)
	// reflect (instead of a plain type assertion) is used on purpose, so that
	// named string types (e.g. metadata keys) are quoted as well.
	// The nil guard prevents a panic, as reflect.TypeOf(nil) returns nil.
	if t.Value != nil && reflect.TypeOf(t.Value).Kind() == reflect.String {
		// escape embedded single quotes to keep the generated SQL valid
		value = fmt.Sprintf("'%s'", strings.ReplaceAll(value, "'", "''"))
	}
	operator := "="
	if !conditionsMet {
		operator = "<>"
	}
	return fmt.Sprintf("%s.%s %s %s", table, t.Column, operator, value)
}

// OrCondition combines multiple [TriggerCondition]s with a logical OR.
type OrCondition struct {
	Conditions []TriggerCondition `json:"orConditions" mapstructure:"orConditions"`
}

// ToSQL implements the [TriggerConditions] interface.
// If conditionsMet is true, the SQL will be built to match when any of the conditions are met (OR).
// If conditionsMet is false, the SQL will be built to match when none of the conditions are met (AND).
// e.g. col1='value' OR col2='value' vs col1<>'value' AND col2<>'value'
func (t OrCondition) ToSQL(table string, conditionsMet bool) string {
	separator := " OR "
	if !conditionsMet {
		// negating an OR yields an AND of the negated conditions (De Morgan)
		separator = " AND "
	}
	return toSQL(t.Conditions, table, separator, conditionsMet)
}

// AndCondition combines multiple [TriggerCondition]s with a logical AND.
type AndCondition struct {
	Conditions []TriggerCondition `json:"andConditions" mapstructure:"andConditions"`
}

// ToSQL implements the [TriggerConditions] interface.
// If conditionsMet is true, the SQL will be built to check if all conditions are met (AND).
// If conditionsMet is false, the SQL will be built to check if any condition is not met (OR).
// e.g. col1='value' AND col2='value' vs col1<>'value' OR col2<>'value'
func (t AndCondition) ToSQL(table string, conditionsMet bool) string {
	separator := " AND "
	if !conditionsMet {
		separator = " OR "
	}
	return toSQL(t.Conditions, table, separator, conditionsMet)
}

// toSQL renders all conditions joined by separator and wraps the result in
// parentheses. An empty condition list yields an empty string.
func toSQL(conditions []TriggerCondition, table, separator string, conditionsMet bool) string {
	if len(conditions) == 0 {
		return ""
	}
	parts := make([]string, len(conditions))
	for i, condition := range conditions {
		parts[i] = condition.ToSQL(table, conditionsMet)
	}
	return "(" + strings.Join(parts, separator) + ")"
}
// DeleteParentCountsTrigger
//
// This migration repeats when any of the arguments are changed,
@@ -110,18 +224,73 @@ func (m *triggerMigration) Execute(ctx context.Context, _ eventstore.Event) erro
}
type triggerConfig struct {
Table string `json:"table,omitempty" mapstructure:"table"`
ParentType string `json:"parent_type,omitempty" mapstructure:"parent_type"`
InstanceIDColumn string `json:"instance_id_column,omitempty" mapstructure:"instance_id_column"`
ParentIDColumn string `json:"parent_id_column,omitempty" mapstructure:"parent_id_column"`
Resource string `json:"resource,omitempty" mapstructure:"resource"`
Table string `json:"table,omitempty" mapstructure:"table"`
ParentType string `json:"parent_type,omitempty" mapstructure:"parent_type"`
InstanceIDColumn string `json:"instance_id_column,omitempty" mapstructure:"instance_id_column"`
ParentIDColumn string `json:"parent_id_column,omitempty" mapstructure:"parent_id_column"`
Resource string `json:"resource,omitempty" mapstructure:"resource"`
Conditions TriggerConditions `json:"conditions,omitempty" mapstructure:"conditions"`
TrackChange bool `json:"track_change,omitempty" mapstructure:"track_change"`
}
// Check implements [RepeatableMigration].
func (c *triggerConfig) Check(lastRun map[string]any) bool {
var dst triggerConfig
if err := mapstructure.Decode(lastRun, &dst); err != nil {
decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
DecodeHook: DecodeTriggerConditionsHook(),
WeaklyTypedInput: true,
Result: &dst,
})
if err != nil {
panic(err)
}
return dst != *c
if err = decoder.Decode(lastRun); err != nil {
return true
}
return !reflect.DeepEqual(dst, *c)
}
// DecodeTriggerConditionsHook returns a mapstructure.DecodeHookFunc that can decode
// a map into the correct concrete type implementing [TriggerConditions].
// The concrete type is detected by its marker key ("orConditions",
// "andConditions" or "column"); any other map is passed through unchanged.
func DecodeTriggerConditionsHook() mapstructure.DecodeHookFunc {
return func(
from reflect.Type,
to reflect.Type,
data interface{},
) (interface{}, error) {
// only intercept decoding into the TriggerConditions interface type
if to != reflect.TypeOf((*TriggerConditions)(nil)).Elem() {
return data, nil
}
mapData, ok := data.(map[string]interface{})
if !ok {
return nil, fmt.Errorf("expected a map for TriggerConditions, but got %T", data)
}
// pick the concrete implementation based on the key present in the map
var result TriggerConditions
if _, ok := mapData["orConditions"]; ok {
result = &OrCondition{}
} else if _, ok := mapData["andConditions"]; ok {
result = &AndCondition{}
} else if _, ok := mapData["column"]; ok {
result = &TriggerCondition{}
} else {
// unknown shape: let mapstructure handle (or fail on) it downstream
return data, nil
}
// decode into the chosen concrete type; the hook is passed again so
// nested TriggerConditions values are resolved recursively as well.
decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
Result: result,
DecodeHook: DecodeTriggerConditionsHook(),
})
if err != nil {
return nil, err
}
if err := decoder.Decode(mapData); err != nil {
return nil, err
}
return result, nil
}
}

View File

@@ -13,10 +13,26 @@ import (
)
const (
expCountTriggerQuery = `CREATE OR REPLACE TRIGGER count_resource
AFTER INSERT OR DELETE
expCountTriggerQuery = `-- In case the old trigger exists, drop it to prevent duplicated counts.
DROP TRIGGER IF EXISTS count_resource ON table;
CREATE OR REPLACE TRIGGER count_resource_insert
AFTER INSERT
ON table
FOR EACH ROW
-- Only count if the conditions are met in the newly added row.
EXECUTE FUNCTION projections.count_resource(
'instance',
'instance_id',
'parent_id',
'resource'
);
CREATE OR REPLACE TRIGGER count_resource_delete
AFTER DELETE
ON table
FOR EACH ROW
-- Only count down if the conditions were met in the old / deleted row.
EXECUTE FUNCTION projections.count_resource(
'instance',
'instance_id',
@@ -51,7 +67,95 @@ SELECT
COUNT(*) AS amount
FROM table
GROUP BY (instance_id, parent_id)
ON CONFLICT (instance_id, table_name, parent_type, parent_id) DO
ON CONFLICT (instance_id, table_name, parent_type, parent_id, resource_name) DO
UPDATE SET updated_at = now(), amount = EXCLUDED.amount;`
expCountTriggerConditionalQuery = `-- In case the old trigger exists, drop it to prevent duplicated counts.
DROP TRIGGER IF EXISTS count_resource ON table;
CREATE OR REPLACE TRIGGER count_resource_insert
AFTER INSERT
ON table
FOR EACH ROW
-- Only count if the conditions are met in the newly added row.
WHEN ((NEW.col1 = 'value1' OR NEW.col2 = 'value1'))
EXECUTE FUNCTION projections.count_resource(
'instance',
'instance_id',
'parent_id',
'resource'
);
CREATE OR REPLACE TRIGGER count_resource_delete
AFTER DELETE
ON table
FOR EACH ROW
-- Only count down if the conditions were met in the old / deleted row.
WHEN ((OLD.col1 = 'value1' OR OLD.col2 = 'value1'))
EXECUTE FUNCTION projections.count_resource(
'instance',
'instance_id',
'parent_id',
'resource'
);
CREATE OR REPLACE TRIGGER count_resource_update_up
AFTER UPDATE
ON table
FOR EACH ROW
-- Only count up if the conditions are met in the new state, but were not in the old.
WHEN ((NEW.col1 = 'value1' OR NEW.col2 = 'value1') AND (OLD.col1 <> 'value1' AND OLD.col2 <> 'value1'))
EXECUTE FUNCTION projections.count_resource(
'instance',
'instance_id',
'parent_id',
'resource',
'UP'
);
CREATE OR REPLACE TRIGGER count_resource_update_down
AFTER UPDATE
ON table
FOR EACH ROW
-- Only count down if the conditions are not met in the new state, but were in the old.
WHEN ((NEW.col1 <> 'value1' AND NEW.col2 <> 'value1') AND (OLD.col1 = 'value1' OR OLD.col2 = 'value1'))
EXECUTE FUNCTION projections.count_resource(
'instance',
'instance_id',
'parent_id',
'resource',
'DOWN'
);
CREATE OR REPLACE TRIGGER truncate_resource_counts
AFTER TRUNCATE
ON table
FOR EACH STATEMENT
EXECUTE FUNCTION projections.delete_table_counts();
-- Prevent inserts and deletes while we populate the counts.
LOCK TABLE table IN SHARE MODE;
-- Populate the resource counts for the existing data in the table.
INSERT INTO projections.resource_counts(
instance_id,
table_name,
parent_type,
parent_id,
resource_name,
amount
)
SELECT
instance_id,
'table',
'instance',
parent_id,
'resource',
COUNT(*) AS amount
FROM table
WHERE (table.col1 = 'value1' OR table.col2 = 'value1')
GROUP BY (instance_id, parent_id)
ON CONFLICT (instance_id, table_name, parent_type, parent_id, resource_name) DO
UPDATE SET updated_at = now(), amount = EXCLUDED.amount;`
expDeleteParentCountsQuery = `CREATE OR REPLACE TRIGGER delete_parent_counts_trigger
@@ -129,6 +233,33 @@ func Test_triggerMigration_Execute(t *testing.T) {
)
},
},
{
name: "count trigger conditionally",
fields: fields{
triggerConfig: triggerConfig{
Table: "table",
ParentType: "instance",
InstanceIDColumn: "instance_id",
ParentIDColumn: "parent_id",
Resource: "resource",
Conditions: OrCondition{
Conditions: []TriggerCondition{
{Column: "col1", Value: "value1"},
{Column: "col2", Value: "value1"},
},
},
TrackChange: true,
},
templateName: countTriggerTmpl,
},
expects: func(mock sqlmock.Sqlmock) {
mock.ExpectExec(regexp.QuoteMeta(expCountTriggerConditionalQuery)).
WithoutArgs().
WillReturnResult(
sqlmock.NewResult(1, 1),
)
},
},
{
name: "count trigger",
fields: fields{
@@ -186,6 +317,8 @@ func Test_triggerConfig_Check(t *testing.T) {
InstanceIDColumn string
ParentIDColumn string
Resource string
TrackChange bool
Conditions TriggerConditions
}
type args struct {
lastRun map[string]any
@@ -204,6 +337,13 @@ func Test_triggerConfig_Check(t *testing.T) {
InstanceIDColumn: "instance_id",
ParentIDColumn: "parent_id",
Resource: "user",
TrackChange: true,
Conditions: &OrCondition{
Conditions: []TriggerCondition{
{Column: "col1", Value: "value1"},
{Column: "col2", Value: "value1"},
},
},
},
args: args{
lastRun: map[string]any{
@@ -212,6 +352,13 @@ func Test_triggerConfig_Check(t *testing.T) {
"instance_id_column": "instance_id",
"parent_id_column": "parent_id",
"resource": "user",
"track_change": true,
"conditions": map[string]any{
"orConditions": []any{
map[string]any{"column": "col1", "value": "value1"},
map[string]any{"column": "col2", "value": "value1"},
},
},
},
},
want: true,
@@ -224,6 +371,13 @@ func Test_triggerConfig_Check(t *testing.T) {
InstanceIDColumn: "instance_id",
ParentIDColumn: "parent_id",
Resource: "user",
TrackChange: true,
Conditions: &AndCondition{
Conditions: []TriggerCondition{
{Column: "col1", Value: "value1"},
{Column: "col2", Value: "value1"},
},
},
},
args: args{
lastRun: map[string]any{
@@ -232,6 +386,13 @@ func Test_triggerConfig_Check(t *testing.T) {
"instance_id_column": "instance_id",
"parent_id_column": "parent_id",
"resource": "user",
"track_change": true,
"conditions": map[string]any{
"andConditions": []any{
map[string]any{"column": "col1", "value": "value1"},
map[string]any{"column": "col2", "value": "value1"},
},
},
},
},
want: false,
@@ -245,9 +406,126 @@ func Test_triggerConfig_Check(t *testing.T) {
InstanceIDColumn: tt.fields.InstanceIDColumn,
ParentIDColumn: tt.fields.ParentIDColumn,
Resource: tt.fields.Resource,
TrackChange: tt.fields.TrackChange,
Conditions: tt.fields.Conditions,
}
got := c.Check(tt.args.lastRun)
assert.Equal(t, tt.want, got)
})
}
}
// Test_TriggerConditions_ToSQL verifies the SQL rendering of all condition
// types, for both the met and the negated (not met) case.
func Test_TriggerConditions_ToSQL(t *testing.T) {
	tests := []struct {
		name          string
		conditions    TriggerConditions
		table         string
		conditionsMet bool
		want          string
	}{
		{
			name:          "single condition",
			conditions:    TriggerCondition{Column: "col1", Value: "value1"},
			table:         "table",
			conditionsMet: true,
			want:          "table.col1 = 'value1'",
		},
		{
			name:          "single condition not met",
			conditions:    TriggerCondition{Column: "col1", Value: "value1"},
			table:         "table",
			conditionsMet: false,
			want:          "table.col1 <> 'value1'",
		},
		{
			name: "or condition",
			conditions: OrCondition{Conditions: []TriggerCondition{
				{Column: "col1", Value: "value1"},
				{Column: "col2", Value: "value1"},
			}},
			table:         "table",
			conditionsMet: true,
			want:          "(table.col1 = 'value1' OR table.col2 = 'value1')",
		},
		{
			name: "or condition not met",
			conditions: OrCondition{Conditions: []TriggerCondition{
				{Column: "col1", Value: "value1"},
				{Column: "col2", Value: "value1"},
			}},
			table:         "table",
			conditionsMet: false,
			want:          "(table.col1 <> 'value1' AND table.col2 <> 'value1')",
		},
		{
			name: "and condition",
			conditions: AndCondition{Conditions: []TriggerCondition{
				{Column: "col1", Value: "value1"},
				{Column: "col2", Value: "value1"},
			}},
			table:         "table",
			conditionsMet: true,
			want:          "(table.col1 = 'value1' AND table.col2 = 'value1')",
		},
		{
			name: "and condition not met",
			conditions: AndCondition{Conditions: []TriggerCondition{
				{Column: "col1", Value: "value1"},
				{Column: "col2", Value: "value1"},
			}},
			table:         "table",
			conditionsMet: false,
			want:          "(table.col1 <> 'value1' OR table.col2 <> 'value1')",
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.want, tc.conditions.ToSQL(tc.table, tc.conditionsMet))
		})
	}
}

View File

@@ -15,3 +15,25 @@ type ServicePingReport struct {
// Kind returns the identifier of the service ping report type.
func (r *ServicePingReport) Kind() string {
return "service_ping_report"
}
// The following constants define the resource types for which counts are reported.
// They are used as the resource_name in the projections.resource_counts table
// and referenced by the count trigger migrations.
const (
ResourceCountInstance = "instance"
ResourceCountOrganization = "organization"
ResourceCountProject = "project"
ResourceCountUser = "user"
// ResourceCountUserMachine counts service (machine) users per organization.
ResourceCountUserMachine = "user_machine"
ResourceCountIAMAdmin = "iam_admin"
ResourceCountIdentityProvider = "identity_provider"
ResourceCountIdentityProviderLDAP = "identity_provider_ldap"
ResourceCountActionV1 = "action_v1"
ResourceCountActionExecution = "execution"
ResourceCountActionExecutionTarget = "execution_target"
ResourceCountLoginPolicy = "login_policy"
ResourceCountPasswordComplexityPolicy = "password_complexity_policy"
ResourceCountPasswordExpiryPolicy = "password_expiry_policy"
ResourceCountLockoutPolicy = "lockout_policy"
// The following counts are conditional: their triggers only count rows
// matching a condition (e.g. force MFA enabled, SCIM email metadata key).
ResourceCountEnforceMFA = "enforce_mfa"
ResourceCountPasswordChangeNotification = "password_change_notification"
ResourceCountScimProvisionedUser = "scim_provisioned_user"
)