diff --git a/docs/about/faq.md b/docs/about/faq.md
index 6d66297e..3fda70f4 100644
--- a/docs/about/faq.md
+++ b/docs/about/faq.md
@@ -76,7 +76,7 @@ new "world map" is created for every node in the network. This means that under
 certain conditions, Headscale can likely handle 100s of devices (maybe more),
 if there is _little to no change_ happening in the network. For example, in
 Scenario 1, the process of computing the world map is
-extremly demanding due to the size of the network, but when the map has been
+extremely demanding due to the size of the network, but when the map has been
 created and the nodes are not changing, the Headscale instance will likely
 return to a very low resource usage until the next time there is an event
 requiring the new map.
diff --git a/hscontrol/db/db.go b/hscontrol/db/db.go
index abda802c..d2f39ff0 100644
--- a/hscontrol/db/db.go
+++ b/hscontrol/db/db.go
@@ -496,7 +496,7 @@ func NewHeadscaleDatabase(
 				ID: "202407191627",
 				Migrate: func(tx *gorm.DB) error {
 					// Fix an issue where the automigration in GORM expected a constraint to
-					// exists that didnt, and add the one it wanted.
+					// exist that didn't, and add the one it wanted.
 					// Fixes https://github.com/juanfont/headscale/issues/2351
 					if cfg.Type == types.DatabasePostgres {
 						err := tx.Exec(`
@@ -934,7 +934,7 @@ AND auth_key_id NOT IN (
 			},
 			// From this point, the following rules must be followed:
 			// - NEVER use gorm.AutoMigrate, write the exact migration steps needed
-			// - AutoMigrate depends on the struct staying exactly the same, which it wont over time.
+			// - AutoMigrate depends on the struct staying exactly the same, which it won't over time.
 			// - Never write migrations that requires foreign keys to be disabled.
 		},
 	)
diff --git a/hscontrol/policy/v2/types_test.go b/hscontrol/policy/v2/types_test.go
index 4aca150e..6f6b40d1 100644
--- a/hscontrol/policy/v2/types_test.go
+++ b/hscontrol/policy/v2/types_test.go
@@ -412,7 +412,7 @@ func TestUnmarshalPolicy(t *testing.T) {
 `,
 			wantErr: `Hostname "derp" contains an invalid IP address: "10.0/42"`,
 		},
-		// TODO(kradalby): Figure out why this doesnt work.
+		// TODO(kradalby): Figure out why this doesn't work.
 		// {
 		// 	name: "invalid-hostname",
 		// 	input: `
@@ -1074,7 +1074,7 @@ func TestResolvePolicy(t *testing.T) {
 			ForcedTags: []string{"tag:anything"},
 			IPv4: ap("100.100.101.2"),
 		},
-		// not matchin pak tag
+		// not matching pak tag
 		{
 			User: users["testuser"],
 			AuthKey: &types.PreAuthKey{
@@ -1108,7 +1108,7 @@ func TestResolvePolicy(t *testing.T) {
 			ForcedTags: []string{"tag:anything"},
 			IPv4: ap("100.100.101.5"),
 		},
-		// not matchin pak tag
+		// not matching pak tag
 		{
 			User: users["groupuser"],
 			AuthKey: &types.PreAuthKey{
@@ -1147,7 +1147,7 @@ func TestResolvePolicy(t *testing.T) {
 			ForcedTags: []string{"tag:anything"},
 			IPv4: ap("100.100.101.10"),
 		},
-		// not matchin pak tag
+		// not matching pak tag
 		{
 			AuthKey: &types.PreAuthKey{
 				Tags: []string{"tag:alsotagged"},
@@ -1159,7 +1159,7 @@ func TestResolvePolicy(t *testing.T) {
 			ForcedTags: []string{"tag:test"},
 			IPv4: ap("100.100.101.234"),
 		},
-		// not matchin pak tag
+		// not matching pak tag
 		{
 			AuthKey: &types.PreAuthKey{
 				Tags: []string{"tag:test"},
diff --git a/hscontrol/routes/primary.go b/hscontrol/routes/primary.go
index f65d9122..55547ccb 100644
--- a/hscontrol/routes/primary.go
+++ b/hscontrol/routes/primary.go
@@ -38,7 +38,7 @@ func New() *PrimaryRoutes {
 // updatePrimaryLocked recalculates the primary routes and updates the internal state.
 // It returns true if the primary routes have changed.
 // It is assumed that the caller holds the lock.
-// The algorthm is as follows:
+// The algorithm is as follows:
 // 1. Reset the primaries map.
 // 2. Iterate over the routes and count the number of times a prefix is advertised.
 // 3. If a prefix is advertised by at least two nodes, it is a primary route.
diff --git a/integration/route_test.go b/integration/route_test.go
index 64677aec..aa6b9e2e 100644
--- a/integration/route_test.go
+++ b/integration/route_test.go
@@ -1334,10 +1334,10 @@ func TestSubnetRouterMultiNetworkExitNode(t *testing.T) {
 	web := services[0]
 	webip := netip.MustParseAddr(web.GetIPInNetwork(usernet1))
 
-	// We cant mess to much with ip forwarding in containers so
+	// We can't mess too much with ip forwarding in containers so
 	// we settle for a simple ping here.
 	// Direct is false since we use internal DERP which means we
-	// cant discover a direct path between docker networks.
+	// can't discover a direct path between docker networks.
 	err = user2c.Ping(webip.String(),
 		tsic.WithPingUntilDirect(false),
 		tsic.WithPingCount(1),
@@ -1693,7 +1693,7 @@ func TestAutoApproveMultiNetwork(t *testing.T) {
 			// with an additional tsOpt which advertises the route as part
 			// of the `tailscale up` command. If we do this as part of the
 			// scenario creation, it will be added to all nodes and turn
-			// into a HA node, which isnt something we are testing here.
+			// into a HA node, which isn't something we are testing here.
 			routerUsernet1, err := scenario.CreateTailscaleNode("head", tsOpts...)
 			require.NoError(t, err)
 			defer routerUsernet1.Shutdown()
diff --git a/integration/tsic/tsic.go b/integration/tsic/tsic.go
index 3e4847eb..1818c16a 100644
--- a/integration/tsic/tsic.go
+++ b/integration/tsic/tsic.go
@@ -202,7 +202,7 @@ func WithExtraLoginArgs(args []string) Option {
 	}
 }
 
-// WithAcceptRoutes tells the node to accept incomming routes.
+// WithAcceptRoutes tells the node to accept incoming routes.
 func WithAcceptRoutes() Option {
 	return func(tsic *TailscaleInContainer) {
 		tsic.withAcceptRoutes = true
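
Note on the `hscontrol/routes/primary.go` hunk above: the fixed comment describes the recalculation in numbered steps. Below is a minimal, self-contained sketch of steps 1-3 for illustration only; the `nodeID` type, the map shapes, and the `pickPrimaries` name are assumptions made for this example and are not the actual `PrimaryRoutes` API.

```go
package main

import (
	"fmt"
	"net/netip"
)

// nodeID and the map shapes below are illustrative stand-ins, not the
// real types used by hscontrol/routes.PrimaryRoutes.
type nodeID int64

// pickPrimaries sketches steps 1-3 of the updatePrimaryLocked comment:
// start from an empty primaries map, count how many nodes advertise each
// prefix, and keep only prefixes advertised by at least two nodes.
func pickPrimaries(routes map[nodeID][]netip.Prefix) map[netip.Prefix][]nodeID {
	// Step 1: reset (start from an empty) primaries map.
	counts := make(map[netip.Prefix][]nodeID)

	// Step 2: iterate over the routes and record which nodes advertise
	// each prefix, which also gives the advertisement count.
	for id, prefixes := range routes {
		for _, p := range prefixes {
			counts[p] = append(counts[p], id)
		}
	}

	// Step 3: a prefix advertised by at least two nodes is a candidate
	// for a primary route (HA subnet routing).
	primaries := make(map[netip.Prefix][]nodeID)
	for p, ids := range counts {
		if len(ids) >= 2 {
			primaries[p] = ids
		}
	}

	return primaries
}

func main() {
	routes := map[nodeID][]netip.Prefix{
		1: {netip.MustParsePrefix("10.0.0.0/24")},
		2: {netip.MustParsePrefix("10.0.0.0/24")},
		3: {netip.MustParsePrefix("192.168.1.0/24")},
	}
	// Only 10.0.0.0/24 is advertised by two nodes, so only it qualifies.
	fmt.Println(pickPrimaries(routes))
}
```

Per the comment in the hunk, the real `updatePrimaryLocked` also updates the internal state and reports whether the primary routes changed; the sketch covers only the counting and threshold steps quoted above.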