all: disable thelper linter and clean up nolint comments
Disable the thelper linter which triggers on inline test closures in table-driven tests. These closures are intentionally not standalone helpers and don't benefit from t.Helper(). Also remove explanatory comments from nolint directives throughout the codebase as they add noise without providing significant value.
parent de1f9b90d5
commit b36438bf90
66 changed files with 397 additions and 276 deletions
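For context, a minimal, hypothetical sketch (not taken from this repository) of the pattern the commit message describes: in a table-driven test, per-case setup closures receive *testing.T but are one-off values in the test table rather than shared helpers, so a t.Helper() call in them adds nothing, and thelper's warning is noise. All names below (TestParseGreeting, setupFunc) are illustrative.

package example

import (
	"strings"
	"testing"
)

// Table-driven test whose per-case setup closures take *testing.T.
// The commit message says thelper reports such inline closures as helpers
// that should call t.Helper(), even though they are not reusable helpers.
func TestParseGreeting(t *testing.T) {
	tests := []struct {
		name      string
		setupFunc func(t *testing.T) string // inline closure, deliberately no t.Helper()
		want      string
	}{
		{
			name: "plain",
			setupFunc: func(t *testing.T) string {
				t.Logf("building plain input")
				return "hello"
			},
			want: "hello",
		},
		{
			name: "padded",
			setupFunc: func(t *testing.T) string {
				t.Logf("building padded input")
				return "  hello  "
			},
			want: "hello",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := strings.TrimSpace(tt.setupFunc(t))
			if got != tt.want {
				t.Fatalf("got %q, want %q", got, tt.want)
			}
		})
	}
}

The nolint cleanup in the same commit follows the trade-off stated above: a bare //nolint:gosec suppresses the same finding as //nolint:gosec // G101: explanation, since the trailing text is only a comment, so the explanations were dropped as noise.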
@@ -25,6 +25,7 @@ linters:
 - revive
 - tagliatelle
 - testpackage
+- thelper
 - varnamelen
 - wrapcheck
 - wsl
@@ -16,7 +16,7 @@ import (
 )

 const (
-//nolint:gosec // G101: This is a flag name, not a credential
+//nolint:gosec
 bypassFlag = "bypass-grpc-and-access-database-directly"
 )

@@ -178,6 +178,7 @@ var setPolicy = &cobra.Command{
 defer cancel()
 defer conn.Close()

+//nolint:noinlineerr
 if _, err := client.SetPolicy(ctx, request); err != nil {
 ErrorOutput(err, fmt.Sprintf("Failed to set ACL Policy: %s", err), output)
 }
@@ -100,6 +100,7 @@ var createUserCmd = &cobra.Command{
 }

 if pictureURL, _ := cmd.Flags().GetString("picture-url"); pictureURL != "" {
+//nolint:noinlineerr
 if _, err := url.Parse(pictureURL); err != nil {
 ErrorOutput(
 err,
@@ -25,6 +25,7 @@ func cleanupBeforeTest(ctx context.Context) error {
 return fmt.Errorf("failed to clean stale test containers: %w", err)
 }

+//nolint:noinlineerr
 if err := pruneDockerNetworks(ctx); err != nil {
 return fmt.Errorf("failed to prune networks: %w", err)
 }

@@ -55,7 +56,7 @@ func cleanupAfterTest(ctx context.Context, cli *client.Client, containerID, runI

 // killTestContainers terminates and removes all test containers.
 func killTestContainers(ctx context.Context) error {
-//nolint:contextcheck // createDockerClient internal functions don't accept context
+//nolint:contextcheck
 cli, err := createDockerClient()
 if err != nil {
 return fmt.Errorf("failed to create Docker client: %w", err)

@@ -110,7 +111,7 @@ func killTestContainers(ctx context.Context) error {
 // This function filters containers by the hi.run-id label to only affect containers
 // belonging to the specified test run, leaving other concurrent test runs untouched.
 func killTestContainersByRunID(ctx context.Context, runID string) error {
-//nolint:contextcheck // createDockerClient internal functions don't accept context
+//nolint:contextcheck
 cli, err := createDockerClient()
 if err != nil {
 return fmt.Errorf("failed to create Docker client: %w", err)

@@ -153,7 +154,7 @@ func killTestContainersByRunID(ctx context.Context, runID string) error {
 // This is useful for cleaning up leftover containers from previous crashed or interrupted test runs
 // without interfering with currently running concurrent tests.
 func cleanupStaleTestContainers(ctx context.Context) error {
-//nolint:contextcheck // createDockerClient internal functions don't accept context
+//nolint:contextcheck
 cli, err := createDockerClient()
 if err != nil {
 return fmt.Errorf("failed to create Docker client: %w", err)

@@ -228,7 +229,7 @@ func removeContainerWithRetry(ctx context.Context, cli *client.Client, container

 // pruneDockerNetworks removes unused Docker networks.
 func pruneDockerNetworks(ctx context.Context) error {
-//nolint:contextcheck // createDockerClient internal functions don't accept context
+//nolint:contextcheck
 cli, err := createDockerClient()
 if err != nil {
 return fmt.Errorf("failed to create Docker client: %w", err)

@@ -251,7 +252,7 @@ func pruneDockerNetworks(ctx context.Context) error {

 // cleanOldImages removes test-related and old dangling Docker images.
 func cleanOldImages(ctx context.Context) error {
-//nolint:contextcheck // createDockerClient internal functions don't accept context
+//nolint:contextcheck
 cli, err := createDockerClient()
 if err != nil {
 return fmt.Errorf("failed to create Docker client: %w", err)

@@ -304,7 +305,7 @@ func cleanOldImages(ctx context.Context) error {

 // cleanCacheVolume removes the Docker volume used for Go module cache.
 func cleanCacheVolume(ctx context.Context) error {
-//nolint:contextcheck // createDockerClient internal functions don't accept context
+//nolint:contextcheck
 cli, err := createDockerClient()
 if err != nil {
 return fmt.Errorf("failed to create Docker client: %w", err)
@@ -38,8 +38,10 @@ const (
 )

 // runTestContainer executes integration tests in a Docker container.
+//
+//nolint:gocyclo
 func runTestContainer(ctx context.Context, config *RunConfig) error {
-//nolint:contextcheck // createDockerClient internal functions don't accept context
+//nolint:contextcheck
 cli, err := createDockerClient()
 if err != nil {
 return fmt.Errorf("failed to create Docker client: %w", err)

@@ -62,6 +64,7 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
 }

 const dirPerm = 0o755
+//nolint:noinlineerr
 if err := os.MkdirAll(absLogsDir, dirPerm); err != nil {
 return fmt.Errorf("failed to create logs directory: %w", err)
 }

@@ -83,6 +86,7 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
 }

 imageName := "golang:" + config.GoVersion
+//nolint:noinlineerr
 if err := ensureImageAvailable(ctx, cli, imageName, config.Verbose); err != nil {
 return fmt.Errorf("failed to ensure image availability: %w", err)
 }

@@ -96,6 +100,7 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
 log.Printf("Created container: %s", resp.ID)
 }

+//nolint:noinlineerr
 if err := cli.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil {
 return fmt.Errorf("failed to start container: %w", err)
 }

@@ -111,7 +116,7 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
 if config.Stats {
 var err error

-//nolint:contextcheck // NewStatsCollector internal functions don't accept context
+//nolint:contextcheck
 statsCollector, err = NewStatsCollector()
 if err != nil {
 if config.Verbose {

@@ -145,6 +150,7 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
 }

 // Extract artifacts from test containers before cleanup
+//nolint:noinlineerr
 if err := extractArtifactsFromContainers(ctx, resp.ID, logsDir, config.Verbose); err != nil && config.Verbose {
 log.Printf("Warning: failed to extract artifacts from containers: %v", err)
 }

@@ -424,6 +430,7 @@ func isContainerFinalized(state *container.State) bool {
 func findProjectRoot(startPath string) string {
 current := startPath
 for {
+//nolint:noinlineerr
 if _, err := os.Stat(filepath.Join(current, "go.mod")); err == nil {
 return current
 }

@@ -496,6 +503,7 @@ func getCurrentDockerContext() (*DockerContext, error) {
 }

 var contexts []DockerContext
+//nolint:noinlineerr
 if err := json.Unmarshal(output, &contexts); err != nil {
 return nil, fmt.Errorf("failed to parse docker context: %w", err)
 }

@@ -634,7 +642,7 @@ func listControlFiles(logsDir string) {

 // extractArtifactsFromContainers collects container logs and files from the specific test run.
 func extractArtifactsFromContainers(ctx context.Context, testContainerID, logsDir string, verbose bool) error {
-//nolint:contextcheck // createDockerClient internal functions don't accept context
+//nolint:contextcheck
 cli, err := createDockerClient()
 if err != nil {
 return fmt.Errorf("failed to create Docker client: %w", err)

@@ -740,6 +748,7 @@ func extractContainerArtifacts(ctx context.Context, cli *client.Client, containe
 }

 // Extract container logs
+//nolint:noinlineerr
 if err := extractContainerLogs(ctx, cli, containerID, containerName, logsDir, verbose); err != nil {
 return fmt.Errorf("failed to extract logs: %w", err)
 }

@@ -787,13 +796,13 @@ func extractContainerLogs(ctx context.Context, cli *client.Client, containerID,
 }

 // Write stdout logs
-//nolint:gosec // G306: Log files are meant to be world-readable
+//nolint:gosec,mnd,noinlineerr
 if err := os.WriteFile(stdoutPath, stdoutBuf.Bytes(), 0o644); err != nil {
 return fmt.Errorf("failed to write stdout log: %w", err)
 }

 // Write stderr logs
-//nolint:gosec // G306: Log files are meant to be world-readable
+//nolint:gosec,mnd,noinlineerr
 if err := os.WriteFile(stderrPath, stderrBuf.Bytes(), 0o644); err != nil {
 return fmt.Errorf("failed to write stderr log: %w", err)
 }
@@ -38,15 +38,15 @@ func runDoctorCheck(ctx context.Context) error {
 }

 // Check 3: Go installation
-//nolint:contextcheck // These checks don't need context
+//nolint:contextcheck
 results = append(results, checkGoInstallation())

 // Check 4: Git repository
-//nolint:contextcheck // These checks don't need context
+//nolint:contextcheck
 results = append(results, checkGitRepository())

 // Check 5: Required files
-//nolint:contextcheck // These checks don't need context
+//nolint:contextcheck
 results = append(results, checkRequiredFiles())

 // Display results

@@ -89,7 +89,7 @@ func checkDockerBinary() DoctorResult {

 // checkDockerDaemon verifies Docker daemon is running and accessible.
 func checkDockerDaemon(ctx context.Context) DoctorResult {
-//nolint:contextcheck // createDockerClient internal functions don't accept context
+//nolint:contextcheck
 cli, err := createDockerClient()
 if err != nil {
 return DoctorResult{

@@ -129,7 +129,7 @@ func checkDockerDaemon(ctx context.Context) DoctorResult {

 // checkDockerContext verifies Docker context configuration.
 func checkDockerContext(_ context.Context) DoctorResult {
-//nolint:contextcheck // getCurrentDockerContext doesn't accept context
+//nolint:contextcheck
 contextInfo, err := getCurrentDockerContext()
 if err != nil {
 return DoctorResult{

@@ -160,7 +160,7 @@ func checkDockerContext(_ context.Context) DoctorResult {

 // checkDockerSocket verifies Docker socket accessibility.
 func checkDockerSocket(ctx context.Context) DoctorResult {
-//nolint:contextcheck // createDockerClient internal functions don't accept context
+//nolint:contextcheck
 cli, err := createDockerClient()
 if err != nil {
 return DoctorResult{

@@ -198,7 +198,7 @@ func checkDockerSocket(ctx context.Context) DoctorResult {

 // checkGolangImage verifies the golang Docker image is available locally or can be pulled.
 func checkGolangImage(ctx context.Context) DoctorResult {
-//nolint:contextcheck // createDockerClient internal functions don't accept context
+//nolint:contextcheck
 cli, err := createDockerClient()
 if err != nil {
 return DoctorResult{
@@ -68,8 +68,10 @@ func runIntegrationTest(env *command.Env) error {
 func detectGoVersion() string {
 goModPath := filepath.Join("..", "..", "go.mod")

+//nolint:noinlineerr
 if _, err := os.Stat("go.mod"); err == nil {
 goModPath = "go.mod"
+//nolint:noinlineerr
 } else if _, err := os.Stat("../../go.mod"); err == nil {
 goModPath = "../../go.mod"
 }
@@ -299,7 +299,7 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
 case <-derpTickerChan:
 log.Info().Msg("Fetching DERPMap updates")

-//nolint:contextcheck // GetDERPMap internal functions don't accept context
+//nolint:contextcheck
 derpMap, err := backoff.Retry(ctx, func() (*tailcfg.DERPMap, error) {
 derpMap, err := derp.GetDERPMap(h.cfg.DERP)
 if err != nil {

@@ -407,6 +407,7 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
 writeUnauthorized := func(statusCode int) {
 writer.WriteHeader(statusCode)

+//nolint:noinlineerr
 if _, err := writer.Write([]byte("Unauthorized")); err != nil {
 log.Error().Err(err).Msg("writing HTTP response failed")
 }

@@ -886,7 +887,7 @@ func (h *Headscale) Serve() error {
 // Close state connections
 info("closing state and database")

-//nolint:contextcheck // Close method signature does not accept context
+//nolint:contextcheck
 err = h.state.Close()
 if err != nil {
 log.Error().Err(err).Msg("failed to close state")
@@ -356,7 +356,7 @@ func (h *Headscale) handleRegisterWithAuthKey(
 // If node is not valid, it means an ephemeral node was deleted during logout
 if !node.Valid() {
 h.Change(changed)
-return nil, nil
+return nil, nil //nolint:nilnil
 }

 // This is a bit of a back and forth, but we have a bit of a chicken and egg
@@ -38,6 +38,7 @@ type interactiveStep struct {
 callAuthPath bool // Real call to HandleNodeFromAuthPath, not mocked
 }

+//nolint:gocyclo
 func TestAuthenticationFlows(t *testing.T) {
 // Shared test keys for consistent behavior across test cases
 machineKey1 := key.NewMachine()

@@ -76,6 +77,8 @@ func TestAuthenticationFlows(t *testing.T) {
 {
 name: "preauth_key_valid_new_node",
 setupFunc: func(t *testing.T, app *Headscale) (string, error) {
+t.Helper()
+
 user := app.state.CreateUserForTest("preauth-user")

 pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)

@@ -97,7 +100,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantAuth: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 assert.True(t, resp.MachineAuthorized)

@@ -119,6 +122,8 @@ func TestAuthenticationFlows(t *testing.T) {
 {
 name: "preauth_key_reusable_multiple_nodes",
 setupFunc: func(t *testing.T, app *Headscale) (string, error) {
+t.Helper()
+
 user := app.state.CreateUserForTest("reusable-user")

 pak, err := app.state.CreatePreAuthKey(user.TypedID(), true, false, nil, nil)

@@ -163,7 +168,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey2.Public() },
+machineKey: machineKey2.Public,
 wantAuth: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 assert.True(t, resp.MachineAuthorized)

@@ -232,7 +237,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey2.Public() },
+machineKey: machineKey2.Public,
 wantError: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 // First node should exist, second should not

@@ -266,7 +271,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantError: true,
 },

@@ -299,7 +304,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantAuth: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 assert.True(t, resp.MachineAuthorized)

@@ -336,7 +341,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 requiresInteractiveFlow: true,
 interactiveSteps: []interactiveStep{
 {stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true},

@@ -365,7 +370,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 requiresInteractiveFlow: true,
 interactiveSteps: []interactiveStep{
 {stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true},

@@ -433,7 +438,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(-1 * time.Hour), // Past expiry = logout
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantAuth: true,
 wantExpired: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {

@@ -488,7 +493,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(-1 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey2.Public() }, // Different machine key
+machineKey: machineKey2.Public, // Different machine key
 wantError: true,
 },
 // TEST: Existing node cannot extend expiry without re-auth
@@ -538,7 +543,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(48 * time.Hour), // Future time = extend attempt
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantError: true,
 },
 // TEST: Expired node must re-authenticate

@@ -601,7 +606,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour), // Future expiry
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantExpired: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 assert.True(t, resp.NodeKeyExpired)

@@ -655,7 +660,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(-1 * time.Hour), // Logout
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantExpired: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 assert.True(t, resp.NodeKeyExpired)

@@ -711,7 +716,7 @@ func TestAuthenticationFlows(t *testing.T) {
 NodeKey: nodeKey1.Public(),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantAuth: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 assert.True(t, resp.MachineAuthorized)

@@ -749,7 +754,7 @@ func TestAuthenticationFlows(t *testing.T) {
 NodeKey: nodeKey1.Public(),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantError: true,
 },
 // TEST: Invalid followup URL is rejected

@@ -768,7 +773,7 @@ func TestAuthenticationFlows(t *testing.T) {
 NodeKey: nodeKey1.Public(),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantError: true,
 },
 // TEST: Non-existent registration ID is rejected

@@ -787,7 +792,7 @@ func TestAuthenticationFlows(t *testing.T) {
 NodeKey: nodeKey1.Public(),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantError: true,
 },

@@ -823,7 +828,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantAuth: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 assert.True(t, resp.MachineAuthorized)

@@ -861,7 +866,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantAuth: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 assert.True(t, resp.MachineAuthorized)

@@ -908,7 +913,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantError: true,
 },

@@ -942,7 +947,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantAuth: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 assert.True(t, resp.MachineAuthorized)

@@ -1094,7 +1099,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(48 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantAuth: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 assert.True(t, resp.MachineAuthorized)

@@ -1158,7 +1163,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(48 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantAuthURL: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 assert.Contains(t, resp.AuthURL, "register/")

@@ -1227,7 +1232,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantAuth: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 assert.True(t, resp.MachineAuthorized)
@@ -1277,7 +1282,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Time{}, // Zero time
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantAuth: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 assert.True(t, resp.MachineAuthorized)

@@ -1324,7 +1329,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantAuth: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 assert.True(t, resp.MachineAuthorized)

@@ -1380,7 +1385,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantAuth: false, // Should not be authorized yet - needs to use new AuthURL
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 // Should get a new AuthURL, not an error

@@ -1405,7 +1410,7 @@ func TestAuthenticationFlows(t *testing.T) {
 NodeKey: nodeKey1.Public(),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantError: true,
 },
 // TEST: Wrong followup path format is rejected

@@ -1424,7 +1429,7 @@ func TestAuthenticationFlows(t *testing.T) {
 NodeKey: nodeKey1.Public(),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantError: true,
 },

@@ -1455,7 +1460,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 requiresInteractiveFlow: true,
 interactiveSteps: []interactiveStep{
 {stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true},

@@ -1509,7 +1514,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantAuth: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 assert.True(t, resp.MachineAuthorized)

@@ -1548,7 +1553,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 requiresInteractiveFlow: true,
 interactiveSteps: []interactiveStep{
 {stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true},

@@ -1591,7 +1596,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantAuth: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 assert.True(t, resp.MachineAuthorized)

@@ -1635,7 +1640,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(12 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantAuth: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 assert.True(t, resp.MachineAuthorized)

@@ -1707,7 +1712,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantAuth: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 assert.True(t, resp.MachineAuthorized)

@@ -1781,7 +1786,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() }, // Same machine key
+machineKey: machineKey1.Public, // Same machine key
 requiresInteractiveFlow: true,
 interactiveSteps: []interactiveStep{
 {stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true},

@@ -1835,7 +1840,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantAuth: false, // Should not be authorized yet - needs to use new AuthURL
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 // Should get a new AuthURL, not an error

@@ -1845,13 +1850,13 @@ func TestAuthenticationFlows(t *testing.T) {

 // Verify the response contains a valid registration URL
 authURL, err := url.Parse(resp.AuthURL)
-assert.NoError(t, err, "AuthURL should be a valid URL")
+require.NoError(t, err, "AuthURL should be a valid URL")
 assert.True(t, strings.HasPrefix(authURL.Path, "/register/"), "AuthURL path should start with /register/")

 // Extract and validate the new registration ID exists in cache
 newRegIDStr := strings.TrimPrefix(authURL.Path, "/register/")
 newRegID, err := types.RegistrationIDFromString(newRegIDStr)
-assert.NoError(t, err, "should be able to parse new registration ID")
+require.NoError(t, err, "should be able to parse new registration ID")

 // Verify new registration entry exists in cache
 _, found := app.state.GetRegistrationCacheEntry(newRegID)
@@ -1905,7 +1910,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now(), // Exactly now (edge case between past and future)
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 wantAuth: true,
 wantExpired: true,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {

@@ -1937,7 +1942,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey2.Public() },
+machineKey: machineKey2.Public,
 requiresInteractiveFlow: true,
 interactiveSteps: []interactiveStep{
 {stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true},

@@ -2003,7 +2008,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 requiresInteractiveFlow: true,
 interactiveSteps: []interactiveStep{
 {stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true},

@@ -2049,7 +2054,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 requiresInteractiveFlow: true,
 interactiveSteps: []interactiveStep{
 {stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true},

@@ -2103,7 +2108,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 // This test validates concurrent interactive registration attempts
 assert.Contains(t, resp.AuthURL, "/register/")

@@ -2211,7 +2216,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 requiresInteractiveFlow: true,
 interactiveSteps: []interactiveStep{
 {stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true},

@@ -2255,7 +2260,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 requiresInteractiveFlow: true,
 interactiveSteps: []interactiveStep{
 {stepType: stepTypeInitialRequest, expectAuthURL: true, expectCacheEntry: true},

@@ -2293,7 +2298,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 // Get initial AuthURL and extract registration ID
 authURL := resp.AuthURL

@@ -2315,7 +2320,7 @@ func TestAuthenticationFlows(t *testing.T) {
 nil,
 "error-test-method",
 )
-assert.Error(t, err, "should fail with invalid user ID")
+require.Error(t, err, "should fail with invalid user ID")

 // Cache entry should still exist after auth error (for retry scenarios)
 _, stillFound := app.state.GetRegistrationCacheEntry(registrationID)

@@ -2347,7 +2352,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 // Test multiple interactive registration attempts for the same node can coexist
 authURL1 := resp.AuthURL

@@ -2407,7 +2412,7 @@ func TestAuthenticationFlows(t *testing.T) {
 Expiry: time.Now().Add(24 * time.Hour),
 }
 },
-machineKey: func() key.MachinePublic { return machineKey1.Public() },
+machineKey: machineKey1.Public,
 validate: func(t *testing.T, resp *tailcfg.RegisterResponse, app *Headscale) {
 authURL1 := resp.AuthURL
 regID1, err := extractRegistrationIDFromAuthURL(authURL1)

@@ -2594,6 +2599,8 @@ func runInteractiveWorkflowTest(t *testing.T, tt struct {
 validateCompleteResponse bool
 }, app *Headscale, dynamicValue string,
 ) {
+t.Helper()
+
 // Build initial request
 req := tt.request(dynamicValue)
 machineKey := tt.machineKey()

@@ -2731,7 +2738,9 @@ func extractRegistrationIDFromAuthURL(authURL string) (types.RegistrationID, err
 }

 // validateCompleteRegistrationResponse performs comprehensive validation of a registration response.
-func validateCompleteRegistrationResponse(t *testing.T, resp *tailcfg.RegisterResponse, originalReq tailcfg.RegisterRequest) {
+func validateCompleteRegistrationResponse(t *testing.T, resp *tailcfg.RegisterResponse, _ tailcfg.RegisterRequest) {
+t.Helper()
+
 // Basic response validation
 require.NotNil(t, resp, "response should not be nil")
 require.True(t, resp.MachineAuthorized, "machine should be authorized")

@@ -3260,7 +3269,7 @@ func TestGitHubIssue2830_NodeRestartWithUsedPreAuthKey(t *testing.T) {
 restartResp, err := app.handleRegister(context.Background(), restartReq, machineKey.Public())

 // This is the assertion that currently FAILS in v0.27.0
-assert.NoError(t, err, "BUG: existing node re-registration with its own used pre-auth key should succeed")
+require.NoError(t, err, "BUG: existing node re-registration with its own used pre-auth key should succeed")

 if err != nil {
 t.Logf("Error received (this is the bug): %v", err)
@@ -13,7 +13,7 @@ import (
 )

 const (
-apiKeyPrefix = "hskey-api-" //nolint:gosec // This is a prefix, not a credential
+apiKeyPrefix = "hskey-api-" //nolint:gosec
 apiKeyPrefixLength = 12
 apiKeyHashLength = 64
@@ -75,7 +75,7 @@ func NewHeadscaleDatabase(
 ID: "202501221827",
 Migrate: func(tx *gorm.DB) error {
 // Remove any invalid routes associated with a node that does not exist.
-//nolint:staticcheck // SA1019: types.Route kept for GORM migrations only
+//nolint:staticcheck
 if tx.Migrator().HasTable(&types.Route{}) && tx.Migrator().HasTable(&types.Node{}) {
 err := tx.Exec("delete from routes where node_id not in (select id from nodes)").Error
 if err != nil {

@@ -84,7 +84,7 @@ func NewHeadscaleDatabase(
 }

 // Remove any invalid routes without a node_id.
-//nolint:staticcheck // SA1019: types.Route kept for GORM migrations only
+//nolint:staticcheck
 if tx.Migrator().HasTable(&types.Route{}) {
 err := tx.Exec("delete from routes where node_id is null").Error
 if err != nil {

@@ -92,7 +92,7 @@ func NewHeadscaleDatabase(
 }
 }

-//nolint:staticcheck // SA1019: types.Route kept for GORM migrations only
+//nolint:staticcheck
 err := tx.AutoMigrate(&types.Route{})
 if err != nil {
 return fmt.Errorf("automigrating types.Route: %w", err)

@@ -158,7 +158,7 @@ AND auth_key_id NOT IN (

 nodeRoutes := map[uint64][]netip.Prefix{}

-//nolint:staticcheck // SA1019: types.Route kept for GORM migrations only
+//nolint:staticcheck
 var routes []types.Route

 err = tx.Find(&routes).Error

@@ -188,7 +188,7 @@ AND auth_key_id NOT IN (
 }

 // Drop the old table.
-//nolint:staticcheck // SA1019: types.Route kept for GORM migrations only
+//nolint:staticcheck
 _ = tx.Migrator().DropTable(&types.Route{})

 return nil

@@ -798,6 +798,7 @@ AND auth_key_id NOT IN (
 },
 }

+//nolint:noinlineerr
 if err := squibble.Validate(ctx, sqlConn, dbSchema, &opts); err != nil {
 return nil, fmt.Errorf("validating schema: %w", err)
 }

@@ -932,6 +933,7 @@ func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormig

 // Get the current foreign key status
 var fkOriginallyEnabled int
+//nolint:noinlineerr
 if err := dbConn.Raw("PRAGMA foreign_keys").Scan(&fkOriginallyEnabled).Error; err != nil {
 return fmt.Errorf("checking foreign key status: %w", err)
 }

@@ -980,11 +982,13 @@ func runMigrations(cfg types.DatabaseConfig, dbConn *gorm.DB, migrations *gormig
 }
 }

+//nolint:noinlineerr
 if err := dbConn.Exec("PRAGMA foreign_keys = ON").Error; err != nil {
 return fmt.Errorf("restoring foreign keys: %w", err)
 }

 // Run the rest of the migrations
+//nolint:noinlineerr
 if err := migrations.Migrate(); err != nil {
 return err
 }
@@ -57,7 +57,7 @@ func TestEphemeralGarbageCollectorGoRoutineLeak(t *testing.T) {
 deletionWg.Add(numNodes)

 for i := 1; i <= numNodes; i++ {
-gc.Schedule(types.NodeID(i), expiry) //nolint:gosec // G115: Test code with controlled values
+gc.Schedule(types.NodeID(i), expiry) //nolint:gosec
 }

 // Wait for all scheduled deletions to complete

@@ -70,7 +70,7 @@ func TestEphemeralGarbageCollectorGoRoutineLeak(t *testing.T) {

 // Schedule and immediately cancel to test that part of the code
 for i := numNodes + 1; i <= numNodes*2; i++ {
-nodeID := types.NodeID(i) //nolint:gosec // G115: Test code with controlled values
+nodeID := types.NodeID(i) //nolint:gosec
 gc.Schedule(nodeID, time.Hour)
 gc.Cancel(nodeID)
 }

@@ -394,7 +394,7 @@ func TestEphemeralGarbageCollectorConcurrentScheduleAndClose(t *testing.T) {
 case <-stopScheduling:
 return
 default:
-nodeID := types.NodeID(baseNodeID + j + 1) //nolint:gosec // G115: Test code with controlled values
+nodeID := types.NodeID(baseNodeID + j + 1) //nolint:gosec
 gc.Schedule(nodeID, 1*time.Hour) // Long expiry to ensure it doesn't trigger during test
 atomic.AddInt64(&scheduledCount, 1)
@@ -253,6 +253,7 @@ func SetApprovedRoutes(
 return err
 }

+//nolint:noinlineerr
 if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", string(b)).Error; err != nil {
 return fmt.Errorf("updating approved routes: %w", err)
 }

@@ -655,7 +656,7 @@ func (hsdb *HSDatabase) CreateNodeForTest(user *types.User, hostname ...string)
 panic("CreateNodeForTest requires a valid user")
 }

-nodeName := "testnode"
+nodeName := "testnode" //nolint:goconst
 if len(hostname) > 0 && hostname[0] != "" {
 nodeName = hostname[0]
 }
@@ -98,7 +98,7 @@ func TestExpireNode(t *testing.T) {
 user, err := db.CreateUser(types.User{Name: "test"})
 require.NoError(t, err)

-//nolint:staticcheck // SA4006: pak is used in new(pak.ID) below
+//nolint:staticcheck
 pak, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, nil)
 require.NoError(t, err)

@@ -143,7 +143,7 @@ func TestSetTags(t *testing.T) {
 user, err := db.CreateUser(types.User{Name: "test"})
 require.NoError(t, err)

-//nolint:staticcheck // SA4006: pak is used in new(pak.ID) below
+//nolint:staticcheck
 pak, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, nil)
 require.NoError(t, err)

@@ -468,10 +468,10 @@ func TestAutoApproveRoutes(t *testing.T) {
 require.NoError(t, err)

 users, err := adb.ListUsers()
-assert.NoError(t, err)
+require.NoError(t, err)

 nodes, err := adb.ListNodes()
-assert.NoError(t, err)
+require.NoError(t, err)

 pm, err := pmf(users, nodes.ViewSlice())
 require.NoError(t, err)

@@ -600,7 +600,7 @@ func TestEphemeralGarbageCollectorLoads(t *testing.T) {

 // Use shorter expiry for faster tests
 for i := range want {
-go e.Schedule(types.NodeID(i), 100*time.Millisecond) //nolint:gosec // test code, no overflow risk
+go e.Schedule(types.NodeID(i), 100*time.Millisecond) //nolint:gosec
 }

 // Wait for all deletions to complete

@@ -639,11 +639,11 @@ func TestListEphemeralNodes(t *testing.T) {
 user, err := db.CreateUser(types.User{Name: "test"})
 require.NoError(t, err)

-//nolint:staticcheck // SA4006: pak is used in new(pak.ID) below
+//nolint:staticcheck
 pak, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, nil)
 require.NoError(t, err)

-//nolint:staticcheck // SA4006: pakEph is used in new(pakEph.ID) below
+//nolint:staticcheck
 pakEph, err := db.CreatePreAuthKey(user.TypedID(), false, true, nil, nil)
 require.NoError(t, err)

@@ -724,6 +724,7 @@ func TestNodeNaming(t *testing.T) {
 // break your network, so they should be replaced when registering
 // a node.
 // https://github.com/juanfont/headscale/issues/2343
+//nolint:gosmopolitan
 nodeInvalidHostname := types.Node{
 MachineKey: key.NewMachine().Public(),
 NodeKey: key.NewNode().Public(),

@@ -822,25 +823,26 @@ func TestNodeNaming(t *testing.T) {
 err = db.Write(func(tx *gorm.DB) error {
 return RenameNode(tx, nodes[0].ID, "test")
 })
-assert.ErrorContains(t, err, "name is not unique")
+require.ErrorContains(t, err, "name is not unique")

 // Rename invalid chars
+//nolint:gosmopolitan
 err = db.Write(func(tx *gorm.DB) error {
 return RenameNode(tx, nodes[2].ID, "我的电脑")
 })
-assert.ErrorContains(t, err, "invalid characters")
+require.ErrorContains(t, err, "invalid characters")

 // Rename too short
 err = db.Write(func(tx *gorm.DB) error {
 return RenameNode(tx, nodes[3].ID, "a")
 })
-assert.ErrorContains(t, err, "at least 2 characters")
+require.ErrorContains(t, err, "at least 2 characters")

 // Rename with emoji
 err = db.Write(func(tx *gorm.DB) error {
 return RenameNode(tx, nodes[0].ID, "hostname-with-💩")
 })
-assert.ErrorContains(t, err, "invalid characters")
+require.ErrorContains(t, err, "invalid characters")

 // Rename with only emoji
 err = db.Write(func(tx *gorm.DB) error {

@@ -908,12 +910,12 @@ func TestRenameNodeComprehensive(t *testing.T) {
 },
 {
 name: "chinese_chars_with_dash_rejected",
-newName: "server-北京-01",
+newName: "server-北京-01", //nolint:gosmopolitan
 wantErr: "invalid characters",
 },
 {
 name: "chinese_only_rejected",
-newName: "我的电脑",
+newName: "我的电脑", //nolint:gosmopolitan
 wantErr: "invalid characters",
 },
 {

@@ -923,7 +925,7 @@ func TestRenameNodeComprehensive(t *testing.T) {
 },
 {
 name: "mixed_chinese_emoji_rejected",
-newName: "测试💻机器",
+newName: "测试💻机器", //nolint:gosmopolitan
 wantErr: "invalid characters",
 },
 {
@@ -102,6 +102,7 @@ func TestSQLiteDriverPragmaIntegration(t *testing.T) {
 defer db.Close()

 // Test connection
+//nolint:noinlineerr
 if err := db.PingContext(context.Background()); err != nil {
 t.Fatalf("Failed to ping database: %v", err)
 }

@@ -181,11 +182,13 @@ func TestForeignKeyConstraintEnforcement(t *testing.T) {
 );
 `

+//nolint:noinlineerr
 if _, err := db.ExecContext(context.Background(), schema); err != nil {
 t.Fatalf("Failed to create schema: %v", err)
 }

 // Insert parent record
+//nolint:noinlineerr
 if _, err := db.ExecContext(context.Background(), "INSERT INTO parent (id, name) VALUES (1, 'Parent 1')"); err != nil {
 t.Fatalf("Failed to insert parent: %v", err)
 }
@@ -67,6 +67,7 @@ func (TextSerialiser) Scan(ctx context.Context, field *schema.Field, dst reflect

 ret := f.Call(args)
 if !ret[0].IsNil() {
+//nolint:forcetypeassert
 return decodingError(field.Name, ret[0].Interface().(error))
 }

@@ -97,7 +98,7 @@ func (TextSerialiser) Value(ctx context.Context, field *schema.Field, dst reflec
 // always comparable, particularly when reflection is involved:
 // https://dev.to/arxeiss/in-go-nil-is-not-equal-to-nil-sometimes-jn8
 if v == nil || (reflect.ValueOf(v).Kind() == reflect.Pointer && reflect.ValueOf(v).IsNil()) {
-return nil, nil
+return nil, nil //nolint:nilnil
 }

 b, err := v.MarshalText()
@@ -97,6 +97,7 @@ func RenameUser(tx *gorm.DB, uid types.UserID, newName string) error {
 return err
 }

+//nolint:noinlineerr
 if err = util.ValidateHostname(newName); err != nil {
 return err
 }
@@ -70,7 +70,7 @@ func TestDestroyUserErrors(t *testing.T) {
 user, err := db.CreateUser(types.User{Name: "test"})
 require.NoError(t, err)

-//nolint:staticcheck // SA4006: pak is used in new(pak.ID) below
+//nolint:staticcheck
 pak, err := db.CreatePreAuthKey(user.TypedID(), false, false, nil, nil)
 require.NoError(t, err)
@@ -161,9 +161,9 @@ func derpRandom() *rand.Rand {

 derpRandomOnce.Do(func() {
 seed := cmp.Or(viper.GetString("dns.base_domain"), time.Now().String())
-//nolint:gosec // G404,G115: Intentionally using math/rand for deterministic DERP server ID
+//nolint:gosec
 rnd := rand.New(rand.NewSource(0))
-//nolint:gosec // G115: Checksum is always positive and fits in int64
+//nolint:gosec
 rnd.Seed(int64(crc64.Checksum([]byte(seed), crc64Table)))
 derpRandomInst = rnd
 })
@@ -314,6 +314,7 @@ func DERPBootstrapDNSHandler(
 defer cancel()
 var resolver net.Resolver

+//nolint:unqueryvet
 for _, region := range derpMap.Regions().All() {
 for _, node := range region.Nodes().All() { // we don't care if we override some nodes
 addrs, err := resolver.LookupIP(resolvCtx, "ip", node.HostName())
@@ -104,6 +104,7 @@ func (e *ExtraRecordsMan) Run() {
 // and not watch it. We will therefore attempt to re-add it with a backoff.
 case fsnotify.Remove, fsnotify.Rename:
 _, err := backoff.Retry(context.Background(), func() (struct{}, error) {
+//nolint:noinlineerr
 if _, err := os.Stat(e.path); err != nil {
 return struct{}{}, err
 }
@@ -91,6 +91,7 @@ func (h *Headscale) handleVerifyRequest(
 }

 var derpAdmitClientRequest tailcfg.DERPAdmitClientRequest
+//nolint:noinlineerr
 if err := json.Unmarshal(body, &derpAdmitClientRequest); err != nil {
 return NewHTTPError(http.StatusBadRequest, "Bad Request: invalid JSON", fmt.Errorf("cannot parse derpAdmitClientRequest: %w", err))
 }

@@ -183,6 +184,7 @@ func (h *Headscale) HealthHandler(
 res.Status = "fail"
 }

+//nolint:noinlineerr
 if err := json.NewEncoder(writer).Encode(res); err != nil {
 log.Error().Err(err).Msg("failed to encode health response")
 }
@@ -86,7 +86,7 @@ func generateMapResponse(nc nodeConnection, mapper *mapper, r change.Change) (*t
 version := nc.version()

 if r.IsEmpty() {
-return nil, nil //nolint:nilnil // Empty response means nothing to send
+return nil, nil //nolint:nilnil
 }

 if nodeID == 0 {

@@ -99,7 +99,7 @@ func generateMapResponse(nc nodeConnection, mapper *mapper, r change.Change) (*t

 // Handle self-only responses
 if r.IsSelfOnly() && r.TargetNode != nodeID {
-return nil, nil //nolint:nilnil // No response needed for other nodes when self-only
+return nil, nil //nolint:nilnil
 }

 // Check if this is a self-update (the changed node is the receiving node).
@@ -236,8 +236,8 @@ func setupBatcherWithTestData(
 }

 derpMap, err := derp.GetDERPMap(cfg.DERP)
-assert.NoError(t, err)
-assert.NotNil(t, derpMap)
+require.NoError(t, err)
+require.NotNil(t, derpMap)

 state.SetDERPMap(derpMap)

@@ -1108,6 +1108,8 @@ func TestBatcherWorkQueueBatching(t *testing.T) {
 // The test verifies that channels are closed synchronously and deterministically
 // even when real node updates are being processed, ensuring no race conditions
 // occur during channel replacement with actual workload.
+//

 func XTestBatcherChannelClosingRace(t *testing.T) {
 for _, batcherFunc := range allBatcherFunctions {
 t.Run(batcherFunc.name, func(t *testing.T) {

@@ -1330,6 +1332,8 @@ func TestBatcherWorkerChannelSafety(t *testing.T) {
 // real node data. The test validates that stable clients continue to function
 // normally and receive proper updates despite the connection churn from other clients,
 // ensuring system stability under concurrent load.
+//
+//nolint:gocyclo
 func TestBatcherConcurrentClients(t *testing.T) {
 if testing.Short() {
 t.Skip("Skipping concurrent client test in short mode")

@@ -1608,6 +1612,8 @@ func TestBatcherConcurrentClients(t *testing.T) {
 // It validates that the system remains stable with no deadlocks, panics, or
 // missed updates under sustained high load. The test uses real node data to
 // generate authentic update scenarios and tracks comprehensive statistics.
+//
+//nolint:gocyclo,thelper
 func XTestBatcherScalability(t *testing.T) {
 if testing.Short() {
 t.Skip("Skipping scalability test in short mode")

@@ -1636,6 +1642,7 @@ func XTestBatcherScalability(t *testing.T) {
 description string
 }

+//nolint:prealloc
 var testCases []testCase

 // Generate all combinations of the test matrix

@@ -2393,6 +2400,7 @@ func TestBatcherRapidReconnection(t *testing.T) {
 }
 }

+//nolint:gocyclo
 func TestBatcherMultiConnection(t *testing.T) {
 for _, batcherFunc := range allBatcherFunctions {
 t.Run(batcherFunc.name, func(t *testing.T) {
@@ -278,6 +278,7 @@ func (b *MapResponseBuilder) WithPeerChangedPatch(changes []*tailcfg.PeerChange)

 // WithPeersRemoved adds removed peer IDs.
 func (b *MapResponseBuilder) WithPeersRemoved(removedIDs ...types.NodeID) *MapResponseBuilder {
+//nolint:prealloc
 var tailscaleIDs []tailcfg.NodeID
 for _, id := range removedIDs {
 tailscaleIDs = append(tailscaleIDs, id.NodeID())
@@ -340,7 +340,7 @@ func TestMapResponseBuilder_MultipleErrors(t *testing.T) {
 // Build should return a multierr
 data, err := result.Build()
 assert.Nil(t, data)
-assert.Error(t, err)
+require.Error(t, err)

 // The error should contain information about multiple errors
 assert.Contains(t, err.Error(), "multiple errors")
@@ -192,7 +192,7 @@ func (m *mapper) policyChangeResponse(
 // Convert tailcfg.NodeID to types.NodeID for WithPeersRemoved
 removedIDs := make([]types.NodeID, len(removedPeers))
 for i, id := range removedPeers {
-removedIDs[i] = types.NodeID(id) //nolint:gosec // NodeID types are equivalent
+removedIDs[i] = types.NodeID(id) //nolint:gosec
 }

 builder.WithPeersRemoved(removedIDs...)

@@ -215,7 +215,7 @@ func (m *mapper) buildFromChange(
 resp *change.Change,
 ) (*tailcfg.MapResponse, error) {
 if resp.IsEmpty() {
-return nil, nil //nolint:nilnil // Empty response means nothing to send, not an error
+return nil, nil //nolint:nilnil
 }

 // If this is a self-update (the changed node is the receiving node),

@@ -307,7 +307,7 @@ func writeDebugMapResponse(

 func (m *mapper) debugMapResponses() (map[types.NodeID][]tailcfg.MapResponse, error) {
 if debugDumpMapResponsePath == "" {
-return nil, nil
+return nil, nil //nolint:nilnil
 }

 return ReadMapResponsesFromDirectory(debugDumpMapResponsePath)
@@ -244,7 +244,7 @@ func (ns *noiseServer) NoiseRegistrationHandler(
 return
 }

-//nolint:contextcheck // IIFE uses context from outer scope implicitly
+//nolint:contextcheck
 registerRequest, registerResponse := func() (*tailcfg.RegisterRequest, *tailcfg.RegisterResponse) {
 var resp *tailcfg.RegisterResponse

@@ -254,6 +254,7 @@ func (ns *noiseServer) NoiseRegistrationHandler(
 }

 var regReq tailcfg.RegisterRequest
+//nolint:noinlineerr
 if err := json.Unmarshal(body, &regReq); err != nil {
 return &regReq, regErr(err)
 }
@@ -69,7 +69,7 @@ func NewAuthProviderOIDC(
) (*AuthProviderOIDC, error) {
	var err error
	// grab oidc config if it hasn't been already
-	//nolint:contextcheck // Initialization code - no parent context available
+	//nolint:contextcheck
	oidcProvider, err := oidc.NewProvider(context.Background(), cfg.Issuer)
	if err != nil {
		return nil, fmt.Errorf("creating OIDC provider from issuer config: %w", err)

@@ -238,6 +238,7 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
	nodeExpiry := a.determineNodeExpiry(idToken.Expiry)

	var claims types.OIDCClaims
+	//nolint:noinlineerr
	if err := idToken.Claims(&claims); err != nil {
		httpError(writer, fmt.Errorf("decoding ID token claims: %w", err))
		return

@@ -338,6 +339,7 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
	writer.Header().Set("Content-Type", "text/html; charset=utf-8")
	writer.WriteHeader(http.StatusOK)

+	//nolint:noinlineerr
	if _, err := writer.Write(content.Bytes()); err != nil {
		util.LogErr(err, "Failed to write HTTP response")
	}
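
As background for the `oidc.NewProvider` / `idToken.Claims` calls above, a minimal sketch of the usual flow with the `github.com/coreos/go-oidc/v3` package; the import path, issuer URL, and client ID here are assumptions for illustration, not taken from this change:

package main

import (
	"context"
	"log"

	"github.com/coreos/go-oidc/v3/oidc"
)

func main() {
	ctx := context.Background()

	// Discovery happens at construction time, which is why the call above
	// runs during initialization with context.Background().
	provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
	if err != nil {
		log.Fatalf("creating OIDC provider: %v", err)
	}

	verifier := provider.Verifier(&oidc.Config{ClientID: "example-client-id"})

	// A raw ID token from the token exchange would be verified like this.
	idToken, err := verifier.Verify(ctx, "raw-id-token")
	if err != nil {
		log.Fatalf("verifying ID token: %v", err)
	}

	var claims map[string]any
	if err := idToken.Claims(&claims); err != nil {
		log.Fatalf("decoding claims: %v", err)
	}
	log.Printf("claims: %v", claims)
}
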
@@ -70,6 +70,7 @@ func PolicyManagersForTest(pol []byte, users []types.User, nodes views.Slice[typ
}

func PolicyManagerFuncsForTest(pol []byte) []func([]types.User, views.Slice[types.NodeView]) (PolicyManager, error) {
+	//nolint:prealloc
	var polmanFuncs []func([]types.User, views.Slice[types.NodeView]) (PolicyManager, error)

	polmanFuncs = append(polmanFuncs, func(u []types.User, n views.Slice[types.NodeView]) (PolicyManager, error) {

@@ -327,7 +327,7 @@ func TestApproveRoutesWithPolicy_EdgeCases(t *testing.T) {
}

func TestApproveRoutesWithPolicy_NilPolicyManagerCase(t *testing.T) {
-	//nolint:staticcheck // SA4006: user is used in new(user.ID) and new(user) below
+	//nolint:staticcheck
	user := types.User{
		Model: gorm.Model{ID: 1},
		Name:  "test",
@@ -798,6 +798,7 @@ func TestReduceNodes(t *testing.T) {

func TestReduceNodesFromPolicy(t *testing.T) {
	n := func(id types.NodeID, ip, hostname, username string, routess ...string) *types.Node {
+		//nolint:prealloc
		var routes []netip.Prefix
		for _, route := range routess {
			routes = append(routes, netip.MustParsePrefix(route))
@@ -119,6 +119,8 @@ func (pol *Policy) compileFilterRulesForNode(
// It returns a slice of filter rules because when an ACL has both autogroup:self
// and other destinations, they need to be split into separate rules with different
// source filtering logic.
+//
+//nolint:gocyclo
func (pol *Policy) compileACLWithAutogroupSelf(
	acl ACL,
	users types.Users,

@@ -284,13 +286,14 @@ func sshAction(accept bool, duration time.Duration) tailcfg.SSHAction {
	}
}

+//nolint:gocyclo
func (pol *Policy) compileSSHPolicy(
	users types.Users,
	node types.NodeView,
	nodes views.Slice[types.NodeView],
) (*tailcfg.SSHPolicy, error) {
	if pol == nil || pol.SSHs == nil || len(pol.SSHs) == 0 {
-		return nil, nil
+		return nil, nil //nolint:nilnil
	}

	log.Trace().Caller().Msgf("compiling SSH policy for node %q", node.Hostname())
@@ -76,6 +76,7 @@ func TestInvalidateAutogroupSelfCache(t *testing.T) {
		{Model: gorm.Model{ID: 3}, Name: "user3", Email: "user3@headscale.net"},
	}

+	//nolint:goconst
	policy := `{
		"acls": [
			{

@@ -94,7 +95,7 @@ func TestInvalidateAutogroupSelfCache(t *testing.T) {
	}

	for i, n := range initialNodes {
-		n.ID = types.NodeID(i + 1) //nolint:gosec // G115: Test code with small values
+		n.ID = types.NodeID(i + 1) //nolint:gosec
	}

	pm, err := NewPolicyManager([]byte(policy), users, initialNodes.ViewSlice())

@@ -187,7 +188,7 @@ func TestInvalidateAutogroupSelfCache(t *testing.T) {
		}

		if !found {
-			n.ID = types.NodeID(len(initialNodes) + i + 1) //nolint:gosec // G115: Test code with small values
+			n.ID = types.NodeID(len(initialNodes) + i + 1) //nolint:gosec
		}
	}
@@ -753,8 +754,8 @@ func TestAutogroupSelfWithAdminOverride(t *testing.T) {
		Hostname: "admin-device",
		IPv4:     ap("100.64.0.1"),
		IPv6:     ap("fd7a:115c:a1e0::1"),
-		User:     ptr.To(users[0]),
-		UserID:   ptr.To(users[0].ID),
+		User:     new(users[0]),
+		UserID:   new(users[0].ID),
		Hostinfo: &tailcfg.Hostinfo{},
	}

@@ -764,8 +765,8 @@ func TestAutogroupSelfWithAdminOverride(t *testing.T) {
		Hostname: "user1-server",
		IPv4:     ap("100.64.0.2"),
		IPv6:     ap("fd7a:115c:a1e0::2"),
-		User:     ptr.To(users[1]),
-		UserID:   ptr.To(users[1].ID),
+		User:     new(users[1]),
+		UserID:   new(users[1].ID),
		Tags:     []string{"tag:server"},
		Hostinfo: &tailcfg.Hostinfo{},
	}

@@ -836,8 +837,8 @@ func TestAutogroupSelfSymmetricVisibility(t *testing.T) {
		Hostname: "device-a",
		IPv4:     ap("100.64.0.1"),
		IPv6:     ap("fd7a:115c:a1e0::1"),
-		User:     ptr.To(users[0]),
-		UserID:   ptr.To(users[0].ID),
+		User:     new(users[0]),
+		UserID:   new(users[0].ID),
		Hostinfo: &tailcfg.Hostinfo{},
	}

@@ -847,8 +848,8 @@ func TestAutogroupSelfSymmetricVisibility(t *testing.T) {
		Hostname: "device-b",
		IPv4:     ap("100.64.0.2"),
		IPv6:     ap("fd7a:115c:a1e0::2"),
-		User:     ptr.To(users[1]),
-		UserID:   ptr.To(users[1].ID),
+		User:     new(users[1]),
+		UserID:   new(users[1].ID),
		Tags:     []string{"tag:web"},
		Hostinfo: &tailcfg.Hostinfo{},
	}
@@ -187,7 +187,7 @@ func (a Asterix) Resolve(_ *Policy, _ types.Users, nodes views.Slice[types.NodeV

// Username is a string that represents a username, it must contain an @.
//
-//nolint:recvcheck // Mixed receivers: pointer for UnmarshalJSON, value for read-only methods
+//nolint:recvcheck
type Username string

func (u Username) Validate() error {

@@ -299,7 +299,7 @@ func (u Username) Resolve(_ *Policy, users types.Users, nodes views.Slice[types.

// Group is a special string which is always prefixed with `group:`.
//
-//nolint:recvcheck // Mixed receivers: pointer for UnmarshalJSON, value for read-only methods
+//nolint:recvcheck
type Group string

func (g Group) Validate() error {

@@ -368,7 +368,7 @@ func (g Group) Resolve(p *Policy, users types.Users, nodes views.Slice[types.Nod

// Tag is a special string which is always prefixed with `tag:`.
//
-//nolint:recvcheck // Mixed receivers: pointer for UnmarshalJSON, value for read-only methods
+//nolint:recvcheck
type Tag string

func (t Tag) Validate() error {

@@ -422,7 +422,7 @@ func (t Tag) MarshalJSON() ([]byte, error) {

// Host is a string that represents a hostname.
//
-//nolint:recvcheck // Mixed receivers: pointer for UnmarshalJSON, value for read-only methods
+//nolint:recvcheck
type Host string

func (h Host) Validate() error {

@@ -482,7 +482,7 @@ func (h Host) Resolve(p *Policy, _ types.Users, nodes views.Slice[types.NodeView
	return buildIPSetMultiErr(&ips, errs)
}

-//nolint:recvcheck // Mixed receivers: pointer for UnmarshalJSON, value for read-only methods
+//nolint:recvcheck
type Prefix netip.Prefix

func (p Prefix) Validate() error {
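
The repeated `recvcheck` suppressions above all cover the same pattern: a type whose UnmarshalJSON needs a pointer receiver while its read-only methods keep value receivers. A generic self-contained sketch of that mixed-receiver shape (names here are illustrative, not the repository's code):

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"strings"
)

//nolint:recvcheck
type Username string

// Validate is read-only, so a value receiver is natural.
func (u Username) Validate() error {
	if !strings.Contains(string(u), "@") {
		return errors.New("username must contain @")
	}
	return nil
}

// UnmarshalJSON must assign through the receiver, so it needs a pointer.
func (u *Username) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	*u = Username(s)
	return u.Validate()
}

func main() {
	var u Username
	if err := json.Unmarshal([]byte(`"alice@example.com"`), &u); err != nil {
		panic(err)
	}
	fmt.Println(u, u.Validate() == nil)
}
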
@ -530,6 +530,7 @@ func (p *Prefix) UnmarshalJSON(b []byte) error {
|
|||
return err
|
||||
}
|
||||
|
||||
//nolint:noinlineerr
|
||||
if err := p.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -572,7 +573,7 @@ func appendIfNodeHasIP(nodes views.Slice[types.NodeView], ips *netipx.IPSetBuild
|
|||
|
||||
// AutoGroup is a special string which is always prefixed with `autogroup:`.
|
||||
//
|
||||
//nolint:recvcheck // Mixed receivers: pointer for UnmarshalJSON, value for read-only methods
|
||||
//nolint:recvcheck
|
||||
type AutoGroup string
|
||||
|
||||
const (
|
||||
|
|
@ -622,6 +623,7 @@ func (ag AutoGroup) MarshalJSON() ([]byte, error) {
|
|||
func (ag AutoGroup) Resolve(p *Policy, users types.Users, nodes views.Slice[types.NodeView]) (*netipx.IPSet, error) {
|
||||
var build netipx.IPSetBuilder
|
||||
|
||||
//nolint:exhaustive
|
||||
switch ag {
|
||||
case AutoGroupInternet:
|
||||
return util.TheInternet(), nil
|
||||
|
|
@ -724,6 +726,7 @@ func (ve *AliasWithPorts) UnmarshalJSON(b []byte) error {
|
|||
return err
|
||||
}
|
||||
|
||||
//nolint:noinlineerr
|
||||
if err := ve.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -804,7 +807,7 @@ func (ve *AliasEnc) UnmarshalJSON(b []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
//nolint:recvcheck // Mixed receivers: pointer for UnmarshalJSON, value for read-only methods
|
||||
//nolint:recvcheck
|
||||
type Aliases []Alias
|
||||
|
||||
func (a *Aliases) UnmarshalJSON(b []byte) error {
|
||||
|
|
@ -1051,7 +1054,7 @@ type Usernames []Username
|
|||
|
||||
// Groups are a map of Group to a list of Username.
|
||||
//
|
||||
//nolint:recvcheck // Mixed receivers: pointer for UnmarshalJSON, value for read-only methods
|
||||
//nolint:recvcheck
|
||||
type Groups map[Group]Usernames
|
||||
|
||||
func (g Groups) Contains(group *Group) error {
|
||||
|
|
@ -1146,7 +1149,7 @@ func (g *Groups) UnmarshalJSON(b []byte) error {
|
|||
|
||||
// Hosts are alias for IP addresses or subnets.
|
||||
//
|
||||
//nolint:recvcheck // Mixed receivers: pointer for UnmarshalJSON, value for read-only methods
|
||||
//nolint:recvcheck
|
||||
type Hosts map[Host]Prefix
|
||||
|
||||
func (h *Hosts) UnmarshalJSON(b []byte) error {
|
||||
|
|
@ -1344,7 +1347,7 @@ func resolveAutoApprovers(p *Policy, users types.Users, nodes views.Slice[types.
|
|||
|
||||
// Action represents the action to take for an ACL rule.
|
||||
//
|
||||
//nolint:recvcheck // Mixed receivers: pointer for UnmarshalJSON, value for read-only methods
|
||||
//nolint:recvcheck
|
||||
type Action string
|
||||
|
||||
const (
|
||||
|
|
@ -1353,7 +1356,7 @@ const (
|
|||
|
||||
// SSHAction represents the action to take for an SSH rule.
|
||||
//
|
||||
//nolint:recvcheck // Mixed receivers: pointer for UnmarshalJSON, value for read-only methods
|
||||
//nolint:recvcheck
|
||||
type SSHAction string
|
||||
|
||||
const (
|
||||
|
|
@ -1411,7 +1414,7 @@ func (a SSHAction) MarshalJSON() ([]byte, error) {
|
|||
|
||||
// Protocol represents a network protocol with its IANA number and descriptions.
|
||||
//
|
||||
//nolint:recvcheck // Mixed receivers: pointer for UnmarshalJSON, value for read-only methods
|
||||
//nolint:recvcheck
|
||||
type Protocol string
|
||||
|
||||
const (
|
||||
|
|
@ -1439,6 +1442,7 @@ func (p Protocol) String() string {
|
|||
|
||||
// Description returns the human-readable description of the Protocol.
|
||||
func (p Protocol) Description() string {
|
||||
//nolint:exhaustive
|
||||
switch p {
|
||||
case ProtocolICMP:
|
||||
return "Internet Control Message Protocol"
|
||||
|
|
@ -1476,6 +1480,7 @@ func (p Protocol) Description() string {
|
|||
// parseProtocol converts a Protocol to its IANA protocol numbers.
|
||||
// Since validation happens during UnmarshalJSON, this method should not fail for valid Protocol values.
|
||||
func (p Protocol) parseProtocol() []int {
|
||||
//nolint:exhaustive
|
||||
switch p {
|
||||
case "":
|
||||
// Empty protocol applies to TCP and UDP traffic only
|
||||
|
|
@ -1532,6 +1537,7 @@ func (p *Protocol) UnmarshalJSON(b []byte) error {
|
|||
|
||||
// validate checks if the Protocol is valid.
|
||||
func (p Protocol) validate() error {
|
||||
//nolint:exhaustive
|
||||
switch p {
|
||||
case "", ProtocolICMP, ProtocolIGMP, ProtocolIPv4, ProtocolIPInIP,
|
||||
ProtocolTCP, ProtocolEGP, ProtocolIGP, ProtocolUDP, ProtocolGRE,
|
||||
|
|
@ -1598,6 +1604,7 @@ type ACL struct {
|
|||
func (a *ACL) UnmarshalJSON(b []byte) error {
|
||||
// First unmarshal into a map to filter out comment fields
|
||||
var raw map[string]any
|
||||
//nolint:noinlineerr
|
||||
if err := json.Unmarshal(b, &raw, policyJSONOpts...); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -1623,6 +1630,7 @@ func (a *ACL) UnmarshalJSON(b []byte) error {
|
|||
var temp aclAlias
|
||||
|
||||
// Unmarshal into the temporary struct using the v2 JSON options
|
||||
//nolint:noinlineerr
|
||||
if err := json.Unmarshal(filteredBytes, &temp, policyJSONOpts...); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -1759,6 +1767,8 @@ func validateAutogroupForSSHUser(user *AutoGroup) error {
|
|||
// the unmarshaling process.
|
||||
// It runs through all rules and checks if there are any inconsistencies
|
||||
// in the policy that needs to be addressed before it can be used.
|
||||
//
|
||||
//nolint:gocyclo
|
||||
func (p *Policy) validate() error {
|
||||
if p == nil {
|
||||
panic("passed nil policy")
|
||||
|
|
@@ -1808,14 +1818,15 @@ func (p *Policy) validate() error {
		}

		for _, dst := range acl.Destinations {
+			//nolint:gocritic
			switch dst.Alias.(type) {
			case *Host:
-				h := dst.Alias.(*Host)
+				h := dst.Alias.(*Host) //nolint:forcetypeassert
				if !p.Hosts.exist(*h) {
					errs = append(errs, fmt.Errorf("%w: %q - please define or remove the reference", ErrHostNotDefined, *h))
				}
			case *AutoGroup:
-				ag := dst.Alias.(*AutoGroup)
+				ag := dst.Alias.(*AutoGroup) //nolint:forcetypeassert

				err := validateAutogroupSupported(ag)
				if err != nil {
@@ -1829,14 +1840,14 @@ func (p *Policy) validate() error {
					continue
				}
			case *Group:
-				g := dst.Alias.(*Group)
+				g := dst.Alias.(*Group) //nolint:forcetypeassert

				err := p.Groups.Contains(g)
				if err != nil {
					errs = append(errs, err)
				}
			case *Tag:
-				tagOwner := dst.Alias.(*Tag)
+				tagOwner := dst.Alias.(*Tag) //nolint:forcetypeassert

				err := p.TagOwners.Contains(tagOwner)
				if err != nil {
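
The two hunks above keep a `switch dst.Alias.(type)` followed by a per-case type assertion, which is what the `gocritic` and `forcetypeassert` directives silence. For comparison, a standalone sketch of the binding form that avoids both, using illustrative types rather than the repository's:

package main

import "fmt"

type Host string

type Group string

func describe(alias any) string {
	switch v := alias.(type) { // v already has the concrete type in each case
	case *Host:
		return "host " + string(*v)
	case *Group:
		return "group " + string(*v)
	default:
		return fmt.Sprintf("unknown alias %T", v)
	}
}

func main() {
	h := Host("router.internal")
	g := Group("group:admins")
	fmt.Println(describe(&h), describe(&g))
}
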
@ -2013,7 +2024,7 @@ type SSH struct {
|
|||
// SSHSrcAliases is a list of aliases that can be used as sources in an SSH rule.
|
||||
// It can be a list of usernames, groups, tags or autogroups.
|
||||
//
|
||||
//nolint:recvcheck // Mixed receivers: pointer for UnmarshalJSON, value for read-only methods
|
||||
//nolint:recvcheck
|
||||
type SSHSrcAliases []Alias
|
||||
|
||||
// MarshalJSON marshals the Groups to JSON.
|
||||
|
|
@ -2192,7 +2203,7 @@ func (u SSHUser) MarshalJSON() ([]byte, error) {
|
|||
// This is the only entrypoint of reading a policy from a file or other source.
|
||||
func unmarshalPolicy(b []byte) (*Policy, error) {
|
||||
if len(b) == 0 {
|
||||
return nil, nil
|
||||
return nil, nil //nolint:nilnil
|
||||
}
|
||||
|
||||
var policy Policy
|
||||
|
|
@ -2204,7 +2215,9 @@ func unmarshalPolicy(b []byte) (*Policy, error) {
|
|||
|
||||
ast.Standardize()
|
||||
|
||||
//nolint:noinlineerr
|
||||
if err = json.Unmarshal(ast.Pack(), &policy, policyJSONOpts...); err != nil {
|
||||
//nolint:noinlineerr
|
||||
if serr, ok := errors.AsType[*json.SemanticError](err); ok && errors.Is(serr.Err, json.ErrUnknownName) {
|
||||
ptr := serr.JSONPointer
|
||||
name := ptr.LastToken()
|
||||
|
|
@ -2215,6 +2228,7 @@ func unmarshalPolicy(b []byte) (*Policy, error) {
|
|||
return nil, fmt.Errorf("parsing policy from bytes: %w", err)
|
||||
}
|
||||
|
||||
//nolint:noinlineerr
|
||||
if err := policy.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -1605,17 +1605,17 @@ func TestResolvePolicy(t *testing.T) {

	// Extract users to variables so we can take their addresses
	// The variables below are all used in new() calls in the test cases.
-	//nolint:staticcheck // SA4006: testuser is used in new(testuser) below
+	//nolint:staticcheck
	testuser := users["testuser"]
-	//nolint:staticcheck // SA4006: groupuser is used in new(groupuser) below
+	//nolint:staticcheck
	groupuser := users["groupuser"]
-	//nolint:staticcheck // SA4006: groupuser1 is used in new(groupuser1) below
+	//nolint:staticcheck
	groupuser1 := users["groupuser1"]
-	//nolint:staticcheck // SA4006: groupuser2 is used in new(groupuser2) below
+	//nolint:staticcheck
	groupuser2 := users["groupuser2"]
-	//nolint:staticcheck // SA4006: notme is used in new(notme) below
+	//nolint:staticcheck
	notme := users["notme"]
-	//nolint:staticcheck // SA4006: testuser2 is used in new(testuser2) below
+	//nolint:staticcheck
	testuser2 := users["testuser2"]

	tests := []struct {
|
|
@ -214,6 +214,7 @@ func (m *mapSession) serveLongPoll() {
|
|||
// adding this before connecting it to the state ensure that
|
||||
// it does not miss any updates that might be sent in the split
|
||||
// time between the node connecting and the batcher being ready.
|
||||
//nolint:noinlineerr
|
||||
if err := m.h.mapBatcher.AddNode(m.node.ID, m.ch, m.capVer); err != nil {
|
||||
m.errf(err, "failed to add node to batcher")
|
||||
log.Error().Uint64("node.id", m.node.ID.Uint64()).Str("node.name", m.node.Hostname).Err(err).Msg("AddNode failed in poll session")
|
||||
|
|
@@ -288,8 +289,9 @@ func (m *mapSession) writeMap(msg *tailcfg.MapResponse) error {
		jsonBody = zstdframe.AppendEncode(nil, jsonBody, zstdframe.FastestCompression)
	}

+	//nolint:prealloc
	data := make([]byte, reservedResponseHeaderSize)
-	//nolint:gosec // G115: JSON response size will not exceed uint32 max
+	//nolint:gosec
	binary.LittleEndian.PutUint32(data, uint32(len(jsonBody)))
	data = append(data, jsonBody...)
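
The hunk above writes a 4-byte little-endian length header followed by the JSON body. A standalone sketch of that length-prefixed framing, encode and decode; the header-size constant and message contents are assumptions for illustration:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

const headerSize = 4

// frame prepends a little-endian uint32 length header to the body.
func frame(body []byte) []byte {
	data := make([]byte, headerSize, headerSize+len(body))
	binary.LittleEndian.PutUint32(data, uint32(len(body)))
	return append(data, body...)
}

// unframe reads one length-prefixed message back out of a stream.
func unframe(r io.Reader) ([]byte, error) {
	var hdr [headerSize]byte
	if _, err := io.ReadFull(r, hdr[:]); err != nil {
		return nil, err
	}
	body := make([]byte, binary.LittleEndian.Uint32(hdr[:]))
	if _, err := io.ReadFull(r, body); err != nil {
		return nil, err
	}
	return body, nil
}

func main() {
	msg := frame([]byte(`{"keepalive":true}`))
	body, err := unframe(bytes.NewReader(msg))
	fmt.Println(string(body), err)
}
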
@@ -334,13 +336,13 @@ func (m *mapSession) logf(event *zerolog.Event) *zerolog.Event {
		Str("node.name", m.node.Hostname)
}

-//nolint:zerologlint // logf returns *zerolog.Event which is properly terminated with Msgf
+//nolint:zerologlint
func (m *mapSession) infof(msg string, a ...any) { m.logf(log.Info().Caller()).Msgf(msg, a...) }

-//nolint:zerologlint // logf returns *zerolog.Event which is properly terminated with Msgf
+//nolint:zerologlint
func (m *mapSession) tracef(msg string, a ...any) { m.logf(log.Trace().Caller()).Msgf(msg, a...) }

-//nolint:zerologlint // logf returns *zerolog.Event which is properly terminated with Msgf
+//nolint:zerologlint
func (m *mapSession) errf(err error, msg string, a ...any) {
	m.logf(log.Error().Caller()).Err(err).Msgf(msg, a...)
}
@@ -55,8 +55,8 @@ var (
	})
	nodeStoreNodesCount = promauto.NewGauge(prometheus.GaugeOpts{
		Namespace: prometheusNamespace,
-		Name:      "nodestore_nodes_total",
-		Help:      "Total number of nodes in the NodeStore",
+		Name:      "nodestore_nodes",
+		Help:      "Number of nodes in the NodeStore",
	})
	nodeStorePeersCalculationDuration = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: prometheusNamespace,
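
The rename above drops the `_total` suffix from a gauge; by Prometheus naming convention that suffix is reserved for counters, while gauges are named for the current quantity they track. A small sketch of the two metric kinds (namespace and names here are illustrative):

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	nodesGauge = promauto.NewGauge(prometheus.GaugeOpts{
		Namespace: "example",
		Name:      "nodestore_nodes", // gauge: current count, no _total suffix
		Help:      "Number of nodes in the NodeStore",
	})

	updatesCounter = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "example",
		Name:      "nodestore_updates_total", // counter: monotonically increasing
		Help:      "Total number of NodeStore updates processed",
	})
)

func main() {
	nodesGauge.Set(3)
	updatesCounter.Inc()
}
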
@ -107,6 +107,7 @@ func TestSnapshotFromNodes(t *testing.T) {
|
|||
return nodes, allowAllPeersFunc
|
||||
},
|
||||
validate: func(t *testing.T, nodes map[types.NodeID]types.Node, snapshot Snapshot) {
|
||||
t.Helper()
|
||||
assert.Len(t, snapshot.nodesByID, 3)
|
||||
assert.Len(t, snapshot.allNodes, 3)
|
||||
assert.Len(t, snapshot.peersByNode, 3)
|
||||
|
|
@ -136,6 +137,7 @@ func TestSnapshotFromNodes(t *testing.T) {
|
|||
return nodes, peersFunc
|
||||
},
|
||||
validate: func(t *testing.T, nodes map[types.NodeID]types.Node, snapshot Snapshot) {
|
||||
t.Helper()
|
||||
assert.Len(t, snapshot.nodesByID, 4)
|
||||
assert.Len(t, snapshot.allNodes, 4)
|
||||
assert.Len(t, snapshot.peersByNode, 4)
|
||||
|
|
@ -252,6 +254,7 @@ func TestNodeStoreOperations(t *testing.T) {
|
|||
{
|
||||
name: "create empty store and add single node",
|
||||
setupFunc: func(t *testing.T) *NodeStore {
|
||||
t.Helper()
|
||||
return NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)
|
||||
},
|
||||
steps: []testStep{
|
||||
|
|
@ -918,7 +921,7 @@ func TestNodeStoreConcurrentPutNode(t *testing.T) {
|
|||
go func(nodeID int) {
|
||||
defer wg.Done()
|
||||
|
||||
//nolint:gosec // G115: Test code with controlled values
|
||||
//nolint:gosec
|
||||
node := createConcurrentTestNode(types.NodeID(nodeID), "concurrent-node")
|
||||
|
||||
resultNode := store.PutNode(node)
|
||||
|
|
@ -958,7 +961,7 @@ func TestNodeStoreBatchingEfficiency(t *testing.T) {
|
|||
go func(nodeID int) {
|
||||
defer wg.Done()
|
||||
|
||||
//nolint:gosec // G115: Test code with controlled values
|
||||
//nolint:gosec
|
||||
node := createConcurrentTestNode(types.NodeID(nodeID), "batch-node")
|
||||
|
||||
resultNode := store.PutNode(node)
|
||||
|
|
@ -1067,7 +1070,7 @@ func TestNodeStoreResourceCleanup(t *testing.T) {
|
|||
|
||||
const ops = 100
|
||||
for i := range ops {
|
||||
nodeID := types.NodeID(i + 1) //nolint:gosec // G115: Test code with controlled values
|
||||
nodeID := types.NodeID(i + 1) //nolint:gosec
|
||||
node := createConcurrentTestNode(nodeID, "cleanup-node")
|
||||
resultNode := store.PutNode(node)
|
||||
assert.True(t, resultNode.Valid())
|
||||
|
|
@ -1111,7 +1114,7 @@ func TestNodeStoreOperationTimeout(t *testing.T) {
|
|||
|
||||
// Launch all PutNode operations concurrently
|
||||
for i := 1; i <= ops; i++ {
|
||||
nodeID := types.NodeID(i) //nolint:gosec // G115: Test code with controlled values
|
||||
nodeID := types.NodeID(i) //nolint:gosec
|
||||
|
||||
wg.Add(1)
|
||||
|
||||
|
|
@ -1137,7 +1140,7 @@ func TestNodeStoreOperationTimeout(t *testing.T) {
|
|||
wg = sync.WaitGroup{}
|
||||
|
||||
for i := 1; i <= ops; i++ {
|
||||
nodeID := types.NodeID(i) //nolint:gosec // G115: Test code with controlled values
|
||||
nodeID := types.NodeID(i) //nolint:gosec
|
||||
|
||||
wg.Add(1)
|
||||
|
||||
|
|
@ -1202,7 +1205,7 @@ func TestNodeStoreUpdateNonExistentNode(t *testing.T) {
|
|||
store := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)
|
||||
store.Start()
|
||||
|
||||
nonExistentID := types.NodeID(999 + i) //nolint:gosec // G115: Test code with controlled values
|
||||
nonExistentID := types.NodeID(999 + i) //nolint:gosec
|
||||
updateCallCount := 0
|
||||
|
||||
fmt.Printf("[TestNodeStoreUpdateNonExistentNode] UpdateNode(%d) starting\n", nonExistentID)
|
||||
|
|
@ -1226,7 +1229,7 @@ func BenchmarkNodeStoreAllocations(b *testing.B) {
|
|||
defer store.Stop()
|
||||
|
||||
for i := 0; b.Loop(); i++ {
|
||||
nodeID := types.NodeID(i + 1) //nolint:gosec // G115: Benchmark code with controlled values
|
||||
nodeID := types.NodeID(i + 1) //nolint:gosec
|
||||
node := createConcurrentTestNode(nodeID, "bench-node")
|
||||
store.PutNode(node)
|
||||
store.UpdateNode(nodeID, func(n *types.Node) {
|
||||
|
|
|
|||
|
|
@ -224,6 +224,7 @@ func (s *State) ReloadPolicy() ([]change.Change, error) {
|
|||
// propagate correctly when switching between policy types.
|
||||
s.nodeStore.RebuildPeerMaps()
|
||||
|
||||
//nolint:prealloc
|
||||
cs := []change.Change{change.PolicyChange()}
|
||||
|
||||
// Always call autoApproveNodes during policy reload, regardless of whether
|
||||
|
|
@ -254,6 +255,7 @@ func (s *State) ReloadPolicy() ([]change.Change, error) {
|
|||
// CreateUser creates a new user and updates the policy manager.
|
||||
// Returns the created user, change set, and any error.
|
||||
func (s *State) CreateUser(user types.User) (*types.User, change.Change, error) {
|
||||
//nolint:noinlineerr
|
||||
if err := s.db.DB.Save(&user).Error; err != nil {
|
||||
return nil, change.Change{}, fmt.Errorf("creating user: %w", err)
|
||||
}
|
||||
|
|
@ -288,6 +290,7 @@ func (s *State) UpdateUser(userID types.UserID, updateFn func(*types.User) error
|
|||
return nil, err
|
||||
}
|
||||
|
||||
//nolint:noinlineerr
|
||||
if err := updateFn(user); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -492,7 +495,7 @@ func (s *State) Connect(id types.NodeID) []change.Change {
|
|||
|
||||
// Disconnect marks a node as disconnected and updates its primary routes in the state.
|
||||
func (s *State) Disconnect(id types.NodeID) ([]change.Change, error) {
|
||||
//nolint:staticcheck // SA4006: now is used in new(now) below
|
||||
//nolint:staticcheck
|
||||
now := time.Now()
|
||||
|
||||
node, ok := s.nodeStore.UpdateNode(id, func(n *types.Node) {
|
||||
|
|
@ -817,6 +820,7 @@ func (s *State) ExpireExpiredNodes(lastCheck time.Time) (time.Time, []change.Cha
|
|||
|
||||
var updates []change.Change
|
||||
|
||||
//nolint:unqueryvet
|
||||
for _, node := range s.nodeStore.ListNodes().All() {
|
||||
if !node.Valid() {
|
||||
continue
|
||||
|
|
@ -1697,7 +1701,7 @@ func (s *State) HandleNodeFromPreAuthKey(
|
|||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
return nil, nil //nolint:nilnil
|
||||
})
|
||||
if err != nil {
|
||||
return types.NodeView{}, change.Change{}, fmt.Errorf("writing node to database: %w", err)
|
||||
|
|
|
|||
|
|
@ -15,43 +15,43 @@ import (
|
|||
// Material for MkDocs design system - exact values from official docs.
|
||||
const (
|
||||
// Text colors - from --md-default-fg-color CSS variables.
|
||||
colorTextPrimary = "#000000de" //nolint:unused // rgba(0,0,0,0.87) - Body text
|
||||
colorTextSecondary = "#0000008a" //nolint:unused // rgba(0,0,0,0.54) - Headings (--md-default-fg-color--light)
|
||||
colorTextTertiary = "#00000052" //nolint:unused // rgba(0,0,0,0.32) - Lighter text
|
||||
colorTextLightest = "#00000012" //nolint:unused // rgba(0,0,0,0.07) - Lightest text
|
||||
colorTextPrimary = "#000000de" //nolint:unused
|
||||
colorTextSecondary = "#0000008a" //nolint:unused
|
||||
colorTextTertiary = "#00000052" //nolint:unused
|
||||
colorTextLightest = "#00000012" //nolint:unused
|
||||
|
||||
// Code colors - from --md-code-* CSS variables.
|
||||
colorCodeFg = "#36464e" //nolint:unused // Code text color (--md-code-fg-color)
|
||||
colorCodeBg = "#f5f5f5" //nolint:unused // Code background (--md-code-bg-color)
|
||||
colorCodeFg = "#36464e" //nolint:unused
|
||||
colorCodeBg = "#f5f5f5" //nolint:unused
|
||||
|
||||
// Border colors.
|
||||
colorBorderLight = "#e5e7eb" //nolint:unused // Light borders
|
||||
colorBorderMedium = "#d1d5db" //nolint:unused // Medium borders
|
||||
colorBorderLight = "#e5e7eb" //nolint:unused
|
||||
colorBorderMedium = "#d1d5db" //nolint:unused
|
||||
|
||||
// Background colors.
|
||||
colorBackgroundPage = "#ffffff" //nolint:unused // Page background
|
||||
colorBackgroundCard = "#ffffff" //nolint:unused // Card/content background
|
||||
colorBackgroundPage = "#ffffff" //nolint:unused
|
||||
colorBackgroundCard = "#ffffff" //nolint:unused
|
||||
|
||||
// Accent colors - from --md-primary/accent-fg-color.
|
||||
colorPrimaryAccent = "#4051b5" //nolint:unused // Primary accent (links)
|
||||
colorAccent = "#526cfe" //nolint:unused // Secondary accent
|
||||
colorPrimaryAccent = "#4051b5" //nolint:unused
|
||||
colorAccent = "#526cfe" //nolint:unused
|
||||
|
||||
// Success colors.
|
||||
colorSuccess = "#059669" //nolint:unused // Success states
|
||||
colorSuccessLight = "#d1fae5" //nolint:unused // Success backgrounds
|
||||
colorSuccess = "#059669" //nolint:unused
|
||||
colorSuccessLight = "#d1fae5" //nolint:unused
|
||||
)
|
||||
|
||||
// Spacing System
|
||||
// Based on 4px/8px base unit for consistent rhythm.
|
||||
// Uses rem units for scalability with user font size preferences.
|
||||
const (
|
||||
spaceXS = "0.25rem" //nolint:unused // 4px - Tight spacing
|
||||
spaceS = "0.5rem" //nolint:unused // 8px - Small spacing
|
||||
spaceM = "1rem" //nolint:unused // 16px - Medium spacing (base)
|
||||
spaceL = "1.5rem" //nolint:unused // 24px - Large spacing
|
||||
spaceXL = "2rem" //nolint:unused // 32px - Extra large spacing
|
||||
space2XL = "3rem" //nolint:unused // 48px - 2x extra large spacing
|
||||
space3XL = "4rem" //nolint:unused // 64px - 3x extra large spacing
|
||||
spaceXS = "0.25rem" //nolint:unused
|
||||
spaceS = "0.5rem" //nolint:unused
|
||||
spaceM = "1rem" //nolint:unused
|
||||
spaceL = "1.5rem" //nolint:unused
|
||||
spaceXL = "2rem" //nolint:unused
|
||||
space2XL = "3rem" //nolint:unused
|
||||
space3XL = "4rem" //nolint:unused
|
||||
)
|
||||
|
||||
// Typography System
|
||||
|
|
@ -63,26 +63,26 @@ const (
|
|||
fontFamilyCode = `"Roboto Mono", "SF Mono", Monaco, "Cascadia Code", Consolas, "Courier New", monospace` //nolint:unused
|
||||
|
||||
// Font sizes - from .md-typeset CSS rules.
|
||||
fontSizeBase = "0.8rem" //nolint:unused // 12.8px - Base text (.md-typeset)
|
||||
fontSizeH1 = "2em" //nolint:unused // 2x base - Main headings
|
||||
fontSizeH2 = "1.5625em" //nolint:unused // 1.5625x base - Section headings
|
||||
fontSizeH3 = "1.25em" //nolint:unused // 1.25x base - Subsection headings
|
||||
fontSizeSmall = "0.8em" //nolint:unused // 0.8x base - Small text
|
||||
fontSizeCode = "0.85em" //nolint:unused // 0.85x base - Inline code
|
||||
fontSizeBase = "0.8rem" //nolint:unused
|
||||
fontSizeH1 = "2em" //nolint:unused
|
||||
fontSizeH2 = "1.5625em" //nolint:unused
|
||||
fontSizeH3 = "1.25em" //nolint:unused
|
||||
fontSizeSmall = "0.8em" //nolint:unused
|
||||
fontSizeCode = "0.85em" //nolint:unused
|
||||
|
||||
// Line heights - from .md-typeset CSS rules.
|
||||
lineHeightBase = "1.6" //nolint:unused // Body text (.md-typeset)
|
||||
lineHeightH1 = "1.3" //nolint:unused // H1 headings
|
||||
lineHeightH2 = "1.4" //nolint:unused // H2 headings
|
||||
lineHeightH3 = "1.5" //nolint:unused // H3 headings
|
||||
lineHeightCode = "1.4" //nolint:unused // Code blocks (pre)
|
||||
lineHeightBase = "1.6" //nolint:unused
|
||||
lineHeightH1 = "1.3" //nolint:unused
|
||||
lineHeightH2 = "1.4" //nolint:unused
|
||||
lineHeightH3 = "1.5" //nolint:unused
|
||||
lineHeightCode = "1.4" //nolint:unused
|
||||
)
|
||||
|
||||
// Responsive Container Component
|
||||
// Creates a centered container with responsive padding and max-width.
|
||||
// Mobile-first approach: starts at 100% width with padding, constrains on larger screens.
|
||||
//
|
||||
//nolint:unused // Reserved for future use in Phase 4.
|
||||
//nolint:unused
|
||||
func responsiveContainer(children ...elem.Node) *elem.Element {
|
||||
return elem.Div(attrs.Props{
|
||||
attrs.Style: styles.Props{
|
||||
|
|
@ -100,7 +100,7 @@ func responsiveContainer(children ...elem.Node) *elem.Element {
|
|||
// - title: Optional title for the card (empty string for no title)
|
||||
// - children: Content elements to display in the card
|
||||
//
|
||||
//nolint:unused // Reserved for future use in Phase 4.
|
||||
//nolint:unused
|
||||
func card(title string, children ...elem.Node) *elem.Element {
|
||||
cardContent := children
|
||||
if title != "" {
|
||||
|
|
@ -134,7 +134,7 @@ func card(title string, children ...elem.Node) *elem.Element {
|
|||
// EXTRACTED FROM: .md-typeset pre CSS rules
|
||||
// Exact styling from Material for MkDocs documentation.
|
||||
//
|
||||
//nolint:unused // Used across apple.go, windows.go, register_web.go templates.
|
||||
//nolint:unused
|
||||
func codeBlock(code string) *elem.Element {
|
||||
return elem.Pre(attrs.Props{
|
||||
attrs.Style: styles.Props{
|
||||
|
|
@ -164,7 +164,7 @@ func codeBlock(code string) *elem.Element {
|
|||
// Returns inline styles for the main content container that matches .md-typeset.
|
||||
// EXTRACTED FROM: .md-typeset CSS rule from Material for MkDocs.
|
||||
//
|
||||
//nolint:unused // Used in general.go for mdTypesetBody.
|
||||
//nolint:unused
|
||||
func baseTypesetStyles() styles.Props {
|
||||
return styles.Props{
|
||||
styles.FontSize: fontSizeBase, // 0.8rem
|
||||
|
|
@ -180,7 +180,7 @@ func baseTypesetStyles() styles.Props {
|
|||
// Returns inline styles for H1 headings that match .md-typeset h1.
|
||||
// EXTRACTED FROM: .md-typeset h1 CSS rule from Material for MkDocs.
|
||||
//
|
||||
//nolint:unused // Used across templates for main headings.
|
||||
//nolint:unused
|
||||
func h1Styles() styles.Props {
|
||||
return styles.Props{
|
||||
styles.Color: colorTextSecondary, // rgba(0, 0, 0, 0.54)
|
||||
|
|
@ -198,7 +198,7 @@ func h1Styles() styles.Props {
|
|||
// Returns inline styles for H2 headings that match .md-typeset h2.
|
||||
// EXTRACTED FROM: .md-typeset h2 CSS rule from Material for MkDocs.
|
||||
//
|
||||
//nolint:unused // Used across templates for section headings.
|
||||
//nolint:unused
|
||||
func h2Styles() styles.Props {
|
||||
return styles.Props{
|
||||
styles.FontSize: fontSizeH2, // 1.5625em
|
||||
|
|
@ -216,7 +216,7 @@ func h2Styles() styles.Props {
|
|||
// Returns inline styles for H3 headings that match .md-typeset h3.
|
||||
// EXTRACTED FROM: .md-typeset h3 CSS rule from Material for MkDocs.
|
||||
//
|
||||
//nolint:unused // Used across templates for subsection headings.
|
||||
//nolint:unused
|
||||
func h3Styles() styles.Props {
|
||||
return styles.Props{
|
||||
styles.FontSize: fontSizeH3, // 1.25em
|
||||
|
|
@ -234,7 +234,7 @@ func h3Styles() styles.Props {
|
|||
// Returns inline styles for paragraphs that match .md-typeset p.
|
||||
// EXTRACTED FROM: .md-typeset p CSS rule from Material for MkDocs.
|
||||
//
|
||||
//nolint:unused // Used for consistent paragraph spacing.
|
||||
//nolint:unused
|
||||
func paragraphStyles() styles.Props {
|
||||
return styles.Props{
|
||||
styles.Margin: "1em 0",
|
||||
|
|
@ -250,7 +250,7 @@ func paragraphStyles() styles.Props {
|
|||
// Returns inline styles for ordered lists that match .md-typeset ol.
|
||||
// EXTRACTED FROM: .md-typeset ol CSS rule from Material for MkDocs.
|
||||
//
|
||||
//nolint:unused // Used for numbered instruction lists.
|
||||
//nolint:unused
|
||||
func orderedListStyles() styles.Props {
|
||||
return styles.Props{
|
||||
styles.MarginBottom: "1em",
|
||||
|
|
@ -268,7 +268,7 @@ func orderedListStyles() styles.Props {
|
|||
// Returns inline styles for unordered lists that match .md-typeset ul.
|
||||
// EXTRACTED FROM: .md-typeset ul CSS rule from Material for MkDocs.
|
||||
//
|
||||
//nolint:unused // Used for bullet point lists.
|
||||
//nolint:unused
|
||||
func unorderedListStyles() styles.Props {
|
||||
return styles.Props{
|
||||
styles.MarginBottom: "1em",
|
||||
|
|
@ -287,7 +287,7 @@ func unorderedListStyles() styles.Props {
|
|||
// EXTRACTED FROM: .md-typeset a CSS rule from Material for MkDocs.
|
||||
// Note: Hover states cannot be implemented with inline styles.
|
||||
//
|
||||
//nolint:unused // Used for text links.
|
||||
//nolint:unused
|
||||
func linkStyles() styles.Props {
|
||||
return styles.Props{
|
||||
styles.Color: colorPrimaryAccent, // #4051b5 - var(--md-primary-fg-color)
|
||||
|
|
@ -301,7 +301,7 @@ func linkStyles() styles.Props {
|
|||
// Returns inline styles for inline code that matches .md-typeset code.
|
||||
// EXTRACTED FROM: .md-typeset code CSS rule from Material for MkDocs.
|
||||
//
|
||||
//nolint:unused // Used for inline code snippets.
|
||||
//nolint:unused
|
||||
func inlineCodeStyles() styles.Props {
|
||||
return styles.Props{
|
||||
styles.BackgroundColor: colorCodeBg, // #f5f5f5
|
||||
|
|
@ -317,7 +317,7 @@ func inlineCodeStyles() styles.Props {
|
|||
// Inline Code Component
|
||||
// For inline code snippets within text.
|
||||
//
|
||||
//nolint:unused // Reserved for future inline code usage.
|
||||
//nolint:unused
|
||||
func inlineCode(code string) *elem.Element {
|
||||
return elem.Code(attrs.Props{
|
||||
attrs.Style: inlineCodeStyles().ToInline(),
|
||||
|
|
@ -327,7 +327,7 @@ func inlineCode(code string) *elem.Element {
|
|||
// orDivider creates a visual "or" divider between sections.
|
||||
// Styled with lines on either side for better visual separation.
|
||||
//
|
||||
//nolint:unused // Used in apple.go template.
|
||||
//nolint:unused
|
||||
func orDivider() *elem.Element {
|
||||
return elem.Div(attrs.Props{
|
||||
attrs.Style: styles.Props{
|
||||
|
|
@ -367,7 +367,7 @@ func orDivider() *elem.Element {
|
|||
|
||||
// warningBox creates a warning message box with icon and content.
|
||||
//
|
||||
//nolint:unused // Used in apple.go template.
|
||||
//nolint:unused
|
||||
func warningBox(title, message string) *elem.Element {
|
||||
return elem.Div(attrs.Props{
|
||||
attrs.Style: styles.Props{
|
||||
|
|
@ -404,7 +404,7 @@ func warningBox(title, message string) *elem.Element {
|
|||
|
||||
// downloadButton creates a nice button-style link for downloads.
|
||||
//
|
||||
//nolint:unused // Used in apple.go template.
|
||||
//nolint:unused
|
||||
func downloadButton(href, text string) *elem.Element {
|
||||
return elem.A(attrs.Props{
|
||||
attrs.Href: href,
|
||||
|
|
@ -428,7 +428,7 @@ func downloadButton(href, text string) *elem.Element {
|
|||
// Creates a link with proper security attributes for external URLs.
|
||||
// Automatically adds rel="noreferrer noopener" and target="_blank".
|
||||
//
|
||||
//nolint:unused // Used in apple.go, oidc_callback.go templates.
|
||||
//nolint:unused
|
||||
func externalLink(href, text string) *elem.Element {
|
||||
return elem.A(attrs.Props{
|
||||
attrs.Href: href,
|
||||
|
|
@ -444,7 +444,7 @@ func externalLink(href, text string) *elem.Element {
|
|||
// Instruction Step Component
|
||||
// For numbered instruction lists with consistent formatting.
|
||||
//
|
||||
//nolint:unused // Reserved for future use in Phase 4.
|
||||
//nolint:unused
|
||||
func instructionStep(_ int, text string) *elem.Element {
|
||||
return elem.Li(attrs.Props{
|
||||
attrs.Style: styles.Props{
|
||||
|
|
@ -457,7 +457,7 @@ func instructionStep(_ int, text string) *elem.Element {
|
|||
// Status Message Component
|
||||
// For displaying success/error/info messages with appropriate styling.
|
||||
//
|
||||
//nolint:unused // Reserved for future use in Phase 4.
|
||||
//nolint:unused
|
||||
func statusMessage(message string, isSuccess bool) *elem.Element {
|
||||
bgColor := colorSuccessLight
|
||||
textColor := colorSuccess
|
||||
|
|
|
|||
|
|
@ -916,6 +916,7 @@ func LoadCLIConfig() (*Config, error) {
|
|||
// LoadServerConfig returns the full Headscale configuration to
|
||||
// host a Headscale server. This is called as part of `headscale serve`.
|
||||
func LoadServerConfig() (*Config, error) {
|
||||
//nolint:noinlineerr
|
||||
if err := validateServerConfig(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -234,6 +234,7 @@ func (node *Node) RequestTags() []string {
|
|||
}
|
||||
|
||||
func (node *Node) Prefixes() []netip.Prefix {
|
||||
//nolint:prealloc
|
||||
var addrs []netip.Prefix
|
||||
for _, nodeAddress := range node.IPs() {
|
||||
ip := netip.PrefixFrom(nodeAddress, nodeAddress.BitLen())
|
||||
|
|
@ -263,6 +264,7 @@ func (node *Node) IsExitNode() bool {
|
|||
}
|
||||
|
||||
func (node *Node) IPsAsString() []string {
|
||||
//nolint:prealloc
|
||||
var ret []string
|
||||
|
||||
for _, ip := range node.IPs() {
|
||||
|
|
@ -925,11 +927,11 @@ func (nv NodeView) TailscaleUserID() tailcfg.UserID {
|
|||
}
|
||||
|
||||
if nv.IsTagged() {
|
||||
//nolint:gosec // G115: TaggedDevices.ID is a constant that fits in int64
|
||||
//nolint:gosec
|
||||
return tailcfg.UserID(int64(TaggedDevices.ID))
|
||||
}
|
||||
|
||||
//nolint:gosec // G115: UserID values are within int64 range
|
||||
//nolint:gosec
|
||||
return tailcfg.UserID(int64(nv.UserID().Get()))
|
||||
}
|
||||
|
||||
|
|
@@ -1054,7 +1056,7 @@ func (nv NodeView) TailNode(
	}

	tNode := tailcfg.Node{
-		//nolint:gosec // G115: NodeID values are within int64 range
+		//nolint:gosec
		ID:       tailcfg.NodeID(nv.ID()),
		StableID: nv.ID().StableID(),
		Name:     hostname,
@ -407,7 +407,7 @@ func TestApplyHostnameFromHostInfo(t *testing.T) {
|
|||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: "我的电脑",
|
||||
Hostname: "我的电脑", //nolint:gosmopolitan
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "valid-hostname",
|
||||
|
|
@ -491,7 +491,7 @@ func TestApplyHostnameFromHostInfo(t *testing.T) {
|
|||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: "server-北京-01",
|
||||
Hostname: "server-北京-01", //nolint:gosmopolitan
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "valid-hostname",
|
||||
|
|
@ -505,7 +505,7 @@ func TestApplyHostnameFromHostInfo(t *testing.T) {
|
|||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: "我的电脑",
|
||||
Hostname: "我的电脑", //nolint:gosmopolitan
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "valid-hostname",
|
||||
|
|
@ -533,7 +533,7 @@ func TestApplyHostnameFromHostInfo(t *testing.T) {
|
|||
Hostname: "valid-hostname",
|
||||
},
|
||||
change: &tailcfg.Hostinfo{
|
||||
Hostname: "测试💻机器",
|
||||
Hostname: "测试💻机器", //nolint:gosmopolitan
|
||||
},
|
||||
want: Node{
|
||||
GivenName: "valid-hostname",
|
||||
|
|
|
|||
|
|
@@ -155,7 +155,7 @@ func (u UserView) ID() uint {

func (u *User) TailscaleLogin() tailcfg.Login {
	return tailcfg.Login{
-		ID:          tailcfg.LoginID(u.ID), //nolint:gosec // G115: User IDs are always positive and fit in int64
+		ID:          tailcfg.LoginID(u.ID), //nolint:gosec
		Provider:    u.Provider,
		LoginName:   u.Username(),
		DisplayName: u.Display(),
@@ -277,8 +277,10 @@ func (c *OIDCClaims) Identifier() string {

	var result string
	// Try to parse as URL to handle URL joining correctly
+	//nolint:noinlineerr
	if u, err := url.Parse(issuer); err == nil && u.Scheme != "" {
		// For URLs, use proper URL path joining
+		//nolint:noinlineerr
		if joined, err := url.JoinPath(issuer, subject); err == nil {
			result = joined
		}
@@ -38,9 +38,7 @@ func (v *VersionInfo) String() string {
	return sb.String()
}

-var buildInfo = sync.OnceValues(func() (*debug.BuildInfo, bool) {
-	return debug.ReadBuildInfo()
-})
+var buildInfo = sync.OnceValues(debug.ReadBuildInfo)

var GetVersionInfo = sync.OnceValue(func() *VersionInfo {
	info := &VersionInfo{
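
The simplification above works because `debug.ReadBuildInfo` already has the `func() (T1, T2)` shape that `sync.OnceValues` (Go 1.21+) expects, so the wrapping closure was redundant. A small usage sketch:

package main

import (
	"fmt"
	"runtime/debug"
	"sync"
)

// buildInfo runs debug.ReadBuildInfo at most once; later calls reuse the
// cached pair of results.
var buildInfo = sync.OnceValues(debug.ReadBuildInfo)

func main() {
	info, ok := buildInfo() // first call reads the build info
	_, _ = buildInfo()      // subsequent calls return the cached values
	if ok {
		fmt.Println("go version:", info.GoVersion)
	}
}
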
@ -185,6 +185,7 @@ func ParseTraceroute(output string) (Traceroute, error) {
|
|||
firstSpace := strings.Index(remainder, " ")
|
||||
if firstSpace > 0 {
|
||||
firstPart := remainder[:firstSpace]
|
||||
//nolint:noinlineerr
|
||||
if _, err := strconv.ParseFloat(strings.TrimPrefix(firstPart, "<"), 64); err == nil {
|
||||
latencyFirst = true
|
||||
}
|
||||
|
|
@ -233,6 +234,7 @@ func ParseTraceroute(output string) (Traceroute, error) {
|
|||
parts := strings.Fields(remainder)
|
||||
if len(parts) > 0 {
|
||||
hopHostname = parts[0]
|
||||
//nolint:noinlineerr
|
||||
if ip, err := netip.ParseAddr(parts[0]); err == nil {
|
||||
hopIP = ip
|
||||
}
|
||||
|
|
@@ -336,7 +338,7 @@ func EnsureHostname(hostinfo *tailcfg.Hostinfo, machineKey, nodeKey string) stri
// it's purely for observability and correlating log entries during the registration process.
func GenerateRegistrationKey() (string, error) {
	const (
-		registerKeyPrefix = "hskey-reg-" //nolint:gosec // This is a vanity key for logging, not a credential
+		registerKeyPrefix = "hskey-reg-" //nolint:gosec
		registerKeyLength = 64
	)
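
The function above produces a prefixed vanity key used only to correlate log entries. As a hedged sketch of one way such a key could be generated with crypto/rand (not the repository's implementation; the hex encoding and length handling are assumptions):

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// generateKey returns prefix followed by length random hex characters.
func generateKey(prefix string, length int) (string, error) {
	buf := make([]byte, (length+1)/2) // two hex characters per byte
	if _, err := rand.Read(buf); err != nil {
		return "", fmt.Errorf("reading random bytes: %w", err)
	}
	return prefix + hex.EncodeToString(buf)[:length], nil
}

func main() {
	key, err := generateKey("hskey-reg-", 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(key)
}
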
@ -902,7 +902,7 @@ func TestEnsureHostname(t *testing.T) {
|
|||
{
|
||||
name: "hostname_with_unicode",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "node-ñoño-测试",
|
||||
Hostname: "node-ñoño-测试", //nolint:gosmopolitan
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
|
|
@ -983,7 +983,7 @@ func TestEnsureHostname(t *testing.T) {
|
|||
{
|
||||
name: "chinese_chars_with_dash_invalid",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "server-北京-01",
|
||||
Hostname: "server-北京-01", //nolint:gosmopolitan
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
|
|
@ -992,7 +992,7 @@ func TestEnsureHostname(t *testing.T) {
|
|||
{
|
||||
name: "chinese_only_invalid",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "我的电脑",
|
||||
Hostname: "我的电脑", //nolint:gosmopolitan
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
|
|
@ -1010,7 +1010,7 @@ func TestEnsureHostname(t *testing.T) {
|
|||
{
|
||||
name: "mixed_chinese_emoji_invalid",
|
||||
hostinfo: &tailcfg.Hostinfo{
|
||||
Hostname: "测试💻机器",
|
||||
Hostname: "测试💻机器", //nolint:gosmopolitan
|
||||
},
|
||||
machineKey: "mkey12345678",
|
||||
nodeKey: "nkey12345678",
|
||||
|
|
@ -1173,6 +1173,7 @@ func TestEnsureHostnameWithHostinfo(t *testing.T) {
|
|||
t.Fatal("hostinfo should not be nil")
|
||||
}
|
||||
|
||||
//nolint:goconst
|
||||
if hi.Hostname != "unknown-node" {
|
||||
t.Errorf("hostname = %v, want unknown-node", hi.Hostname)
|
||||
}
|
||||
|
|
@ -1283,6 +1284,8 @@ func TestEnsureHostname_DNSLabelLimit(t *testing.T) {
|
|||
|
||||
for i, hostname := range testCases {
|
||||
t.Run(cmp.Diff("", ""), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
hostinfo := &tailcfg.Hostinfo{Hostname: hostname}
|
||||
|
||||
result := EnsureHostname(hostinfo, "mkey", "nkey")
|
||||
|
|
|
|||
|
|
@ -1876,7 +1876,7 @@ func TestACLAutogroupSelf(t *testing.T) {
|
|||
|
||||
result, err := client.Curl(url)
|
||||
assert.Empty(t, result, "user1 should not be able to access user2's regular devices (autogroup:self isolation)")
|
||||
assert.Error(t, err, "connection from user1 to user2 regular device should fail")
|
||||
require.Error(t, err, "connection from user1 to user2 regular device should fail")
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1895,6 +1895,7 @@ func TestACLAutogroupSelf(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
//nolint:gocyclo
|
||||
func TestACLPolicyPropagationOverTime(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
|
|
|
|||
|
|
@ -218,7 +218,7 @@ func TestAPIAuthenticationBypass(t *testing.T) {
|
|||
var response v1.ListUsersResponse
|
||||
|
||||
err = protojson.Unmarshal(body, &response)
|
||||
assert.NoError(t, err, "Response should be valid protobuf JSON with valid API key")
|
||||
require.NoError(t, err, "Response should be valid protobuf JSON with valid API key")
|
||||
|
||||
// Should contain our test users
|
||||
users := response.GetUsers()
|
||||
|
|
@ -486,7 +486,7 @@ func TestGRPCAuthenticationBypass(t *testing.T) {
|
|||
)
|
||||
|
||||
// Should fail with authentication error
|
||||
assert.Error(t, err,
|
||||
require.Error(t, err,
|
||||
"gRPC connection with invalid API key should fail")
|
||||
|
||||
// Should contain authentication error message
|
||||
|
|
@ -515,7 +515,7 @@ func TestGRPCAuthenticationBypass(t *testing.T) {
|
|||
)
|
||||
|
||||
// Should succeed
|
||||
assert.NoError(t, err,
|
||||
require.NoError(t, err,
|
||||
"gRPC connection with valid API key should succeed, output: %s", output)
|
||||
|
||||
// CLI outputs the users array directly, not wrapped in ListUsersResponse
|
||||
|
|
@ -523,7 +523,7 @@ func TestGRPCAuthenticationBypass(t *testing.T) {
|
|||
var users []*v1.User
|
||||
|
||||
err = json.Unmarshal([]byte(output), &users)
|
||||
assert.NoError(t, err, "Response should be valid JSON array")
|
||||
require.NoError(t, err, "Response should be valid JSON array")
|
||||
assert.Len(t, users, 2, "Should have 2 users")
|
||||
|
||||
userNames := make([]string, len(users))
|
||||
|
|
@ -640,7 +640,7 @@ cli:
|
|||
)
|
||||
|
||||
// Should fail
|
||||
assert.Error(t, err,
|
||||
require.Error(t, err,
|
||||
"CLI with invalid API key should fail")
|
||||
|
||||
// Should indicate authentication failure
|
||||
|
|
@ -675,7 +675,7 @@ cli:
|
|||
)
|
||||
|
||||
// Should succeed
|
||||
assert.NoError(t, err,
|
||||
require.NoError(t, err,
|
||||
"CLI with valid API key should succeed")
|
||||
|
||||
// CLI outputs the users array directly, not wrapped in ListUsersResponse
|
||||
|
|
@ -683,7 +683,7 @@ cli:
|
|||
var users []*v1.User
|
||||
|
||||
err = json.Unmarshal([]byte(output), &users)
|
||||
assert.NoError(t, err, "Response should be valid JSON array")
|
||||
require.NoError(t, err, "Response should be valid JSON array")
|
||||
assert.Len(t, users, 2, "Should have 2 users")
|
||||
|
||||
userNames := make([]string, len(users))
|
||||
|
|
|
|||
|
|
@ -133,7 +133,7 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
|
|||
// https://github.com/tailscale/tailscale/commit/1eaad7d3deb0815e8932e913ca1a862afa34db38
|
||||
// https://github.com/juanfont/headscale/issues/2164
|
||||
if !https {
|
||||
//nolint:forbidigo // Intentional delay: Tailscale client requires 5 min wait before reconnecting over non-HTTPS
|
||||
//nolint:forbidigo
|
||||
time.Sleep(5 * time.Minute)
|
||||
}
|
||||
|
||||
|
|
@ -453,7 +453,7 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
|
|||
// https://github.com/tailscale/tailscale/commit/1eaad7d3deb0815e8932e913ca1a862afa34db38
|
||||
// https://github.com/juanfont/headscale/issues/2164
|
||||
if !https {
|
||||
//nolint:forbidigo // Intentional delay: Tailscale client requires 5 min wait before reconnecting over non-HTTPS
|
||||
//nolint:forbidigo
|
||||
time.Sleep(5 * time.Minute)
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -926,7 +926,7 @@ func TestOIDCFollowUpUrl(t *testing.T) {
|
|||
|
||||
// wait for the registration cache to expire
|
||||
// a little bit more than HEADSCALE_TUNING_REGISTER_CACHE_EXPIRATION (1m30s)
|
||||
//nolint:forbidigo // Intentional delay: must wait for real-time cache expiration (HEADSCALE_TUNING_REGISTER_CACHE_EXPIRATION=1m30s)
|
||||
//nolint:forbidigo
|
||||
time.Sleep(2 * time.Minute)
|
||||
|
||||
var newUrl *url.URL
|
||||
|
|
|
|||
|
|
@ -203,7 +203,7 @@ func TestUserCommand(t *testing.T) {
|
|||
"--identifier=1",
|
||||
},
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, deleteResult, "User destroyed")
|
||||
|
||||
var listAfterIDDelete []*v1.User
|
||||
|
|
@ -245,7 +245,7 @@ func TestUserCommand(t *testing.T) {
|
|||
"--name=newname",
|
||||
},
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, deleteResult, "User destroyed")
|
||||
|
||||
var listAfterNameDelete []v1.User
|
||||
|
|
@ -571,8 +571,9 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
|
|||
func TestPreAuthKeyCorrectUserLoggedInCommand(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
//nolint:goconst
|
||||
user1 := "user1"
|
||||
user2 := "user2"
|
||||
user2 := "user2" //nolint:goconst
|
||||
|
||||
spec := ScenarioSpec{
|
||||
NodesPerUser: 1,
|
||||
|
|
@ -829,7 +830,7 @@ func TestApiKeyCommand(t *testing.T) {
|
|||
"json",
|
||||
},
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
assert.NotEmpty(t, apiResult)
|
||||
|
||||
keys[idx] = apiResult
|
||||
|
|
@ -907,7 +908,7 @@ func TestApiKeyCommand(t *testing.T) {
|
|||
listedAPIKeys[idx].GetPrefix(),
|
||||
},
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
expiredPrefixes[listedAPIKeys[idx].GetPrefix()] = true
|
||||
}
|
||||
|
|
@ -952,7 +953,7 @@ func TestApiKeyCommand(t *testing.T) {
|
|||
"--prefix",
|
||||
listedAPIKeys[0].GetPrefix(),
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
var listedAPIKeysAfterDelete []v1.ApiKey
|
||||
|
||||
|
|
@ -1071,7 +1072,7 @@ func TestNodeCommand(t *testing.T) {
|
|||
}
|
||||
nodes := make([]*v1.Node, len(regIDs))
|
||||
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
for index, regID := range regIDs {
|
||||
_, err := headscale.Execute(
|
||||
|
|
@ -1089,7 +1090,7 @@ func TestNodeCommand(t *testing.T) {
|
|||
"json",
|
||||
},
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
var node v1.Node
|
||||
|
||||
|
|
@ -1156,7 +1157,7 @@ func TestNodeCommand(t *testing.T) {
|
|||
}
|
||||
otherUserMachines := make([]*v1.Node, len(otherUserRegIDs))
|
||||
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
for index, regID := range otherUserRegIDs {
|
||||
_, err := headscale.Execute(
|
||||
|
|
@ -1174,7 +1175,7 @@ func TestNodeCommand(t *testing.T) {
|
|||
"json",
|
||||
},
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
var node v1.Node
|
||||
|
||||
|
|
@ -1281,7 +1282,7 @@ func TestNodeCommand(t *testing.T) {
|
|||
"--force",
|
||||
},
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test: list main user after node is deleted
|
||||
var listOnlyMachineUserAfterDelete []v1.Node
|
||||
|
|
@ -1348,7 +1349,7 @@ func TestNodeExpireCommand(t *testing.T) {
|
|||
"json",
|
||||
},
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
var node v1.Node
|
||||
|
||||
|
|
@ -1411,7 +1412,7 @@ func TestNodeExpireCommand(t *testing.T) {
|
|||
strconv.FormatUint(listAll[idx].GetId(), 10),
|
||||
},
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
var listAllAfterExpiry []v1.Node
|
||||
|
|
@ -1549,7 +1550,7 @@ func TestNodeRenameCommand(t *testing.T) {
|
|||
fmt.Sprintf("newnode-%d", idx+1),
|
||||
},
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, res, "Node renamed")
|
||||
}
|
||||
|
|
@ -1590,7 +1591,7 @@ func TestNodeRenameCommand(t *testing.T) {
|
|||
strings.Repeat("t", 64),
|
||||
},
|
||||
)
|
||||
assert.ErrorContains(t, err, "must not exceed 63 characters")
|
||||
require.ErrorContains(t, err, "must not exceed 63 characters")
|
||||
|
||||
var listAllAfterRenameAttempt []v1.Node
|
||||
|
||||
|
|
@ -1763,7 +1764,7 @@ func TestPolicyBrokenConfigCommand(t *testing.T) {
|
|||
policyFilePath,
|
||||
},
|
||||
)
|
||||
assert.ErrorContains(t, err, `invalid action "unknown-action"`)
|
||||
require.ErrorContains(t, err, `invalid action "unknown-action"`)
|
||||
|
||||
// The new policy was invalid, the old one should still be in place, which
|
||||
// is none.
|
||||
|
|
|
|||
|
|
@ -115,6 +115,7 @@ func DERPVerify(
|
|||
result = fmt.Errorf("client Connect: %w", err)
|
||||
}
|
||||
|
||||
//nolint:noinlineerr
|
||||
if m, err := c.Recv(); err != nil {
|
||||
result = fmt.Errorf("client first Recv: %w", err)
|
||||
} else if v, ok := m.(derp.ServerInfoMessage); !ok {
|
||||
|
|
|
|||
|
|
@ -86,6 +86,7 @@ func TestResolveMagicDNSExtraRecordsPath(t *testing.T) {
|
|||
|
||||
const erPath = "/tmp/extra_records.json"
|
||||
|
||||
//nolint:prealloc
|
||||
extraRecords := []tailcfg.DNSRecord{
|
||||
{
|
||||
Name: "test.myvpn.example.com",
|
||||
|
|
|
|||
|
|
@ -38,6 +38,8 @@ type buffer struct {
|
|||
|
||||
// Write appends the contents of p to the buffer, growing the buffer as needed. It returns
|
||||
// the number of bytes written.
|
||||
//
|
||||
//nolint:nonamedreturns
|
||||
func (b *buffer) Write(p []byte) (n int, err error) {
|
||||
b.mutex.Lock()
|
||||
defer b.mutex.Unlock()
|
||||
|
|
|
|||
|
|
@ -99,7 +99,7 @@ func TestDERPServerWebsocketScenario(t *testing.T) {
|
|||
// we *want* it to show up in stacktraces,
|
||||
// so marking it as a test helper would be counterproductive.
|
||||
//
|
||||
//nolint:thelper
|
||||
|
||||
func derpServerScenario(
|
||||
t *testing.T,
|
||||
spec ScenarioSpec,
|
||||
|
|
@ -179,7 +179,7 @@ func derpServerScenario(
|
|||
|
||||
// Let the DERP updater run a couple of times to ensure it does not
|
||||
// break the DERPMap. The updater runs on a 10s interval by default.
|
||||
//nolint:forbidigo // Intentional delay: must wait for DERP updater to run multiple times (interval-based)
|
||||
//nolint:forbidigo
|
||||
time.Sleep(30 * time.Second)
|
||||
|
||||
success = pingDerpAllHelper(t, allClients, allHostnames)
|
||||
|
|
|
|||
|
|
@@ -174,9 +174,10 @@ func requireAllClientsOnline(t *testing.T, headscale ControlServer, expectedNode

startTime := time.Now()

//nolint:goconst
stateStr := "offline"
if expectedOnline {
stateStr = "online"
stateStr = "online" //nolint:goconst
}

t.Logf("requireAllSystemsOnline: Starting %s validation for %d nodes at %s - %s", stateStr, len(expectedNodes), startTime.Format(TimestampFormat), message)

@@ -194,6 +195,8 @@ func requireAllClientsOnline(t *testing.T, headscale ControlServer, expectedNode
}

// requireAllClientsOnlineWithSingleTimeout is the original validation logic for online state.
//
//nolint:gocyclo
func requireAllClientsOnlineWithSingleTimeout(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID, expectedOnline bool, message string, timeout time.Duration) {
t.Helper()

@@ -548,6 +551,8 @@ func requireAllClientsNetInfoAndDERP(t *testing.T, headscale ControlServer, expe
// assertLastSeenSet validates that a node has a non-nil LastSeen timestamp.
// Critical for ensuring node activity tracking is functioning properly.
func assertLastSeenSet(t *testing.T, node *v1.Node) {
t.Helper()

assert.NotNil(t, node)
assert.NotNil(t, node.GetLastSeen())
}
@@ -566,7 +571,7 @@ func assertTailscaleNodesLogout(t assert.TestingT, clients []TailscaleClient) {

for _, client := range clients {
status, err := client.Status()
assert.NoError(t, err, "failed to get status for client %s", client.Hostname())
assert.NoError(t, err, "failed to get status for client %s", client.Hostname()) //nolint:testifylint
assert.Equal(t, "NeedsLogin", status.BackendState,
"client %s should be logged out", client.Hostname())
}

@@ -765,7 +770,7 @@ func tagp(name string) policyv2.Alias {
// prefixp returns a pointer to a Prefix from a CIDR string for policy v2 configurations.
// Converts CIDR notation to policy prefix format for network range specifications.
func prefixp(cidr string) policyv2.Alias {
//nolint:staticcheck // SA4006: prefix is used in new(policyv2.Prefix(prefix)) below
//nolint:staticcheck
prefix := netip.MustParsePrefix(cidr)
return new(policyv2.Prefix(prefix))
}
@@ -781,13 +781,14 @@ func extractTarToDirectory(tarData []byte, targetDir string) error {
switch header.Typeflag {
case tar.TypeDir:
// Create directory
//nolint:gosec // G115: tar.Header.Mode is int64, safe to convert to uint32 for permissions
//nolint:gosec
err := os.MkdirAll(targetPath, os.FileMode(header.Mode))
if err != nil {
return fmt.Errorf("failed to create directory %s: %w", targetPath, err)
}
case tar.TypeReg:
// Ensure parent directories exist
//nolint:noinlineerr
if err := os.MkdirAll(filepath.Dir(targetPath), dirPermissions); err != nil {
return fmt.Errorf("failed to create parent directories for %s: %w", targetPath, err)
}

@@ -798,7 +799,7 @@ func extractTarToDirectory(tarData []byte, targetDir string) error {
return fmt.Errorf("failed to create file %s: %w", targetPath, err)
}

//nolint:gosec // G110: Trusted tar archive from our own container
//nolint:gosec,noinlineerr
if _, err := io.Copy(outFile, tarReader); err != nil {
outFile.Close()
return fmt.Errorf("failed to copy file contents: %w", err)

@@ -807,7 +808,7 @@ func extractTarToDirectory(tarData []byte, targetDir string) error {
outFile.Close()

// Set file permissions
//nolint:gosec // G115: tar.Header.Mode is int64, safe to convert to uint32 for permissions
//nolint:gosec,noinlineerr
if err := os.Chmod(targetPath, os.FileMode(header.Mode)); err != nil {
return fmt.Errorf("failed to set file permissions: %w", err)
}

@@ -906,7 +907,7 @@ func (t *HeadscaleInContainer) SaveDatabase(savePath string) error {
return fmt.Errorf("failed to create database file: %w", err)
}

//nolint:gosec // G110: Trusted tar archive from our own container
//nolint:gosec
written, err := io.Copy(outFile, tarReader)
outFile.Close()
@@ -1593,6 +1594,7 @@ func (t *HeadscaleInContainer) GetAllMapReponses() (map[types.NodeID][]tailcfg.M
}

var res map[types.NodeID][]tailcfg.MapResponse
//nolint:noinlineerr
if err := json.Unmarshal([]byte(result), &res); err != nil {
return nil, fmt.Errorf("decoding routes response: %w", err)
}

@@ -1613,6 +1615,7 @@ func (t *HeadscaleInContainer) PrimaryRoutes() (*routes.DebugRoutes, error) {
}

var debugRoutes routes.DebugRoutes
//nolint:noinlineerr
if err := json.Unmarshal([]byte(result), &debugRoutes); err != nil {
return nil, fmt.Errorf("decoding routes response: %w", err)
}

@@ -1633,6 +1636,7 @@ func (t *HeadscaleInContainer) DebugBatcher() (*hscontrol.DebugBatcherInfo, erro
}

var debugInfo hscontrol.DebugBatcherInfo
//nolint:noinlineerr
if err := json.Unmarshal([]byte(result), &debugInfo); err != nil {
return nil, fmt.Errorf("decoding batcher debug response: %w", err)
}

@@ -1653,6 +1657,7 @@ func (t *HeadscaleInContainer) DebugNodeStore() (map[types.NodeID]types.Node, er
}

var nodeStore map[types.NodeID]types.Node
//nolint:noinlineerr
if err := json.Unmarshal([]byte(result), &nodeStore); err != nil {
return nil, fmt.Errorf("decoding nodestore debug response: %w", err)
}

@@ -1673,6 +1678,7 @@ func (t *HeadscaleInContainer) DebugFilter() ([]tailcfg.FilterRule, error) {
}

var filterRules []tailcfg.FilterRule
//nolint:noinlineerr
if err := json.Unmarshal([]byte(result), &filterRules); err != nil {
return nil, fmt.Errorf("decoding filter response: %w", err)
}
@@ -220,19 +220,19 @@ func BuildExpectedOnlineMap(all map[types.NodeID][]tailcfg.MapResponse) map[type
for _, mr := range mrs {
for _, peer := range mr.Peers {
if peer.Online != nil {
res[nid][types.NodeID(peer.ID)] = *peer.Online //nolint:gosec // G115: tailcfg.NodeID is int64, safe for test code
res[nid][types.NodeID(peer.ID)] = *peer.Online //nolint:gosec
}
}

for _, peer := range mr.PeersChanged {
if peer.Online != nil {
res[nid][types.NodeID(peer.ID)] = *peer.Online //nolint:gosec // G115: tailcfg.NodeID is int64, safe for test code
res[nid][types.NodeID(peer.ID)] = *peer.Online //nolint:gosec
}
}

for _, peer := range mr.PeersChangedPatch {
if peer.Online != nil {
res[nid][types.NodeID(peer.NodeID)] = *peer.Online //nolint:gosec // G115: tailcfg.NodeID is int64, safe for test code
res[nid][types.NodeID(peer.NodeID)] = *peer.Online //nolint:gosec
}
}
}
@@ -1610,7 +1610,7 @@ func TestSubnetRouteACL(t *testing.T) {
func TestEnablingExitRoutes(t *testing.T) {
IntegrationSkip(t)

user := "user2"
user := "user2" //nolint:goconst

spec := ScenarioSpec{
NodesPerUser: 2,

@@ -2040,6 +2040,8 @@ func MustFindNode(hostname string, nodes []*v1.Node) *v1.Node {
// - Verify that peers can no longer use node
// - Policy is changed back to auto approve route, check that routes already existing is approved.
// - Verify that routes can now be seen by peers.
//
//nolint:gocyclo
func TestAutoApproveMultiNetwork(t *testing.T) {
IntegrationSkip(t)
@@ -2887,7 +2889,8 @@ func TestAutoApproveMultiNetwork(t *testing.T) {
}

// assertTracerouteViaIPWithCollect is a version of assertTracerouteViaIP that works with assert.CollectT.
//nolint:testifylint // CollectT requires assert, not require
//
//nolint:testifylint
func assertTracerouteViaIPWithCollect(c *assert.CollectT, tr util.Traceroute, ip netip.Addr) {
assert.NotNil(c, tr)
assert.True(c, tr.Success)

@@ -2905,6 +2908,8 @@ func SortPeerStatus(a, b *ipnstate.PeerStatus) int {
}

func printCurrentRouteMap(t *testing.T, routers ...*ipnstate.PeerStatus) {
t.Helper()

t.Logf("== Current routing map ==")
slices.SortFunc(routers, SortPeerStatus)
@@ -323,6 +323,8 @@ func (s *Scenario) Services(name string) ([]*dockertest.Resource, error) {
}

func (s *Scenario) ShutdownAssertNoPanics(t *testing.T) {
t.Helper()

defer func() { _ = dockertestutil.CleanUnreferencedNetworks(s.pool) }()
defer func() { _ = dockertestutil.CleanImagesInCI(s.pool) }()

@@ -1172,6 +1174,7 @@ func (s *Scenario) runHeadscaleRegister(userStr string, body string) error {
key = strings.SplitN(key, " ", expectedHTMLSplitParts)[0]
log.Printf("registering node %s", key)

//nolint:noinlineerr
if headscale, err := s.Headscale(); err == nil {
_, err = headscale.Execute(
[]string{"headscale", "nodes", "register", "--user", userStr, "--key", key},

@@ -1449,6 +1452,7 @@ func (s *Scenario) runMockOIDC(accessTTL time.Duration, users []mockoidc.MockUse
// Add integration test labels if running under hi tool
dockertestutil.DockerAddIntegrationLabels(mockOidcOptions, "oidc")

//nolint:noinlineerr
if pmockoidc, err := s.pool.BuildAndRunWithBuildOptions(
headscaleBuildOptions,
mockOidcOptions,

@@ -1468,6 +1472,7 @@ func (s *Scenario) runMockOIDC(accessTTL time.Duration, users []mockoidc.MockUse

hostEndpoint := net.JoinHostPort(ipAddr, strconv.Itoa(port))

//nolint:noinlineerr
if err := s.pool.Retry(func() error {
oidcConfigURL := fmt.Sprintf("http://%s/oidc/.well-known/openid-configuration", hostEndpoint)
httpClient := &http.Client{}
@@ -452,7 +452,7 @@ func assertSSHTimeout(t *testing.T, client TailscaleClient, peer TailscaleClient

func assertSSHNoAccessStdError(t *testing.T, err error, stderr string) {
t.Helper()
assert.Error(t, err)
require.Error(t, err)

if !isSSHNoAccessStdError(stderr) {
t.Errorf("expected stderr output suggesting access denied, got: %s", stderr)
@@ -85,7 +85,7 @@ func assertNodeHasNoTagsWithCollect(c *assert.CollectT, node *v1.Node) {
// This validates that tag updates have propagated to the node's own status (issue #2978).
func assertNodeSelfHasTagsWithCollect(c *assert.CollectT, client TailscaleClient, expectedTags []string) {
status, err := client.Status()
//nolint:testifylint // must use assert with CollectT in EventuallyWithT
//nolint:testifylint
assert.NoError(c, err, "failed to get client status")

if status == nil || status.Self == nil {

@@ -556,7 +556,7 @@ func TestTagsAuthKeyWithTagAdminOverrideReauthPreserves(t *testing.T) {
"--authkey=" + authKey.GetKey(),
"--force-reauth",
}
//nolint:errcheck // Intentionally ignoring error - we check results below
//nolint:errcheck
client.Execute(command)

// Verify admin tags are preserved even after reauth - admin decisions are authoritative (server-side)

@@ -2490,7 +2490,7 @@ func TestTagsAdminAPICannotRemoveAllTags(t *testing.T) {
// This validates at a deeper level than status - directly from tailscale debug netmap.
func assertNetmapSelfHasTagsWithCollect(c *assert.CollectT, client TailscaleClient, expectedTags []string) {
nm, err := client.Netmap()
//nolint:testifylint // must use assert with CollectT in EventuallyWithT
//nolint:testifylint
assert.NoError(c, err, "failed to get client netmap")

if nm == nil {
@@ -2501,6 +2501,7 @@ func assertNetmapSelfHasTagsWithCollect(c *assert.CollectT, client TailscaleClie
var actualTagsSlice []string

if nm.SelfNode.Valid() {
//nolint:unqueryvet
for _, tag := range nm.SelfNode.Tags().All() {
actualTagsSlice = append(actualTagsSlice, tag)
}

@@ -2623,7 +2624,7 @@ func TestTagsIssue2978ReproTagReplacement(t *testing.T) {
// We wait 10 seconds and check - if the client STILL shows the OLD tag,
// that demonstrates the bug. If the client shows the NEW tag, the bug is fixed.
t.Log("Step 2b: Waiting 10 seconds to see if client self view updates (bug: it should NOT)")
//nolint:forbidigo // intentional sleep to demonstrate bug timing - client should get update immediately, not after waiting
//nolint:forbidigo
time.Sleep(10 * time.Second)

// Check client status after waiting

@@ -2646,6 +2647,7 @@ func TestTagsIssue2978ReproTagReplacement(t *testing.T) {
var netmapTagsAfterFirstCall []string

if nmErr == nil && nm != nil && nm.SelfNode.Valid() {
//nolint:unqueryvet
for _, tag := range nm.SelfNode.Tags().All() {
netmapTagsAfterFirstCall = append(netmapTagsAfterFirstCall, tag)
}

@@ -2692,7 +2694,7 @@ func TestTagsIssue2978ReproTagReplacement(t *testing.T) {

// Wait and check - bug means client still shows old tag
t.Log("Step 4b: Waiting 10 seconds to see if client self view updates (bug: it should NOT)")
//nolint:forbidigo // intentional sleep to demonstrate bug timing - client should get update immediately, not after waiting
//nolint:forbidigo
time.Sleep(10 * time.Second)

status, err = client.Status()
@@ -1609,6 +1609,7 @@ func (t *TailscaleInContainer) GetNodePrivateKey() (*key.NodePrivate, error) {
}

store := &mem.Store{}
//nolint:noinlineerr
if err = store.LoadFromJSON(state); err != nil {
return nil, fmt.Errorf("failed to unmarshal state file: %w", err)
}

@@ -1624,6 +1625,7 @@ func (t *TailscaleInContainer) GetNodePrivateKey() (*key.NodePrivate, error) {
}

p := &ipn.Prefs{}
//nolint:noinlineerr
if err = json.Unmarshal(currentProfile, &p); err != nil {
return nil, fmt.Errorf("failed to unmarshal current profile state: %w", err)
}