all: apply testifylint fixes and correct auto-fix issues

- Apply testifylint auto-fixes (assert.Positive, fmt.Sprintf in assertions)
- Fix incorrect := to = conversions introduced by auto-fixer
- Revert broken slices.AppendSeq FIXME placeholder
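As a reading aid, the two lint patterns named above look roughly like this. This is a minimal, hypothetical sketch (invented test name and values, not lines from this diff), assuming a testify version where assert.Positive is available:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestLintPatterns is illustrative only; the identifiers and values are
// invented for this sketch and do not come from the headscale test suite.
func TestLintPatterns(t *testing.T) {
	nodeID := 1
	preferredDERP := 2
	allMatch := true

	// Before (flagged by testifylint):
	//   assert.True(t, allMatch, fmt.Sprintf("node %d not online", nodeID))
	//   assert.Greater(t, preferredDERP, 0, "got %d", preferredDERP)

	// After: pass the format string and args directly to the assertion,
	// and use the dedicated Positive helper for "> 0" checks.
	assert.True(t, allMatch, "node %d not online", nodeID)
	assert.Positive(t, preferredDERP, "got %d", preferredDERP)
}

// The ":= to =" correction refers to the split error checks elsewhere in this
// diff: once err is declared with :=, later assignments in the same scope must
// use plain =, e.g.
//
//	err := killTestContainers(ctx)
//	if err != nil { return err }
//	err = pruneDockerNetworks(ctx) // "=", not ":=", since err already exists
//	if err != nil { return err }
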
Kristoffer Dalby 2026-01-20 16:20:53 +00:00
parent 12b3da0181
commit 8bfd508cf0
9 changed files with 143 additions and 91 deletions

View file

@@ -70,7 +70,8 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
log.Printf("Running pre-test cleanup...")
}
if err := cleanupBeforeTest(ctx); err != nil && config.Verbose {
err := cleanupBeforeTest(ctx)
if err != nil && config.Verbose {
log.Printf("Warning: pre-test cleanup failed: %v", err)
}
}
@@ -123,7 +124,8 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
// Start stats collection immediately - no need for complex retry logic
// The new implementation monitors Docker events and will catch containers as they start
if err := statsCollector.StartCollection(ctx, runID, config.Verbose); err != nil {
err := statsCollector.StartCollection(ctx, runID, config.Verbose)
if err != nil {
if config.Verbose {
log.Printf("Warning: failed to start stats collection: %v", err)
}
@@ -135,7 +137,8 @@ func runTestContainer(ctx context.Context, config *RunConfig) error {
exitCode, err := streamAndWait(ctx, cli, resp.ID)
// Ensure all containers have finished and logs are flushed before extracting artifacts
if waitErr := waitForContainerFinalization(ctx, cli, resp.ID, config.Verbose); waitErr != nil && config.Verbose {
waitErr := waitForContainerFinalization(ctx, cli, resp.ID, config.Verbose)
if waitErr != nil && config.Verbose {
log.Printf("Warning: failed to wait for container finalization: %v", waitErr)
}
@@ -648,7 +651,8 @@ func extractArtifactsFromContainers(ctx context.Context, testContainerID, logsDi
for _, cont := range currentTestContainers {
// Extract container logs and tar files
if err := extractContainerArtifacts(ctx, cli, cont.ID, cont.name, logsDir, verbose); err != nil {
err := extractContainerArtifacts(ctx, cli, cont.ID, cont.name, logsDir, verbose)
if err != nil {
if verbose {
log.Printf("Warning: failed to extract artifacts from container %s (%s): %v", cont.name, cont.ID[:12], err)
}
@@ -727,7 +731,8 @@ func getCurrentTestContainers(containers []container.Summary, testContainerID st
// extractContainerArtifacts saves logs and tar files from a container.
func extractContainerArtifacts(ctx context.Context, cli *client.Client, containerID, containerName, logsDir string, verbose bool) error {
// Ensure the logs directory exists
if err := os.MkdirAll(logsDir, dirPermissions); err != nil {
err := os.MkdirAll(logsDir, dirPermissions)
if err != nil {
return fmt.Errorf("failed to create logs directory: %w", err)
}

View file

@@ -79,15 +79,18 @@ func main() {
}
func cleanAll(ctx context.Context) error {
if err := killTestContainers(ctx); err != nil {
err := killTestContainers(ctx)
if err != nil {
return err
}
if err := pruneDockerNetworks(ctx); err != nil {
err = pruneDockerNetworks(ctx)
if err != nil {
return err
}
if err := cleanOldImages(ctx); err != nil {
err = cleanOldImages(ctx)
if err != nil {
return err
}

View file

@@ -23,7 +23,7 @@ var ErrStatsCollectionAlreadyStarted = errors.New("stats collection already star
// Stats calculation constants.
const (
bytesPerKB = 1024
bytesPerKB = 1024
percentageMultiplier = 100.0
)
@@ -259,7 +259,8 @@ func (sc *StatsCollector) collectStatsForContainer(ctx context.Context, containe
return
default:
var stats container.StatsResponse
if err := decoder.Decode(&stats); err != nil {
err := decoder.Decode(&stats)
if err != nil {
// EOF is expected when container stops or stream ends
if err.Error() != "EOF" && verbose {
log.Printf("Failed to decode stats for container %s: %v", containerID[:12], err)

View file

@@ -36,7 +36,7 @@ var (
"node not found in registration cache",
)
ErrCouldNotConvertNodeInterface = errors.New("failed to convert node interface")
ErrNameNotUnique = errors.New("name is not unique")
ErrNameNotUnique = errors.New("name is not unique")
)
// ListPeers returns peers of node, regardless of any Policy or if the node is expired.
@@ -229,7 +229,8 @@ func SetApprovedRoutes(
) error {
if len(routes) == 0 {
// if no routes are provided, we remove all
if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", "[]").Error; err != nil {
err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", "[]").Error
if err != nil {
return fmt.Errorf("removing approved routes: %w", err)
}
@@ -278,13 +279,15 @@ func SetLastSeen(tx *gorm.DB, nodeID types.NodeID, lastSeen time.Time) error {
func RenameNode(tx *gorm.DB,
nodeID types.NodeID, newName string,
) error {
if err := util.ValidateHostname(newName); err != nil {
err := util.ValidateHostname(newName)
if err != nil {
return fmt.Errorf("renaming node: %w", err)
}
// Check if the new name is unique
var count int64
if err := tx.Model(&types.Node{}).Where("given_name = ? AND id != ?", newName, nodeID).Count(&count).Error; err != nil {
err = tx.Model(&types.Node{}).Where("given_name = ? AND id != ?", newName, nodeID).Count(&count).Error
if err != nil {
return fmt.Errorf("failed to check name uniqueness: %w", err)
}
@@ -494,8 +497,9 @@ func generateGivenName(suppliedName string, randomSuffix bool) (string, error) {
func isUniqueName(tx *gorm.DB, name string) (bool, error) {
nodes := types.Nodes{}
if err := tx.
Where("given_name = ?", name).Find(&nodes).Error; err != nil {
err := tx.
Where("given_name = ?", name).Find(&nodes).Error
if err != nil {
return false, err
}

View file

@@ -38,12 +38,12 @@ var ErrUndefinedTagReference = errors.New("references undefined tag")
// Sentinel errors for type/alias validation.
var (
ErrUnknownAliasType = errors.New("unknown alias type")
ErrUnknownOwnerType = errors.New("unknown owner type")
ErrUnknownAliasType = errors.New("unknown alias type")
ErrUnknownOwnerType = errors.New("unknown owner type")
ErrUnknownAutoApproverType = errors.New("unknown auto approver type")
ErrInvalidAlias = errors.New("invalid alias")
ErrInvalidAutoApprover = errors.New("invalid auto approver")
ErrInvalidOwner = errors.New("invalid owner")
ErrInvalidAlias = errors.New("invalid alias")
ErrInvalidAutoApprover = errors.New("invalid auto approver")
ErrInvalidOwner = errors.New("invalid owner")
)
// Sentinel errors for format validation.
@@ -65,16 +65,16 @@ var (
// Sentinel errors for resolution/lookup failures.
var (
ErrUserNotFound = errors.New("user not found")
ErrMultipleUsersFound = errors.New("multiple users found")
ErrHostNotResolved = errors.New("unable to resolve host")
ErrGroupNotDefined = errors.New("group not defined in policy")
ErrTagNotDefined = errors.New("tag not defined in policy")
ErrHostNotDefined = errors.New("host not defined in policy")
ErrInvalidIPAddress = errors.New("invalid IP address")
ErrNestedGroups = errors.New("nested groups not allowed")
ErrInvalidGroupMember = errors.New("invalid group member type")
ErrGroupValueNotArray = errors.New("group value must be an array")
ErrUserNotFound = errors.New("user not found")
ErrMultipleUsersFound = errors.New("multiple users found")
ErrHostNotResolved = errors.New("unable to resolve host")
ErrGroupNotDefined = errors.New("group not defined in policy")
ErrTagNotDefined = errors.New("tag not defined in policy")
ErrHostNotDefined = errors.New("host not defined in policy")
ErrInvalidIPAddress = errors.New("invalid IP address")
ErrNestedGroups = errors.New("nested groups not allowed")
ErrInvalidGroupMember = errors.New("invalid group member type")
ErrGroupValueNotArray = errors.New("group value must be an array")
ErrAutoApproverNotAlias = errors.New("auto approver is not an alias")
)
@@ -91,10 +91,10 @@ var (
// Sentinel errors for SSH aliases.
var (
ErrAliasNotSupportedSSHSrc = errors.New("alias type not supported for SSH source")
ErrAliasNotSupportedSSHDst = errors.New("alias type not supported for SSH destination")
ErrUnknownSSHSrcAliasType = errors.New("unknown SSH source alias type")
ErrUnknownSSHDstAliasType = errors.New("unknown SSH destination alias type")
ErrAliasNotSupportedSSHSrc = errors.New("alias type not supported for SSH source")
ErrAliasNotSupportedSSHDst = errors.New("alias type not supported for SSH destination")
ErrUnknownSSHSrcAliasType = errors.New("unknown SSH source alias type")
ErrUnknownSSHDstAliasType = errors.New("unknown SSH destination alias type")
)
// Sentinel errors for policy parsing.
@@ -212,7 +212,8 @@ func (p Prefix) MarshalJSON() ([]byte, error) {
func (u *Username) UnmarshalJSON(b []byte) error {
*u = Username(strings.Trim(string(b), `"`))
if err := u.Validate(); err != nil {
err := u.Validate()
if err != nil {
return err
}
@@ -306,7 +307,8 @@ func (g Group) Validate() error {
func (g *Group) UnmarshalJSON(b []byte) error {
*g = Group(strings.Trim(string(b), `"`))
if err := g.Validate(); err != nil {
err := g.Validate()
if err != nil {
return err
}
@@ -371,7 +373,8 @@ func (t Tag) Validate() error {
func (t *Tag) UnmarshalJSON(b []byte) error {
*t = Tag(strings.Trim(string(b), `"`))
if err := t.Validate(); err != nil {
err := t.Validate()
if err != nil {
return err
}
@@ -421,7 +424,8 @@ func (h Host) Validate() error {
func (h *Host) UnmarshalJSON(b []byte) error {
*h = Host(strings.Trim(string(b), `"`))
if err := h.Validate(); err != nil {
err := h.Validate()
if err != nil {
return err
}
@@ -582,7 +586,8 @@ func (ag AutoGroup) Validate() error {
func (ag *AutoGroup) UnmarshalJSON(b []byte) error {
*ag = AutoGroup(strings.Trim(string(b), `"`))
if err := ag.Validate(); err != nil {
err := ag.Validate()
if err != nil {
return err
}
@@ -669,7 +674,8 @@ type AliasWithPorts struct {
func (ve *AliasWithPorts) UnmarshalJSON(b []byte) error {
var v any
if err := json.Unmarshal(b, &v); err != nil {
err := json.Unmarshal(b, &v)
if err != nil {
return err
}
@@ -1049,14 +1055,16 @@ func (g Groups) Contains(group *Group) error {
func (g *Groups) UnmarshalJSON(b []byte) error {
// First unmarshal as a generic map to validate group names first
var rawMap map[string]any
if err := json.Unmarshal(b, &rawMap); err != nil {
err := json.Unmarshal(b, &rawMap)
if err != nil {
return err
}
// Validate group names first before checking data types
for key := range rawMap {
group := Group(key)
if err := group.Validate(); err != nil {
err := group.Validate()
if err != nil {
return err
}
}
@@ -1095,7 +1103,8 @@ func (g *Groups) UnmarshalJSON(b []byte) error {
for _, u := range value {
username := Username(u)
if err := username.Validate(); err != nil {
err := username.Validate()
if err != nil {
if isGroup(u) {
return fmt.Errorf("%w: found %q inside %q", ErrNestedGroups, u, group)
}
@@ -1117,7 +1126,8 @@ type Hosts map[Host]Prefix
func (h *Hosts) UnmarshalJSON(b []byte) error {
var rawHosts map[string]string
if err := json.Unmarshal(b, &rawHosts, policyJSONOpts...); err != nil {
err := json.Unmarshal(b, &rawHosts, policyJSONOpts...)
if err != nil {
return err
}
@@ -1125,12 +1135,14 @@ func (h *Hosts) UnmarshalJSON(b []byte) error {
for key, value := range rawHosts {
host := Host(key)
if err := host.Validate(); err != nil {
err := host.Validate()
if err != nil {
return err
}
var prefix Prefix
if err := prefix.parseString(value); err != nil {
err = prefix.parseString(value)
if err != nil {
return fmt.Errorf("%w: hostname %q value %q", ErrInvalidIPAddress, key, value)
}
@@ -1476,7 +1488,8 @@ func (p *Protocol) UnmarshalJSON(b []byte) error {
*p = Protocol(strings.ToLower(str))
// Validate the protocol
if err := p.validate(); err != nil {
err := p.validate()
if err != nil {
return err
}
@@ -1732,12 +1745,14 @@ func (p *Policy) validate() error {
case *AutoGroup:
ag := src
if err := validateAutogroupSupported(ag); err != nil {
err := validateAutogroupSupported(ag)
if err != nil {
errs = append(errs, err)
continue
}
if err := validateAutogroupForSrc(ag); err != nil {
err = validateAutogroupForSrc(ag)
if err != nil {
errs = append(errs, err)
continue
}
@@ -1748,7 +1763,8 @@
}
case *Tag:
tagOwner := src
if err := p.TagOwners.Contains(tagOwner); err != nil {
err := p.TagOwners.Contains(tagOwner)
if err != nil {
errs = append(errs, err)
}
}
@@ -1764,12 +1780,14 @@ func (p *Policy) validate() error {
case *AutoGroup:
ag := dst.Alias.(*AutoGroup)
if err := validateAutogroupSupported(ag); err != nil {
err := validateAutogroupSupported(ag)
if err != nil {
errs = append(errs, err)
continue
}
if err := validateAutogroupForDst(ag); err != nil {
err = validateAutogroupForDst(ag)
if err != nil {
errs = append(errs, err)
continue
}
@@ -1780,14 +1798,16 @@ func (p *Policy) validate() error {
}
case *Tag:
tagOwner := dst.Alias.(*Tag)
if err := p.TagOwners.Contains(tagOwner); err != nil {
err := p.TagOwners.Contains(tagOwner)
if err != nil {
errs = append(errs, err)
}
}
}
// Validate protocol-port compatibility
if err := validateProtocolPortCompatibility(acl.Protocol, acl.Destinations); err != nil {
err := validateProtocolPortCompatibility(acl.Protocol, acl.Destinations)
if err != nil {
errs = append(errs, err)
}
}
@@ -1796,7 +1816,8 @@ func (p *Policy) validate() error {
for _, user := range ssh.Users {
if strings.HasPrefix(string(user), "autogroup:") {
maybeAuto := AutoGroup(user)
if err := validateAutogroupForSSHUser(&maybeAuto); err != nil {
err := validateAutogroupForSSHUser(&maybeAuto)
if err != nil {
errs = append(errs, err)
continue
}
@@ -1808,23 +1829,27 @@ func (p *Policy) validate() error {
case *AutoGroup:
ag := src
if err := validateAutogroupSupported(ag); err != nil {
err := validateAutogroupSupported(ag)
if err != nil {
errs = append(errs, err)
continue
}
if err := validateAutogroupForSSHSrc(ag); err != nil {
err = validateAutogroupForSSHSrc(ag)
if err != nil {
errs = append(errs, err)
continue
}
case *Group:
g := src
if err := p.Groups.Contains(g); err != nil {
err := p.Groups.Contains(g)
if err != nil {
errs = append(errs, err)
}
case *Tag:
tagOwner := src
if err := p.TagOwners.Contains(tagOwner); err != nil {
err := p.TagOwners.Contains(tagOwner)
if err != nil {
errs = append(errs, err)
}
}
@@ -1834,18 +1859,21 @@ func (p *Policy) validate() error {
switch dst := dst.(type) {
case *AutoGroup:
ag := dst
if err := validateAutogroupSupported(ag); err != nil {
err := validateAutogroupSupported(ag)
if err != nil {
errs = append(errs, err)
continue
}
if err := validateAutogroupForSSHDst(ag); err != nil {
err = validateAutogroupForSSHDst(ag)
if err != nil {
errs = append(errs, err)
continue
}
case *Tag:
tagOwner := dst
if err := p.TagOwners.Contains(tagOwner); err != nil {
err := p.TagOwners.Contains(tagOwner)
if err != nil {
errs = append(errs, err)
}
}
@@ -1857,7 +1885,8 @@ func (p *Policy) validate() error {
switch tagOwner := tagOwner.(type) {
case *Group:
g := tagOwner
if err := p.Groups.Contains(g); err != nil {
err := p.Groups.Contains(g)
if err != nil {
errs = append(errs, err)
}
case *Tag:
@@ -1882,12 +1911,14 @@ func (p *Policy) validate() error {
switch approver := approver.(type) {
case *Group:
g := approver
if err := p.Groups.Contains(g); err != nil {
err := p.Groups.Contains(g)
if err != nil {
errs = append(errs, err)
}
case *Tag:
tagOwner := approver
if err := p.TagOwners.Contains(tagOwner); err != nil {
err := p.TagOwners.Contains(tagOwner)
if err != nil {
errs = append(errs, err)
}
}
@@ -1898,12 +1929,14 @@ func (p *Policy) validate() error {
switch approver := approver.(type) {
case *Group:
g := approver
if err := p.Groups.Contains(g); err != nil {
err := p.Groups.Contains(g)
if err != nil {
errs = append(errs, err)
}
case *Tag:
tagOwner := approver
if err := p.TagOwners.Contains(tagOwner); err != nil {
err := p.TagOwners.Contains(tagOwner)
if err != nil {
errs = append(errs, err)
}
}

View file

@@ -378,7 +378,7 @@ func requireAllClientsOnlineWithSingleTimeout(t *testing.T, headscale ControlSer
stateStr = "online"
}
assert.True(c, allMatch, fmt.Sprintf("Not all %d nodes are %s across all systems (batcher, mapresponses, nodestore)", len(expectedNodes), stateStr))
assert.True(c, allMatch, "Not all %d nodes are %s across all systems (batcher, mapresponses, nodestore)", len(expectedNodes), stateStr)
}, timeout, onlineCheckRetryInterval, message)
}
@@ -534,7 +534,7 @@ func requireAllClientsNetInfoAndDERP(t *testing.T, headscale ControlServer, expe
// Validate that the node has a valid DERP server (PreferredDERP should be > 0)
preferredDERP := node.Hostinfo.NetInfo.PreferredDERP
assert.Greater(c, preferredDERP, 0, "Node %d (%s) should have a valid DERP server (PreferredDERP > 0) for relay connectivity, got %d", nodeID, node.Hostname, preferredDERP)
assert.Positive(c, preferredDERP, "Node %d (%s) should have a valid DERP server (PreferredDERP > 0) for relay connectivity, got %d", nodeID, node.Hostname, preferredDERP)
t.Logf("Node %d (%s) has valid NetInfo with DERP server %d at %s", nodeID, node.Hostname, preferredDERP, time.Now().Format(TimestampFormat))
}

View file

@@ -721,7 +721,8 @@ func (t *HeadscaleInContainer) SaveMetrics(savePath string) error {
// extractTarToDirectory extracts a tar archive to a directory.
func extractTarToDirectory(tarData []byte, targetDir string) error {
if err := os.MkdirAll(targetDir, dirPermissions); err != nil {
err := os.MkdirAll(targetDir, dirPermissions)
if err != nil {
return fmt.Errorf("failed to create directory %s: %w", targetDir, err)
}
@@ -780,7 +781,8 @@ func extractTarToDirectory(tarData []byte, targetDir string) error {
switch header.Typeflag {
case tar.TypeDir:
// Create directory
if err := os.MkdirAll(targetPath, os.FileMode(header.Mode)); err != nil {
err := os.MkdirAll(targetPath, os.FileMode(header.Mode))
if err != nil {
return fmt.Errorf("failed to create directory %s: %w", targetPath, err)
}
case tar.TypeReg:

View file

@@ -53,16 +53,16 @@ const (
var usePostgresForTest = envknob.Bool("HEADSCALE_INTEGRATION_POSTGRES")
var (
errNoHeadscaleAvailable = errors.New("no headscale available")
errNoUserAvailable = errors.New("no user available")
errNoClientFound = errors.New("client not found")
errUserAlreadyInNetwork = errors.New("users can only have nodes placed in one network")
errNoNetworkNamed = errors.New("no network named")
errNoIPAMConfig = errors.New("no IPAM config found in network")
errHTTPClientNil = errors.New("http client is nil")
errLoginURLNil = errors.New("login url is nil")
errUnexpectedStatusCode = errors.New("unexpected status code")
errNetworkDoesNotExist = errors.New("network does not exist")
errNoHeadscaleAvailable = errors.New("no headscale available")
errNoUserAvailable = errors.New("no user available")
errNoClientFound = errors.New("client not found")
errUserAlreadyInNetwork = errors.New("users can only have nodes placed in one network")
errNoNetworkNamed = errors.New("no network named")
errNoIPAMConfig = errors.New("no IPAM config found in network")
errHTTPClientNil = errors.New("http client is nil")
errLoginURLNil = errors.New("login url is nil")
errUnexpectedStatusCode = errors.New("unexpected status code")
errNetworkDoesNotExist = errors.New("network does not exist")
// AllVersions represents a list of Tailscale versions the suite
// uses to test compatibility with the ControlServer.
@@ -391,13 +391,15 @@ func (s *Scenario) ShutdownAssertNoPanics(t *testing.T) {
if s.mockOIDC.r != nil {
s.mockOIDC.r.Close()
if err := s.mockOIDC.r.Close(); err != nil {
err := s.mockOIDC.r.Close()
if err != nil {
log.Printf("failed to tear down oidc server: %s", err)
}
}
for _, network := range s.networks {
if err := network.Close(); err != nil {
err := network.Close()
if err != nil {
log.Printf("failed to tear down network: %s", err)
}
}
@@ -775,7 +777,8 @@ func (s *Scenario) WaitForTailscaleSyncPerUser(timeout, retryInterval time.Durat
})
}
if err := user.syncWaitGroup.Wait(); err != nil {
err := user.syncWaitGroup.Wait()
if err != nil {
allErrors = append(allErrors, err)
}
}
@@ -938,7 +941,8 @@ func (s *Scenario) RunTailscaleUpWithURL(userStr, loginServer string) error {
log.Printf("client %s is ready", client.Hostname())
}
if err := user.joinWaitGroup.Wait(); err != nil {
err := user.joinWaitGroup.Wait()
if err != nil {
return err
}

View file

@@ -46,11 +46,11 @@ const (
dockerExecuteTimeout = 60 * time.Second
// Container restart and backoff timeouts.
containerRestartTimeout = 30 // seconds, used by Docker API
tailscaleVersionTimeout = 5 * time.Second
containerRestartBackoff = 30 * time.Second
backoffMaxElapsedTime = 10 * time.Second
curlFailFastMaxTime = 2 * time.Second
containerRestartTimeout = 30 // seconds, used by Docker API
tailscaleVersionTimeout = 5 * time.Second
containerRestartBackoff = 30 * time.Second
backoffMaxElapsedTime = 10 * time.Second
curlFailFastMaxTime = 2 * time.Second
)
var (