headscale/integration/acl_test.go
package integration
import (
"fmt"
"net/netip"
"strconv"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/integrationutil"
"github.com/juanfont/headscale/integration/tsic"
"github.com/ory/dockertest/v3"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/tailcfg"
"tailscale.com/types/ptr"
)
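// veryLargeDestination is a large set of destination prefixes covering
// most of the public IPv4 space (skipping the RFC1918 private ranges).
// It is used by the "very-large-destination-prefix-1372" case below to
// ensure policies with very large destination lists are handled correctly.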
var veryLargeDestination = []policyv2.AliasWithPorts{
aliasWithPorts(prefixp("0.0.0.0/5"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("8.0.0.0/7"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("11.0.0.0/8"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("12.0.0.0/6"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("16.0.0.0/4"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("32.0.0.0/3"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("64.0.0.0/2"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("128.0.0.0/3"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("160.0.0.0/5"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("168.0.0.0/6"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("172.0.0.0/12"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("172.32.0.0/11"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("172.64.0.0/10"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("172.128.0.0/9"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("173.0.0.0/8"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("174.0.0.0/7"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("176.0.0.0/4"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("192.0.0.0/9"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("192.128.0.0/11"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("192.160.0.0/13"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("192.169.0.0/16"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("192.170.0.0/15"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("192.172.0.0/14"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("192.176.0.0/12"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("192.192.0.0/10"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("193.0.0.0/8"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("194.0.0.0/7"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("196.0.0.0/6"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("200.0.0.0/5"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("208.0.0.0/4"), tailcfg.PortRangeAny),
}
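// aclScenario sets up a Scenario with two users (user1 and user2),
// clientsPerUser nodes each, a webserver on port 80 in every container,
// and the given policy installed in headscale. The caller is responsible
// for shutting down the returned scenario.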
func aclScenario(
t *testing.T,
policy *policyv2.Policy,
clientsPerUser int,
) *Scenario {
t.Helper()
spec := ScenarioSpec{
NodesPerUser: clientsPerUser,
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{
// Alpine containers don't have ip6tables set up, which causes
// tailscaled to stop configuring the wgengine, which in turn
// means DNS is never configured.
tsic.WithNetfilter("off"),
tsic.WithPackages("curl"),
tsic.WithWebserver(80),
tsic.WithDockerWorkdir("/"),
},
hsic.WithACLPolicy(policy),
hsic.WithTestName("acl"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
)
require.NoError(t, err)
_, err = scenario.ListTailscaleClientsFQDNs()
require.NoError(t, err)
return scenario
}
// This tests a different ACL mechanism: if a host _cannot_ connect
// to another node at all based on the ACL, it should simply not be part
// of the NetMap sent to the host. This is slightly different from
// the other tests, as we can just check whether the hosts are present
// or not.
func TestACLHostsInNetMapTable(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
NodesPerUser: 2,
Users: []string{"user1", "user2"},
}
// NOTE: All want cases currently check the
// total count of expected peers; this would
// typically be the client count of the users
// they can access minus one (themselves).
tests := map[string]struct {
users ScenarioSpec
policy policyv2.Policy
want map[string]int
}{
// Test that when we have no ACL, each client's netmap
// contains every other client as a peer.
"base-acls": {
users: spec,
policy: policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{wildcard()},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
},
},
},
}, want: map[string]int{
"user1@test.no": 3, // ns1 + ns2
"user2@test.no": 3, // ns2 + ns1
},
},
// Test that when we have two users that cannot see
// each other, each node only has the peers from
// its own user.
"two-isolated-users": {
users: spec,
policy: policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user1@"), tailcfg.PortRangeAny),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
},
}, want: map[string]int{
"user1@test.no": 1,
"user2@test.no": 1,
},
},
// Test that when we have two users whose ACLs
// are restricted to a single port, the nodes are still
// present in the netmap.
"two-restricted-present-in-netmap": {
users: spec,
policy: policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user1@"), tailcfg.PortRange{First: 22, Last: 22}),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRange{First: 22, Last: 22}),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRange{First: 22, Last: 22}),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user1@"), tailcfg.PortRange{First: 22, Last: 22}),
},
},
},
}, want: map[string]int{
"user1@test.no": 3,
"user2@test.no": 3,
},
},
// Test that when we have two users that are isolated,
// but one can see the other, we have the appropriate number
// of peers. This will still result in all peers being present, as we
// need them on the other side for the "return path".
"two-ns-one-isolated": {
users: spec,
policy: policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user1@"), tailcfg.PortRangeAny),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
},
}, want: map[string]int{
"user1@test.no": 3, // ns1 + ns2
"user2@test.no": 3, // ns1 + ns2 (return path)
},
},
"very-large-destination-prefix-1372": {
users: spec,
policy: policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: append(
[]policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user1@"), tailcfg.PortRangeAny),
},
veryLargeDestination...,
),
},
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: append(
[]policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
veryLargeDestination...,
),
},
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: append(
[]policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
veryLargeDestination...,
),
},
},
}, want: map[string]int{
"user1@test.no": 3, // ns1 + ns2
"user2@test.no": 3, // ns1 + ns2 (return path)
},
},
"ipv6-acls-1470": {
users: spec,
policy: policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{wildcard()},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(prefixp("0.0.0.0/0"), tailcfg.PortRangeAny),
aliasWithPorts(prefixp("::/0"), tailcfg.PortRangeAny),
},
},
},
}, want: map[string]int{
"user1@test.no": 3, // ns1 + ns2
"user2@test.no": 3, // ns2 + ns1
},
},
}
for name, testCase := range tests {
t.Run(name, func(t *testing.T) {
caseSpec := testCase.users
scenario, err := NewScenario(caseSpec)
require.NoError(t, err)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{},
hsic.WithACLPolicy(&testCase.policy),
)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
allClients, err := scenario.ListTailscaleClients()
require.NoError(t, err)
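// All cases expect the same peer count for both users, so user1's
// expected count works as the sync target for every client.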
err = scenario.WaitForTailscaleSyncWithPeerCount(testCase.want["user1@test.no"], integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())
require.NoError(t, err)
for _, client := range allClients {
assert.EventuallyWithT(t, func(c *assert.CollectT) {
status, err := client.Status()
assert.NoError(c, err)
user := status.User[status.Self.UserID].LoginName
assert.Len(c, status.Peer, (testCase.want[user]))
}, 10*time.Second, 200*time.Millisecond, "Waiting for expected peer visibility")
}
})
}
}
// Test to confirm that we can use user:80 from one user.
// This should make the node appear in the peer list, but
// disallow ping.
// This ACL will not allow user1 to access its own machines.
// Reported: https://github.com/juanfont/headscale/issues/699
func TestACLAllowUser80Dst(t *testing.T) {
IntegrationSkip(t)
scenario := aclScenario(t,
&policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRange{First: 80, Last: 80}),
},
},
},
},
1,
)
defer scenario.ShutdownAssertNoPanics(t)
user1Clients, err := scenario.ListTailscaleClients("user1")
require.NoError(t, err)
user2Clients, err := scenario.ListTailscaleClients("user2")
require.NoError(t, err)
// Test that user1 can visit all user2
for _, client := range user1Clients {
for _, peer := range user2Clients {
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, 20*time.Second, 500*time.Millisecond, "Verifying user1 can reach user2")
}
}
// Test that user2 _cannot_ visit user1
for _, client := range user2Clients {
for _, peer := range user1Clients {
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.Error(c, err)
assert.Empty(c, result)
}, 20*time.Second, 500*time.Millisecond, "Verifying user2 cannot reach user1")
}
}
}
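// TestACLDenyAllPort80 verifies that a policy that only opens port 22
// between members of a group blocks HTTP (port 80) between all nodes.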
func TestACLDenyAllPort80(t *testing.T) {
IntegrationSkip(t)
scenario := aclScenario(t,
&policyv2.Policy{
Groups: policyv2.Groups{
policyv2.Group("group:integration-acl-test"): []policyv2.Username{policyv2.Username("user1@"), policyv2.Username("user2@")},
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{groupp("group:integration-acl-test")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(wildcard(), tailcfg.PortRange{First: 22, Last: 22}),
},
},
},
},
4,
)
defer scenario.ShutdownAssertNoPanics(t)
allClients, err := scenario.ListTailscaleClients()
require.NoError(t, err)
allHostnames, err := scenario.ListTailscaleClientsFQDNs()
require.NoError(t, err)
for _, client := range allClients {
for _, hostname := range allHostnames {
// We will always be allowed to check _self_, so short-circuit
// the test here.
if strings.Contains(hostname, client.Hostname()) {
continue
}
url := fmt.Sprintf("http://%s/etc/hostname", hostname)
t.Logf("url from %s to %s", client.Hostname(), url)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.Error(c, err)
assert.Empty(c, result)
}, 20*time.Second, 500*time.Millisecond, "Verifying all traffic is denied")
}
}
}
// Test to confirm that we can use user:* from one user.
// This ACL will not allow user1 to access its own machines.
// Reported: https://github.com/juanfont/headscale/issues/699
func TestACLAllowUserDst(t *testing.T) {
IntegrationSkip(t)
scenario := aclScenario(t,
&policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
},
},
2,
)
defer scenario.ShutdownAssertNoPanics(t)
user1Clients, err := scenario.ListTailscaleClients("user1")
require.NoError(t, err)
user2Clients, err := scenario.ListTailscaleClients("user2")
require.NoError(t, err)
// Test that user1 can visit all user2
for _, client := range user1Clients {
for _, peer := range user2Clients {
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, 20*time.Second, 500*time.Millisecond, "Verifying user1 can reach user2")
}
}
// Test that user2 _cannot_ visit user1
for _, client := range user2Clients {
for _, peer := range user1Clients {
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.Error(c, err)
assert.Empty(c, result)
}, 20*time.Second, 500*time.Millisecond, "Verifying user2 cannot reach user1")
}
}
}
// Test to confirm that we can use *:* from one user
// Reported: https://github.com/juanfont/headscale/issues/699
func TestACLAllowStarDst(t *testing.T) {
IntegrationSkip(t)
scenario := aclScenario(t,
&policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
},
},
},
},
2,
)
defer scenario.ShutdownAssertNoPanics(t)
user1Clients, err := scenario.ListTailscaleClients("user1")
require.NoError(t, err)
user2Clients, err := scenario.ListTailscaleClients("user2")
require.NoError(t, err)
// Test that user1 can visit all user2
for _, client := range user1Clients {
for _, peer := range user2Clients {
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, 20*time.Second, 500*time.Millisecond, "Verifying user1 can reach user2")
}
}
// Test that user2 _cannot_ visit user1
for _, client := range user2Clients {
for _, peer := range user1Clients {
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.Error(c, err)
assert.Empty(c, result)
}, 20*time.Second, 500*time.Millisecond, "Verifying user2 cannot reach user1")
}
}
}
// TestACLNamedHostsCanReachBySubnet is the same as
// TestACLNamedHostsCanReach, but it tests if we expand a
// full CIDR correctly. All routes should work.
func TestACLNamedHostsCanReachBySubnet(t *testing.T) {
IntegrationSkip(t)
scenario := aclScenario(t,
&policyv2.Policy{
Hosts: policyv2.Hosts{
"all": policyv2.Prefix(netip.MustParsePrefix("100.64.0.0/24")),
},
ACLs: []policyv2.ACL{
// Everyone can curl everyone within the 100.64.0.0/24 subnet
{
Action: "accept",
Sources: []policyv2.Alias{wildcard()},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(hostp("all"), tailcfg.PortRangeAny),
},
},
},
},
3,
)
defer scenario.ShutdownAssertNoPanics(t)
user1Clients, err := scenario.ListTailscaleClients("user1")
require.NoError(t, err)
user2Clients, err := scenario.ListTailscaleClients("user2")
require.NoError(t, err)
// Test that user1 can visit all user2
for _, client := range user1Clients {
for _, peer := range user2Clients {
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, 20*time.Second, 500*time.Millisecond, "Verifying user1 can reach user2")
}
}
// Test that user2 can visit all user1, note that this
// is _not_ symmetric.
for _, client := range user2Clients {
for _, peer := range user1Clients {
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, 20*time.Second, 500*time.Millisecond, "Verifying user2 can reach user1")
}
}
}
// This test aims to cover cases where individual hosts are allowed and denied
// access based on their assigned hostname
// https://github.com/juanfont/headscale/issues/941
//
// ACL = [{
// "DstPorts": [{
// "Bits": null,
// "IP": "100.64.0.3/32",
// "Ports": {
// "First": 0,
// "Last": 65535
// }
// }],
// "SrcIPs": ["*"]
// }, {
//
// "DstPorts": [{
// "Bits": null,
// "IP": "100.64.0.2/32",
// "Ports": {
// "First": 0,
// "Last": 65535
// }
// }],
// "SrcIPs": ["100.64.0.1/32"]
// }]
//
// ACL Cache Map= {
// "*": {
// "100.64.0.3/32": {}
// },
// "100.64.0.1/32": {
// "100.64.0.2/32": {}
// }
// }
//
// https://github.com/juanfont/headscale/issues/941
// Additionally verify ipv6 behaviour, part of
// https://github.com/juanfont/headscale/issues/809
func TestACLNamedHostsCanReach(t *testing.T) {
IntegrationSkip(t)
tests := map[string]struct {
policy policyv2.Policy
}{
"ipv4": {
policy: policyv2.Policy{
Hosts: policyv2.Hosts{
"test1": policyv2.Prefix(netip.MustParsePrefix("100.64.0.1/32")),
"test2": policyv2.Prefix(netip.MustParsePrefix("100.64.0.2/32")),
"test3": policyv2.Prefix(netip.MustParsePrefix("100.64.0.3/32")),
},
ACLs: []policyv2.ACL{
// Everyone can curl test3
{
Action: "accept",
Sources: []policyv2.Alias{wildcard()},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(hostp("test3"), tailcfg.PortRangeAny),
},
},
// test1 can curl test2
{
Action: "accept",
Sources: []policyv2.Alias{hostp("test1")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(hostp("test2"), tailcfg.PortRangeAny),
},
},
},
},
},
"ipv6": {
policy: policyv2.Policy{
Hosts: policyv2.Hosts{
"test1": policyv2.Prefix(netip.MustParsePrefix("fd7a:115c:a1e0::1/128")),
"test2": policyv2.Prefix(netip.MustParsePrefix("fd7a:115c:a1e0::2/128")),
"test3": policyv2.Prefix(netip.MustParsePrefix("fd7a:115c:a1e0::3/128")),
},
ACLs: []policyv2.ACL{
// Everyone can curl test3
{
Action: "accept",
Sources: []policyv2.Alias{wildcard()},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(hostp("test3"), tailcfg.PortRangeAny),
},
},
// test1 can curl test2
{
Action: "accept",
Sources: []policyv2.Alias{hostp("test1")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(hostp("test2"), tailcfg.PortRangeAny),
},
},
},
},
},
}
for name, testCase := range tests {
t.Run(name, func(t *testing.T) {
scenario := aclScenario(t,
&testCase.policy,
2,
)
defer scenario.ShutdownAssertNoPanics(t)
// Since the users don't matter here, we basically expect that some clients
// will be assigned these IPs and that we can pick them up for our own use.
test1ip4 := netip.MustParseAddr("100.64.0.1")
test1ip6 := netip.MustParseAddr("fd7a:115c:a1e0::1")
test1, err := scenario.FindTailscaleClientByIP(test1ip6)
require.NoError(t, err)
test1fqdn, err := test1.FQDN()
require.NoError(t, err)
test1ip4URL := fmt.Sprintf("http://%s/etc/hostname", test1ip4.String())
test1ip6URL := fmt.Sprintf("http://[%s]/etc/hostname", test1ip6.String())
test1fqdnURL := fmt.Sprintf("http://%s/etc/hostname", test1fqdn)
test2ip4 := netip.MustParseAddr("100.64.0.2")
test2ip6 := netip.MustParseAddr("fd7a:115c:a1e0::2")
test2, err := scenario.FindTailscaleClientByIP(test2ip6)
require.NoError(t, err)
test2fqdn, err := test2.FQDN()
require.NoError(t, err)
test2ip4URL := fmt.Sprintf("http://%s/etc/hostname", test2ip4.String())
test2ip6URL := fmt.Sprintf("http://[%s]/etc/hostname", test2ip6.String())
test2fqdnURL := fmt.Sprintf("http://%s/etc/hostname", test2fqdn)
test3ip4 := netip.MustParseAddr("100.64.0.3")
test3ip6 := netip.MustParseAddr("fd7a:115c:a1e0::3")
test3, err := scenario.FindTailscaleClientByIP(test3ip6)
require.NoError(t, err)
test3fqdn, err := test3.FQDN()
require.NoError(t, err)
test3ip4URL := fmt.Sprintf("http://%s/etc/hostname", test3ip4.String())
test3ip6URL := fmt.Sprintf("http://[%s]/etc/hostname", test3ip6.String())
test3fqdnURL := fmt.Sprintf("http://%s/etc/hostname", test3fqdn)
// test1 can query test3
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test1.Curl(test3ip4URL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test3 with URL %s, expected hostname of 13 chars, got %s",
test3ip4URL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test1 should reach test3 via IPv4")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test1.Curl(test3ip6URL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test3 with URL %s, expected hostname of 13 chars, got %s",
test3ip6URL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test1 should reach test3 via IPv6")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test1.Curl(test3fqdnURL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test3 with URL %s, expected hostname of 13 chars, got %s",
test3fqdnURL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test1 should reach test3 via FQDN")
// test2 can query test3
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test2.Curl(test3ip4URL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test3 with URL %s, expected hostname of 13 chars, got %s",
test3ip4URL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test2 should reach test3 via IPv4")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test2.Curl(test3ip6URL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test3 with URL %s, expected hostname of 13 chars, got %s",
test3ip6URL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test2 should reach test3 via IPv6")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test2.Curl(test3fqdnURL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test3 with URL %s, expected hostname of 13 chars, got %s",
test3fqdnURL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test2 should reach test3 via FQDN")
// test3 cannot query test1
result, err := test3.Curl(test1ip4URL)
assert.Empty(t, result)
require.Error(t, err)
result, err = test3.Curl(test1ip6URL)
assert.Empty(t, result)
require.Error(t, err)
result, err = test3.Curl(test1fqdnURL)
assert.Empty(t, result)
require.Error(t, err)
// test3 cannot query test2
result, err = test3.Curl(test2ip4URL)
assert.Empty(t, result)
require.Error(t, err)
result, err = test3.Curl(test2ip6URL)
assert.Empty(t, result)
require.Error(t, err)
result, err = test3.Curl(test2fqdnURL)
assert.Empty(t, result)
require.Error(t, err)
// test1 can query test2
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test1.Curl(test2ip4URL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test2 with URL %s, expected hostname of 13 chars, got %s",
test2ip4URL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test1 should reach test2 via IPv4")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test1.Curl(test2ip6URL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test2 with URL %s, expected hostname of 13 chars, got %s",
test2ip6URL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test1 should reach test2 via IPv6")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test1.Curl(test2fqdnURL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test2 with URL %s, expected hostname of 13 chars, got %s",
test2fqdnURL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test1 should reach test2 via FQDN")
// test2 cannot query test1
result, err = test2.Curl(test1ip4URL)
assert.Empty(t, result)
require.Error(t, err)
result, err = test2.Curl(test1ip6URL)
assert.Empty(t, result)
require.Error(t, err)
result, err = test2.Curl(test1fqdnURL)
assert.Empty(t, result)
require.Error(t, err)
})
}
}
// TestACLDevice1CanAccessDevice2 is a table driven test that aims to test
// the various ways to achieve a connection between device1 and device2 where
// device1 can access device2, but not the other way around. This can be
// viewed as one of the most important tests here as it covers most of the
// syntax that can be used.
//
// Before adding new test cases, consider if it can be reduced to a case
// in this function.
func TestACLDevice1CanAccessDevice2(t *testing.T) {
IntegrationSkip(t)
tests := map[string]struct {
policy policyv2.Policy
}{
"ipv4": {
policy: policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{prefixp("100.64.0.1/32")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(prefixp("100.64.0.2/32"), tailcfg.PortRangeAny),
},
},
},
},
},
"ipv6": {
policy: policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{prefixp("fd7a:115c:a1e0::1/128")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(prefixp("fd7a:115c:a1e0::2/128"), tailcfg.PortRangeAny),
},
},
},
},
},
"hostv4cidr": {
policy: policyv2.Policy{
Hosts: policyv2.Hosts{
"test1": policyv2.Prefix(netip.MustParsePrefix("100.64.0.1/32")),
"test2": policyv2.Prefix(netip.MustParsePrefix("100.64.0.2/32")),
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{hostp("test1")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(hostp("test2"), tailcfg.PortRangeAny),
},
},
},
},
},
"hostv6cidr": {
policy: policyv2.Policy{
Hosts: policyv2.Hosts{
"test1": policyv2.Prefix(netip.MustParsePrefix("fd7a:115c:a1e0::1/128")),
"test2": policyv2.Prefix(netip.MustParsePrefix("fd7a:115c:a1e0::2/128")),
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{hostp("test1")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(hostp("test2"), tailcfg.PortRangeAny),
},
},
},
},
},
"group": {
policy: policyv2.Policy{
Groups: policyv2.Groups{
policyv2.Group("group:one"): []policyv2.Username{policyv2.Username("user1@")},
policyv2.Group("group:two"): []policyv2.Username{policyv2.Username("user2@")},
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{groupp("group:one")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(groupp("group:two"), tailcfg.PortRangeAny),
},
},
},
},
},
// TODO(kradalby): Add similar tests for Tags, might need support
// in the scenario function when we create or join the clients.
}
for name, testCase := range tests {
t.Run(name, func(t *testing.T) {
scenario := aclScenario(t, &testCase.policy, 1)
defer scenario.ShutdownAssertNoPanics(t)
test1ip := netip.MustParseAddr("100.64.0.1")
test1ip6 := netip.MustParseAddr("fd7a:115c:a1e0::1")
test1, err := scenario.FindTailscaleClientByIP(test1ip)
assert.NotNil(t, test1)
require.NoError(t, err)
test1fqdn, err := test1.FQDN()
require.NoError(t, err)
test1ipURL := fmt.Sprintf("http://%s/etc/hostname", test1ip.String())
test1ip6URL := fmt.Sprintf("http://[%s]/etc/hostname", test1ip6.String())
test1fqdnURL := fmt.Sprintf("http://%s/etc/hostname", test1fqdn)
test2ip := netip.MustParseAddr("100.64.0.2")
test2ip6 := netip.MustParseAddr("fd7a:115c:a1e0::2")
test2, err := scenario.FindTailscaleClientByIP(test2ip)
assert.NotNil(t, test2)
require.NoError(t, err)
test2fqdn, err := test2.FQDN()
require.NoError(t, err)
test2ipURL := fmt.Sprintf("http://%s/etc/hostname", test2ip.String())
test2ip6URL := fmt.Sprintf("http://[%s]/etc/hostname", test2ip6.String())
test2fqdnURL := fmt.Sprintf("http://%s/etc/hostname", test2fqdn)
// test1 can query test2
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test1.Curl(test2ipURL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test with URL %s, expected hostname of 13 chars, got %s",
test2ipURL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test1 should reach test2 via IPv4")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test1.Curl(test2ip6URL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test with URL %s, expected hostname of 13 chars, got %s",
test2ip6URL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test1 should reach test2 via IPv6")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test1.Curl(test2fqdnURL)
assert.NoError(c, err)
assert.Lenf(
c,
result,
13,
"failed to connect from test1 to test with URL %s, expected hostname of 13 chars, got %s",
test2fqdnURL,
result,
)
}, 10*time.Second, 200*time.Millisecond, "test1 should reach test2 via FQDN")
// test2 cannot query test1 (negative test case)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test2.Curl(test1ipURL)
assert.Error(c, err)
assert.Empty(c, result)
}, 10*time.Second, 200*time.Millisecond, "test2 should NOT reach test1 via IPv4")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test2.Curl(test1ip6URL)
assert.Error(c, err)
assert.Empty(c, result)
}, 10*time.Second, 200*time.Millisecond, "test2 should NOT reach test1 via IPv6")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := test2.Curl(test1fqdnURL)
assert.Error(c, err)
assert.Empty(c, result)
}, 10*time.Second, 200*time.Millisecond, "test2 should NOT reach test1 via FQDN")
})
}
}
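// TestPolicyUpdateWhileRunningWithCLIInDatabase starts without a policy
// (all nodes can reach each other), then switches to a policy that only
// allows user1 -> user2 while the nodes are running, and verifies both
// that `headscale policy get` returns the new policy and that connectivity
// changes accordingly. The policy is stored in the database (PolicyModeDB).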
func TestPolicyUpdateWhileRunningWithCLIInDatabase(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{
// Alpine containers don't have ip6tables set up, which causes
// tailscaled to stop configuring the wgengine, which in turn
// means DNS is never configured.
tsic.WithNetfilter("off"),
tsic.WithPackages("curl"),
tsic.WithWebserver(80),
tsic.WithDockerWorkdir("/"),
},
hsic.WithTestName("policyreload"),
hsic.WithPolicyMode(types.PolicyModeDB),
)
require.NoError(t, err)
_, err = scenario.ListTailscaleClientsFQDNs()
require.NoError(t, err)
err = scenario.WaitForTailscaleSync()
require.NoError(t, err)
user1Clients, err := scenario.ListTailscaleClients("user1")
require.NoError(t, err)
user2Clients, err := scenario.ListTailscaleClients("user2")
require.NoError(t, err)
all := append(user1Clients, user2Clients...)
// Initially all nodes can reach each other
for _, client := range all {
for _, peer := range all {
if client.ContainerID() == peer.ContainerID() {
continue
}
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, 20*time.Second, 500*time.Millisecond, "Verifying user1 can reach user2")
}
}
headscale, err := scenario.Headscale()
require.NoError(t, err)
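// New policy: only user1 may initiate connections to user2.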
p := policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
},
Hosts: policyv2.Hosts{},
}
err = headscale.SetPolicy(&p)
require.NoError(t, err)
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
// Get the current policy and check
// if it is the same as the one we set.
var output *policyv2.Policy
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"policy",
"get",
"--output",
"json",
},
&output,
)
assert.NoError(ct, err)
assert.Len(ct, output.ACLs, 1)
if diff := cmp.Diff(p, *output, cmpopts.IgnoreUnexported(policyv2.Policy{}), cmpopts.EquateEmpty()); diff != "" {
ct.Errorf("unexpected policy(-want +got):\n%s", diff)
}
}, 30*time.Second, 1*time.Second, "verifying that the new policy took place")
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
// Test that user1 can visit all user2
for _, client := range user1Clients {
for _, peer := range user2Clients {
fqdn, err := peer.FQDN()
assert.NoError(ct, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
result, err := client.Curl(url)
assert.Len(ct, result, 13)
assert.NoError(ct, err)
}
}
// Test that user2 _cannot_ visit user1
for _, client := range user2Clients {
for _, peer := range user1Clients {
fqdn, err := peer.FQDN()
assert.NoError(ct, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
result, err := client.Curl(url)
assert.Empty(ct, result)
assert.Error(ct, err)
}
}
}, 30*time.Second, 1*time.Second, "new policy did not get propagated to nodes")
}
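// TestACLAutogroupMember verifies that an autogroup:member ->
// autogroup:member rule allows all untagged nodes to reach each other.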
func TestACLAutogroupMember(t *testing.T) {
IntegrationSkip(t)
scenario := aclScenario(t,
&policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{ptr.To(policyv2.AutoGroupMember)},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(ptr.To(policyv2.AutoGroupMember), tailcfg.PortRangeAny),
},
},
},
},
2,
)
defer scenario.ShutdownAssertNoPanics(t)
allClients, err := scenario.ListTailscaleClients()
require.NoError(t, err)
err = scenario.WaitForTailscaleSync()
require.NoError(t, err)
// Test that untagged nodes can access each other
for _, client := range allClients {
var clientIsUntagged bool
assert.EventuallyWithT(t, func(c *assert.CollectT) {
status, err := client.Status()
assert.NoError(c, err)
clientIsUntagged = status.Self.Tags == nil || status.Self.Tags.Len() == 0
assert.True(c, clientIsUntagged, "Expected client %s to be untagged for autogroup:member test", client.Hostname())
}, 10*time.Second, 200*time.Millisecond, "Waiting for client %s to be untagged", client.Hostname())
if !clientIsUntagged {
continue
}
for _, peer := range allClients {
if client.Hostname() == peer.Hostname() {
continue
}
var peerIsUntagged bool
assert.EventuallyWithT(t, func(c *assert.CollectT) {
status, err := peer.Status()
assert.NoError(c, err)
peerIsUntagged = status.Self.Tags == nil || status.Self.Tags.Len() == 0
assert.True(c, peerIsUntagged, "Expected peer %s to be untagged for autogroup:member test", peer.Hostname())
}, 10*time.Second, 200*time.Millisecond, "Waiting for peer %s to be untagged", peer.Hostname())
if !peerIsUntagged {
continue
}
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, 20*time.Second, 500*time.Millisecond, "Verifying autogroup:member connectivity")
}
}
}
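// TestACLAutogroupTagged verifies that an autogroup:tagged ->
// autogroup:tagged rule only permits traffic between tagged nodes:
// tagged nodes can reach each other, while untagged nodes can neither
// reach nor be reached by anyone.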
func TestACLAutogroupTagged(t *testing.T) {
IntegrationSkip(t)
// Create a custom scenario for testing autogroup:tagged
spec := ScenarioSpec{
NodesPerUser: 2, // 2 nodes per user - one tagged, one untagged
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
policy := &policyv2.Policy{
TagOwners: policyv2.TagOwners{
"tag:test": policyv2.Owners{usernameOwner("user1@"), usernameOwner("user2@")},
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{ptr.To(policyv2.AutoGroupTagged)},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(ptr.To(policyv2.AutoGroupTagged), tailcfg.PortRangeAny),
},
},
},
}
// Create only the headscale server (not the full environment with users/nodes)
headscale, err := scenario.Headscale(
hsic.WithACLPolicy(policy),
hsic.WithTestName("acl-autogroup-tagged"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
)
require.NoError(t, err)
// Create users and nodes manually with specific tags
// Tags are now set via PreAuthKey (tags-as-identity model), not via --advertise-tags
for _, userStr := range spec.Users {
user, err := scenario.CreateUser(userStr)
require.NoError(t, err)
// Create two pre-auth keys per user: one tagged, one untagged
taggedAuthKey, err := scenario.CreatePreAuthKeyWithTags(user.GetId(), true, false, []string{"tag:test"})
require.NoError(t, err)
untaggedAuthKey, err := scenario.CreatePreAuthKey(user.GetId(), true, false)
require.NoError(t, err)
// Create nodes with proper naming
for i := range spec.NodesPerUser {
var (
authKey string
version string
)
if i == 0 {
// First node is tagged - use tagged PreAuthKey
authKey = taggedAuthKey.GetKey()
version = "head"
t.Logf("Creating tagged node for %s", userStr)
} else {
// Second node is untagged - use untagged PreAuthKey
authKey = untaggedAuthKey.GetKey()
version = "unstable"
t.Logf("Creating untagged node for %s", userStr)
}
// Get the network for this scenario
networks := scenario.Networks()
var network *dockertest.Network
if len(networks) > 0 {
network = networks[0]
}
// Create the tailscale node with appropriate options
opts := []tsic.Option{
tsic.WithCACert(headscale.GetCert()),
tsic.WithHeadscaleName(headscale.GetHostname()),
tsic.WithNetwork(network),
tsic.WithNetfilter("off"),
tsic.WithPackages("curl"),
tsic.WithWebserver(80),
tsic.WithDockerWorkdir("/"),
}
tsClient, err := tsic.New(
scenario.Pool(),
version,
opts...,
)
require.NoError(t, err)
err = tsClient.WaitForNeedsLogin(integrationutil.PeerSyncTimeout())
require.NoError(t, err)
// Login with the appropriate auth key (tags come from the PreAuthKey)
err = tsClient.Login(headscale.GetEndpoint(), authKey)
require.NoError(t, err)
err = tsClient.WaitForRunning(integrationutil.PeerSyncTimeout())
require.NoError(t, err)
// Add client to user
userObj := scenario.GetOrCreateUser(userStr)
userObj.Clients[tsClient.Hostname()] = tsClient
}
}
allClients, err := scenario.ListTailscaleClients()
require.NoError(t, err)
require.Len(t, allClients, 4) // 2 users * 2 nodes each
// Wait for nodes to see only their allowed peers
// Tagged nodes should see each other (2 tagged nodes total)
// Untagged nodes should see no one
var (
taggedClients []TailscaleClient
untaggedClients []TailscaleClient
)
// First, categorize nodes by checking their tags
for _, client := range allClients {
hostname := client.Hostname()
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status, err := client.Status()
assert.NoError(ct, err)
if status.Self.Tags != nil && status.Self.Tags.Len() > 0 {
// This is a tagged node
assert.Len(ct, status.Peers(), 1, "tagged node %s should see exactly 1 peer", hostname)
// Add to tagged list only once we've verified it
found := false
for _, tc := range taggedClients {
if tc.Hostname() == hostname {
found = true
break
}
}
if !found {
taggedClients = append(taggedClients, client)
}
} else {
// This is an untagged node
assert.Empty(ct, status.Peers(), "untagged node %s should see 0 peers", hostname)
// Add to untagged list only once we've verified it
found := false
for _, uc := range untaggedClients {
if uc.Hostname() == hostname {
found = true
break
}
}
if !found {
untaggedClients = append(untaggedClients, client)
}
}
}, 30*time.Second, 1*time.Second, "verifying peer visibility for node %s", hostname)
}
// Verify we have the expected number of tagged and untagged nodes
require.Len(t, taggedClients, 2, "should have exactly 2 tagged nodes")
require.Len(t, untaggedClients, 2, "should have exactly 2 untagged nodes")
// Explicitly verify tags on tagged nodes
for _, client := range taggedClients {
assert.EventuallyWithT(t, func(c *assert.CollectT) {
status, err := client.Status()
assert.NoError(c, err)
assert.NotNil(c, status.Self.Tags, "tagged node %s should have tags", client.Hostname())
assert.Positive(c, status.Self.Tags.Len(), "tagged node %s should have at least one tag", client.Hostname())
}, 10*time.Second, 200*time.Millisecond, "Waiting for tags to be applied to tagged nodes")
}
// Verify untagged nodes have no tags
for _, client := range untaggedClients {
assert.EventuallyWithT(t, func(c *assert.CollectT) {
status, err := client.Status()
assert.NoError(c, err)
if status.Self.Tags != nil {
assert.Equal(c, 0, status.Self.Tags.Len(), "untagged node %s should have no tags", client.Hostname())
}
}, 10*time.Second, 200*time.Millisecond, "Waiting to verify untagged nodes have no tags")
}
// Test that tagged nodes can communicate with each other
for _, client := range taggedClients {
for _, peer := range taggedClients {
if client.Hostname() == peer.Hostname() {
continue
}
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("Testing connection from tagged node %s to tagged node %s", client.Hostname(), peer.Hostname())
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
result, err := client.Curl(url)
assert.NoError(ct, err)
assert.Len(ct, result, 13)
}, 20*time.Second, 500*time.Millisecond, "tagged nodes should be able to communicate")
}
}
// Test that untagged nodes cannot communicate with anyone
for _, client := range untaggedClients {
// Try to reach tagged nodes (should fail)
for _, peer := range taggedClients {
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("Testing connection from untagged node %s to tagged node %s (should fail)", client.Hostname(), peer.Hostname())
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
result, err := client.CurlFailFast(url)
assert.Empty(ct, result)
assert.Error(ct, err)
}, 5*time.Second, 200*time.Millisecond, "untagged nodes should not be able to reach tagged nodes")
}
// Try to reach other untagged nodes (should also fail)
for _, peer := range untaggedClients {
if client.Hostname() == peer.Hostname() {
continue
}
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("Testing connection from untagged node %s to untagged node %s (should fail)", client.Hostname(), peer.Hostname())
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
result, err := client.CurlFailFast(url)
assert.Empty(ct, result)
assert.Error(ct, err)
}, 5*time.Second, 200*time.Millisecond, "untagged nodes should not be able to reach other untagged nodes")
}
}
// Test that tagged nodes cannot reach untagged nodes
for _, client := range taggedClients {
for _, peer := range untaggedClients {
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("Testing connection from tagged node %s to untagged node %s (should fail)", client.Hostname(), peer.Hostname())
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
result, err := client.CurlFailFast(url)
assert.Empty(ct, result)
assert.Error(ct, err)
}, 5*time.Second, 200*time.Millisecond, "tagged nodes should not be able to reach untagged nodes")
}
}
}
// Test that only devices owned by the same user can access each other and cannot access devices of other users
// Test structure:
// - user1: 2 regular nodes (tests autogroup:self for same-user access)
// - user2: 2 regular nodes (tests autogroup:self for same-user access and cross-user isolation)
// - user-router: 1 node with tag:router-node (tests that autogroup:self doesn't interfere with other rules).
func TestACLAutogroupSelf(t *testing.T) {
IntegrationSkip(t)
// Policy with TWO separate ACL rules:
// 1. autogroup:member -> autogroup:self (same-user access)
// 2. group:home -> tag:router-node (router access)
// This tests that autogroup:self doesn't prevent other rules from working
policy := &policyv2.Policy{
Groups: policyv2.Groups{
policyv2.Group("group:home"): []policyv2.Username{
policyv2.Username("user1@"),
policyv2.Username("user2@"),
},
},
TagOwners: policyv2.TagOwners{
policyv2.Tag("tag:router-node"): policyv2.Owners{
usernameOwner("user-router@"),
},
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{ptr.To(policyv2.AutoGroupMember)},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(ptr.To(policyv2.AutoGroupSelf), tailcfg.PortRangeAny),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{groupp("group:home")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(tagp("tag:router-node"), tailcfg.PortRangeAny),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{tagp("tag:router-node")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(groupp("group:home"), tailcfg.PortRangeAny),
},
},
},
}
// Create custom scenario: user1 and user2 with regular nodes, plus user-router with tagged node
spec := ScenarioSpec{
NodesPerUser: 2,
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{
tsic.WithNetfilter("off"),
tsic.WithPackages("curl"),
tsic.WithWebserver(80),
tsic.WithDockerWorkdir("/"),
},
hsic.WithACLPolicy(policy),
hsic.WithTestName("acl-autogroup-self"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
)
require.NoError(t, err)
// Add router node for user-router (single shared router node)
networks := scenario.Networks()
var network *dockertest.Network
if len(networks) > 0 {
network = networks[0]
}
headscale, err := scenario.Headscale()
require.NoError(t, err)
routerUser, err := scenario.CreateUser("user-router")
require.NoError(t, err)
// Create a tagged PreAuthKey for the router node (tags-as-identity model)
authKey, err := scenario.CreatePreAuthKeyWithTags(routerUser.GetId(), true, false, []string{"tag:router-node"})
require.NoError(t, err)
// Create router node (tags come from the PreAuthKey)
routerClient, err := tsic.New(
scenario.Pool(),
"unstable",
tsic.WithCACert(headscale.GetCert()),
tsic.WithHeadscaleName(headscale.GetHostname()),
tsic.WithNetwork(network),
tsic.WithNetfilter("off"),
tsic.WithPackages("curl"),
tsic.WithWebserver(80),
tsic.WithDockerWorkdir("/"),
)
require.NoError(t, err)
err = routerClient.WaitForNeedsLogin(integrationutil.PeerSyncTimeout())
require.NoError(t, err)
err = routerClient.Login(headscale.GetEndpoint(), authKey.GetKey())
require.NoError(t, err)
err = routerClient.WaitForRunning(integrationutil.PeerSyncTimeout())
require.NoError(t, err)
userRouterObj := scenario.GetOrCreateUser("user-router")
userRouterObj.Clients[routerClient.Hostname()] = routerClient
user1Clients, err := scenario.GetClients("user1")
require.NoError(t, err)
user2Clients, err := scenario.GetClients("user2")
require.NoError(t, err)
var user1Regular, user2Regular []TailscaleClient
for _, client := range user1Clients {
status, err := client.Status()
require.NoError(t, err)
if status.Self != nil && (status.Self.Tags == nil || status.Self.Tags.Len() == 0) {
user1Regular = append(user1Regular, client)
}
}
for _, client := range user2Clients {
status, err := client.Status()
require.NoError(t, err)
if status.Self != nil && (status.Self.Tags == nil || status.Self.Tags.Len() == 0) {
user2Regular = append(user2Regular, client)
}
}
require.NotEmpty(t, user1Regular, "user1 should have regular (untagged) devices")
require.NotEmpty(t, user2Regular, "user2 should have regular (untagged) devices")
require.NotNil(t, routerClient, "router node should exist")
// Wait for all nodes to sync with their expected peer counts
// With our ACL policy:
// - Regular nodes (user1/user2): 1 same-user regular peer + 1 router-node = 2 peers
// - Router node: 2 user1 regular + 2 user2 regular = 4 peers
for _, client := range user1Regular {
err := client.WaitForPeers(2, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())
require.NoError(t, err, "user1 regular device %s should see 2 peers (1 same-user peer + 1 router)", client.Hostname())
}
for _, client := range user2Regular {
err := client.WaitForPeers(2, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())
require.NoError(t, err, "user2 regular device %s should see 2 peers (1 same-user peer + 1 router)", client.Hostname())
}
err = routerClient.WaitForPeers(4, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())
require.NoError(t, err, "router should see 4 peers (all group:home regular nodes)")
// Test that user1's regular devices can access each other
for _, client := range user1Regular {
for _, peer := range user1Regular {
if client.Hostname() == peer.Hostname() {
continue
}
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s (user1) to %s (user1)", client.Hostname(), fqdn)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, 10*time.Second, 200*time.Millisecond, "user1 device should reach other user1 device via autogroup:self")
}
}
// Test that user2's regular devices can access each other
for _, client := range user2Regular {
for _, peer := range user2Regular {
if client.Hostname() == peer.Hostname() {
continue
}
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s (user2) to %s (user2)", client.Hostname(), fqdn)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.NoError(c, err)
assert.Len(c, result, 13)
}, 10*time.Second, 200*time.Millisecond, "user2 device should reach other user2 device via autogroup:self")
}
}
// Test that user1's regular devices can access router-node
for _, client := range user1Regular {
fqdn, err := routerClient.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s (user1) to %s (router-node) - should SUCCEED", client.Hostname(), fqdn)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.NoError(c, err)
assert.NotEmpty(c, result, "user1 should be able to access router-node via group:home -> tag:router-node rule")
}, 10*time.Second, 200*time.Millisecond, "user1 device should reach router-node (proves autogroup:self doesn't interfere)")
}
// Test that user2's regular devices can access router-node
for _, client := range user2Regular {
fqdn, err := routerClient.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s (user2) to %s (router-node) - should SUCCEED", client.Hostname(), fqdn)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := client.Curl(url)
assert.NoError(c, err)
assert.NotEmpty(c, result, "user2 should be able to access router-node via group:home -> tag:router-node rule")
}, 10*time.Second, 200*time.Millisecond, "user2 device should reach router-node (proves autogroup:self doesn't interfere)")
}
// Test that devices from different users cannot access each other's regular devices
for _, client := range user1Regular {
for _, peer := range user2Regular {
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s (user1) to %s (user2 regular) - should FAIL", client.Hostname(), fqdn)
result, err := client.Curl(url)
assert.Empty(t, result, "user1 should not be able to access user2's regular devices (autogroup:self isolation)")
assert.Error(t, err, "connection from user1 to user2 regular device should fail")
}
}
for _, client := range user2Regular {
for _, peer := range user1Regular {
fqdn, err := peer.FQDN()
require.NoError(t, err)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s (user2) to %s (user1 regular) - should FAIL", client.Hostname(), fqdn)
result, err := client.Curl(url)
assert.Empty(t, result, "user2 should not be able to access user1's regular devices (autogroup:self isolation)")
assert.Error(t, err, "connection from user2 to user1 regular device should fail")
}
}
}
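// TestACLPolicyPropagationOverTime repeatedly cycles through the policies
// defined below (allow-all, autogroup:self, user1 -> user2) in database
// policy mode and verifies after each switch that connectivity between the
// nodes matches the currently active policy, including for nodes added
// while a policy is in effect.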
func TestACLPolicyPropagationOverTime(t *testing.T) {
IntegrationSkip(t)
spec := ScenarioSpec{
NodesPerUser: 2,
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{
// Install iptables to enable packet filtering for ACL tests.
// Packet filters are essential for testing autogroup:self and other ACL policies.
tsic.WithPackages("curl", "iptables", "ip6tables"),
tsic.WithWebserver(80),
tsic.WithDockerWorkdir("/"),
},
hsic.WithTestName("aclpropagation"),
hsic.WithPolicyMode(types.PolicyModeDB),
)
require.NoError(t, err)
_, err = scenario.ListTailscaleClientsFQDNs()
require.NoError(t, err)
err = scenario.WaitForTailscaleSync()
require.NoError(t, err)
user1Clients, err := scenario.ListTailscaleClients("user1")
require.NoError(t, err)
user2Clients, err := scenario.ListTailscaleClients("user2")
require.NoError(t, err)
allClients := append(user1Clients, user2Clients...)
headscale, err := scenario.Headscale()
require.NoError(t, err)
// Define the four policies we'll cycle through
allowAllPolicy := &policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{wildcard()},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
},
},
},
}
autogroupSelfPolicy := &policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{ptr.To(policyv2.AutoGroupMember)},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(ptr.To(policyv2.AutoGroupSelf), tailcfg.PortRangeAny),
},
},
},
}
user1ToUser2Policy := &policyv2.Policy{
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
},
}
// Run through the policy cycle 5 times
for i := range 5 {
iteration := i + 1 // range 5 gives 0-4, we want 1-5 for logging
t.Logf("=== Iteration %d/5 ===", iteration)
// Phase 1: Allow all policy
t.Logf("Iteration %d: Setting allow-all policy", iteration)
err = headscale.SetPolicy(allowAllPolicy)
require.NoError(t, err)
// Wait for peer lists to sync with allow-all policy
t.Logf("Iteration %d: Phase 1 - Waiting for peer lists to sync with allow-all policy", iteration)
err = scenario.WaitForTailscaleSync()
require.NoError(t, err, "iteration %d: Phase 1 - failed to sync after allow-all policy", iteration)
// Test all-to-all connectivity after state is settled
t.Logf("Iteration %d: Phase 1 - Testing all-to-all connectivity", iteration)
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
for _, client := range allClients {
for _, peer := range allClients {
if client.ContainerID() == peer.ContainerID() {
continue
}
fqdn, err := peer.FQDN()
if !assert.NoError(ct, err, "iteration %d: failed to get FQDN for %s", iteration, peer.Hostname()) {
continue
}
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
result, err := client.Curl(url)
assert.NoError(ct, err, "iteration %d: %s should reach %s with allow-all policy", iteration, client.Hostname(), fqdn)
assert.Len(ct, result, 13, "iteration %d: response from %s to %s should be valid", iteration, client.Hostname(), fqdn)
}
}
}, 90*time.Second, 500*time.Millisecond, "iteration %d: Phase 1 - all connectivity tests with allow-all policy", iteration)
// Phase 2: Autogroup:self policy (only same user can access)
t.Logf("Iteration %d: Phase 2 - Setting autogroup:self policy", iteration)
err = headscale.SetPolicy(autogroupSelfPolicy)
require.NoError(t, err)
// Wait for peer lists to sync with autogroup:self - ensures cross-user peers are removed
t.Logf("Iteration %d: Phase 2 - Waiting for peer lists to sync with autogroup:self", iteration)
err = scenario.WaitForTailscaleSyncPerUser(60*time.Second, 500*time.Millisecond)
require.NoError(t, err, "iteration %d: Phase 2 - failed to sync after autogroup:self policy", iteration)
// Test ALL connectivity (positive and negative) in one block after state is settled
t.Logf("Iteration %d: Phase 2 - Testing all connectivity with autogroup:self", iteration)
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
// Positive: user1 can access user1's nodes
for _, client := range user1Clients {
for _, peer := range user1Clients {
if client.ContainerID() == peer.ContainerID() {
continue
}
fqdn, err := peer.FQDN()
if !assert.NoError(ct, err, "iteration %d: failed to get FQDN for user1 peer %s", iteration, peer.Hostname()) {
continue
}
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
result, err := client.Curl(url)
assert.NoError(ct, err, "iteration %d: user1 node %s should reach user1 node %s", iteration, client.Hostname(), peer.Hostname())
assert.Len(ct, result, 13, "iteration %d: response from %s to %s should be valid", iteration, client.Hostname(), peer.Hostname())
}
}
// Positive: user2 can access user2's nodes
for _, client := range user2Clients {
for _, peer := range user2Clients {
if client.ContainerID() == peer.ContainerID() {
continue
}
fqdn, err := peer.FQDN()
if !assert.NoError(ct, err, "iteration %d: failed to get FQDN for user2 peer %s", iteration, peer.Hostname()) {
continue
}
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
result, err := client.Curl(url)
assert.NoError(ct, err, "iteration %d: user2 %s should reach user2's node %s", iteration, client.Hostname(), fqdn)
assert.Len(ct, result, 13, "iteration %d: response from %s to %s should be valid", iteration, client.Hostname(), fqdn)
}
}
// Negative: user1 cannot access user2's nodes
for _, client := range user1Clients {
for _, peer := range user2Clients {
fqdn, err := peer.FQDN()
if !assert.NoError(ct, err, "iteration %d: failed to get FQDN for user2 peer %s", iteration, peer.Hostname()) {
continue
}
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
result, err := client.Curl(url)
assert.Error(ct, err, "iteration %d: user1 %s should NOT reach user2's node %s with autogroup:self", iteration, client.Hostname(), fqdn)
assert.Empty(ct, result, "iteration %d: user1 %s->user2 %s should fail", iteration, client.Hostname(), fqdn)
}
}
// Negative: user2 cannot access user1's nodes
for _, client := range user2Clients {
for _, peer := range user1Clients {
fqdn, err := peer.FQDN()
if !assert.NoError(ct, err, "iteration %d: failed to get FQDN for user1 peer %s", iteration, peer.Hostname()) {
continue
}
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
result, err := client.Curl(url)
assert.Error(ct, err, "iteration %d: user2 node %s should NOT reach user1 node %s", iteration, client.Hostname(), peer.Hostname())
assert.Empty(ct, result, "iteration %d: user2->user1 connection from %s to %s should fail", iteration, client.Hostname(), peer.Hostname())
}
}
}, 90*time.Second, 500*time.Millisecond, "iteration %d: Phase 2 - all connectivity tests with autogroup:self", iteration)
// Phase 2b: Add a new node to user1 and validate policy propagation
t.Logf("Iteration %d: Phase 2b - Adding new node to user1 during autogroup:self policy", iteration)
		// Add a new node for user1 with curl and a webserver so connectivity can be probed (netfilter off for this client)
// Get the network to use (scenario uses first network in list)
networks := scenario.Networks()
require.NotEmpty(t, networks, "scenario should have at least one network")
newClient := scenario.MustAddAndLoginClient(t, "user1", "all", headscale,
tsic.WithNetfilter("off"),
tsic.WithPackages("curl"),
tsic.WithWebserver(80),
tsic.WithDockerWorkdir("/"),
tsic.WithNetwork(networks[0]),
)
t.Logf("Iteration %d: Phase 2b - Added and logged in new node %s", iteration, newClient.Hostname())
// Wait for peer lists to sync after new node addition (now 3 user1 nodes, still autogroup:self)
t.Logf("Iteration %d: Phase 2b - Waiting for peer lists to sync after new node addition", iteration)
err = scenario.WaitForTailscaleSyncPerUser(60*time.Second, 500*time.Millisecond)
require.NoError(t, err, "iteration %d: Phase 2b - failed to sync after new node addition", iteration)
// Test ALL connectivity (positive and negative) in one block after state is settled
t.Logf("Iteration %d: Phase 2b - Testing all connectivity after new node addition", iteration)
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
// Re-fetch client list to ensure latest state
user1ClientsWithNew, err := scenario.ListTailscaleClients("user1")
assert.NoError(ct, err, "iteration %d: failed to list user1 clients", iteration)
assert.Len(ct, user1ClientsWithNew, 3, "iteration %d: user1 should have 3 nodes", iteration)
// Positive: all user1 nodes can access each other
for _, client := range user1ClientsWithNew {
for _, peer := range user1ClientsWithNew {
if client.ContainerID() == peer.ContainerID() {
continue
}
fqdn, err := peer.FQDN()
if !assert.NoError(ct, err, "iteration %d: failed to get FQDN for peer %s", iteration, peer.Hostname()) {
continue
}
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
result, err := client.Curl(url)
assert.NoError(ct, err, "iteration %d: user1 node %s should reach user1 node %s", iteration, client.Hostname(), peer.Hostname())
assert.Len(ct, result, 13, "iteration %d: response from %s to %s should be valid", iteration, client.Hostname(), peer.Hostname())
}
}
// Negative: user1 nodes cannot access user2's nodes
for _, client := range user1ClientsWithNew {
for _, peer := range user2Clients {
fqdn, err := peer.FQDN()
if !assert.NoError(ct, err, "iteration %d: failed to get FQDN for user2 peer %s", iteration, peer.Hostname()) {
continue
}
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
result, err := client.Curl(url)
assert.Error(ct, err, "iteration %d: user1 node %s should NOT reach user2 node %s", iteration, client.Hostname(), peer.Hostname())
assert.Empty(ct, result, "iteration %d: user1->user2 connection from %s to %s should fail", iteration, client.Hostname(), peer.Hostname())
}
}
}, 90*time.Second, 500*time.Millisecond, "iteration %d: Phase 2b - all connectivity tests after new node addition", iteration)
// Delete the newly added node before Phase 3
t.Logf("Iteration %d: Phase 2b - Deleting the newly added node from user1", iteration)
// Get the node list and find the newest node (highest ID)
var (
nodeList []*v1.Node
nodeToDeleteID uint64
)
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
nodeList, err = headscale.ListNodes("user1")
assert.NoError(ct, err)
assert.Len(ct, nodeList, 3, "should have 3 user1 nodes before deletion")
// Find the node with the highest ID (the newest one)
for _, node := range nodeList {
if node.GetId() > nodeToDeleteID {
nodeToDeleteID = node.GetId()
}
}
}, 10*time.Second, 500*time.Millisecond, "iteration %d: Phase 2b - listing nodes before deletion", iteration)
// Delete the node via headscale helper
t.Logf("Iteration %d: Phase 2b - Deleting node ID %d from headscale", iteration, nodeToDeleteID)
err = headscale.DeleteNode(nodeToDeleteID)
require.NoError(t, err, "iteration %d: failed to delete node %d", iteration, nodeToDeleteID)
// Remove the deleted client from the scenario's user.Clients map
// This is necessary for WaitForTailscaleSyncPerUser to calculate correct peer counts
t.Logf("Iteration %d: Phase 2b - Removing deleted client from scenario", iteration)
for clientName, client := range scenario.users["user1"].Clients {
status := client.MustStatus()
nodeID, err := strconv.ParseUint(string(status.Self.ID), 10, 64)
if err != nil {
continue
}
if nodeID == nodeToDeleteID {
delete(scenario.users["user1"].Clients, clientName)
t.Logf("Iteration %d: Phase 2b - Removed client %s (node ID %d) from scenario", iteration, clientName, nodeToDeleteID)
break
}
}
// Verify the node has been deleted
t.Logf("Iteration %d: Phase 2b - Verifying node deletion (expecting 2 user1 nodes)", iteration)
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
nodeListAfter, err := headscale.ListNodes("user1")
assert.NoError(ct, err, "failed to list nodes after deletion")
assert.Len(ct, nodeListAfter, 2, "iteration %d: should have 2 user1 nodes after deletion, got %d", iteration, len(nodeListAfter))
}, 10*time.Second, 500*time.Millisecond, "iteration %d: Phase 2b - node should be deleted", iteration)
// Wait for sync after deletion to ensure peer counts are correct
// Use WaitForTailscaleSyncPerUser because autogroup:self is still active,
// so nodes only see same-user peers, not all nodes
t.Logf("Iteration %d: Phase 2b - Waiting for sync after node deletion (with autogroup:self)", iteration)
err = scenario.WaitForTailscaleSyncPerUser(60*time.Second, 500*time.Millisecond)
require.NoError(t, err, "iteration %d: failed to sync after node deletion", iteration)
// Refresh client lists after deletion to ensure we don't reference the deleted node
user1Clients, err = scenario.ListTailscaleClients("user1")
require.NoError(t, err, "iteration %d: failed to refresh user1 client list after deletion", iteration)
user2Clients, err = scenario.ListTailscaleClients("user2")
require.NoError(t, err, "iteration %d: failed to refresh user2 client list after deletion", iteration)
// Create NEW slice instead of appending to old allClients which still has deleted client
allClients = make([]TailscaleClient, 0, len(user1Clients)+len(user2Clients))
allClients = append(allClients, user1Clients...)
allClients = append(allClients, user2Clients...)
t.Logf("Iteration %d: Phase 2b completed - New node added, validated, and removed successfully", iteration)
		// Phase 3: user1 can access user2, but not the reverse
t.Logf("Iteration %d: Phase 3 - Setting user1->user2 directional policy", iteration)
err = headscale.SetPolicy(user1ToUser2Policy)
require.NoError(t, err)
// Note: Cannot use WaitForTailscaleSync() here because directional policy means
// user2 nodes don't see user1 nodes in their peer list (asymmetric visibility).
// The EventuallyWithT block below will handle waiting for policy propagation.
// Test ALL connectivity (positive and negative) in one block after policy settles
t.Logf("Iteration %d: Phase 3 - Testing all connectivity with directional policy", iteration)
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
// Positive: user1 can access user2's nodes
for _, client := range user1Clients {
for _, peer := range user2Clients {
fqdn, err := peer.FQDN()
if !assert.NoError(ct, err, "iteration %d: failed to get FQDN for user2 peer %s", iteration, peer.Hostname()) {
continue
}
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
result, err := client.Curl(url)
assert.NoError(ct, err, "iteration %d: user1 node %s should reach user2 node %s", iteration, client.Hostname(), peer.Hostname())
assert.Len(ct, result, 13, "iteration %d: response from %s to %s should be valid", iteration, client.Hostname(), peer.Hostname())
}
}
// Negative: user2 cannot access user1's nodes
for _, client := range user2Clients {
for _, peer := range user1Clients {
fqdn, err := peer.FQDN()
if !assert.NoError(ct, err, "iteration %d: failed to get FQDN for user1 peer %s", iteration, peer.Hostname()) {
continue
}
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
result, err := client.Curl(url)
assert.Error(ct, err, "iteration %d: user2 node %s should NOT reach user1 node %s", iteration, client.Hostname(), peer.Hostname())
assert.Empty(ct, result, "iteration %d: user2->user1 from %s to %s should fail", iteration, client.Hostname(), peer.Hostname())
}
}
}, 90*time.Second, 500*time.Millisecond, "iteration %d: Phase 3 - all connectivity tests with directional policy", iteration)
t.Logf("=== Iteration %d/5 completed successfully - All 3 phases passed ===", iteration)
}
t.Log("All 5 iterations completed successfully - ACL propagation is working correctly")
}
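// The phases above repeat one probe: curl the peer's /etc/hostname over its
// MagicDNS name and expect the 13-byte hostname payload served by the test
// webserver. A minimal sketch of that probe as a helper is shown here for
// reference; the name and signature are illustrative and the tests above do
// not call it.
func assertPeerHTTPReachable(ct *assert.CollectT, client, peer TailscaleClient) {
	fqdn, err := peer.FQDN()
	if !assert.NoError(ct, err, "failed to get FQDN for %s", peer.Hostname()) {
		return
	}
	url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
	result, err := client.Curl(url)
	assert.NoError(ct, err, "%s should reach %s", client.Hostname(), peer.Hostname())
	assert.Len(ct, result, 13, "response from %s to %s should be valid", client.Hostname(), peer.Hostname())
}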
// TestACLTagPropagation validates that tag changes propagate immediately
// to ACLs without requiring a Headscale restart.
// This is the primary test for GitHub issue #2389.
func TestACLTagPropagation(t *testing.T) {
IntegrationSkip(t)
tests := []struct {
name string
policy *policyv2.Policy
spec ScenarioSpec
// setup returns clients and any initial state needed
setup func(t *testing.T, scenario *Scenario, headscale ControlServer) (
sourceClient TailscaleClient,
targetClient TailscaleClient,
targetNodeID uint64,
)
// initialAccess: should source be able to reach target before tag change?
initialAccess bool
// tagChange: what tags to set on target node (nil = test uses custom logic)
tagChange []string
// finalAccess: should source be able to reach target after tag change?
finalAccess bool
}{
{
name: "add-tag-grants-access",
policy: &policyv2.Policy{
TagOwners: policyv2.TagOwners{
"tag:shared": policyv2.Owners{usernameOwner("user1@")},
},
ACLs: []policyv2.ACL{
// user1 self-access
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user1@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user1@"), tailcfg.PortRangeAny),
},
},
// user2 self-access
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
// user2 can access tag:shared
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(tagp("tag:shared"), tailcfg.PortRangeAny),
},
},
// tag:shared can respond to user2 (return path)
{
Action: "accept",
Sources: []policyv2.Alias{tagp("tag:shared")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
},
},
spec: ScenarioSpec{
NodesPerUser: 1,
Users: []string{"user1", "user2"},
},
setup: func(t *testing.T, scenario *Scenario, headscale ControlServer) (TailscaleClient, TailscaleClient, uint64) {
t.Helper()
user1Clients, err := scenario.ListTailscaleClients("user1")
require.NoError(t, err)
user2Clients, err := scenario.ListTailscaleClients("user2")
require.NoError(t, err)
nodes, err := headscale.ListNodes("user1")
require.NoError(t, err)
return user2Clients[0], user1Clients[0], nodes[0].GetId()
},
initialAccess: false, // user2 cannot access user1 (no tag)
tagChange: []string{"tag:shared"}, // add tag:shared
finalAccess: true, // user2 can now access user1
},
{
name: "remove-tag-revokes-access",
policy: &policyv2.Policy{
TagOwners: policyv2.TagOwners{
"tag:shared": policyv2.Owners{usernameOwner("user1@")},
"tag:other": policyv2.Owners{usernameOwner("user1@")},
},
ACLs: []policyv2.ACL{
// user2 self-access
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
// user2 can access tag:shared only
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(tagp("tag:shared"), tailcfg.PortRangeAny),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{tagp("tag:shared")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
},
},
spec: ScenarioSpec{
NodesPerUser: 0, // manual creation for tagged node
Users: []string{"user1", "user2"},
},
setup: func(t *testing.T, scenario *Scenario, headscale ControlServer) (TailscaleClient, TailscaleClient, uint64) {
t.Helper()
userMap, err := headscale.MapUsers()
require.NoError(t, err)
// Create user1's node WITH tag:shared via PreAuthKey
taggedKey, err := scenario.CreatePreAuthKeyWithTags(
userMap["user1"].GetId(), false, false, []string{"tag:shared"},
)
require.NoError(t, err)
user1Node, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
tsic.WithDockerEntrypoint([]string{
"/bin/sh", "-c",
"/bin/sleep 3 ; apk add python3 curl ; update-ca-certificates ; python3 -m http.server --bind :: 80 & tailscaled --tun=tsdev",
}),
tsic.WithDockerWorkdir("/"),
tsic.WithNetfilter("off"),
)
require.NoError(t, err)
err = user1Node.Login(headscale.GetEndpoint(), taggedKey.GetKey())
require.NoError(t, err)
// Create user2's node (untagged)
untaggedKey, err := scenario.CreatePreAuthKey(userMap["user2"].GetId(), false, false)
require.NoError(t, err)
user2Node, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
tsic.WithDockerEntrypoint([]string{
"/bin/sh", "-c",
"/bin/sleep 3 ; apk add python3 curl ; update-ca-certificates ; python3 -m http.server --bind :: 80 & tailscaled --tun=tsdev",
}),
tsic.WithDockerWorkdir("/"),
tsic.WithNetfilter("off"),
)
require.NoError(t, err)
err = user2Node.Login(headscale.GetEndpoint(), untaggedKey.GetKey())
require.NoError(t, err)
err = scenario.WaitForTailscaleSync()
require.NoError(t, err)
nodes, err := headscale.ListNodes("user1")
require.NoError(t, err)
return user2Node, user1Node, nodes[0].GetId()
},
initialAccess: true, // user2 can access user1 (has tag:shared)
tagChange: []string{"tag:other"}, // replace with tag:other
finalAccess: false, // user2 cannot access (no ACL for tag:other)
},
{
name: "change-tag-changes-access",
policy: &policyv2.Policy{
TagOwners: policyv2.TagOwners{
"tag:team-a": policyv2.Owners{usernameOwner("user1@")},
"tag:team-b": policyv2.Owners{usernameOwner("user1@")},
},
ACLs: []policyv2.ACL{
// user2 self-access
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
// user2 can access tag:team-b only (NOT tag:team-a)
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(tagp("tag:team-b"), tailcfg.PortRangeAny),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{tagp("tag:team-b")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
},
},
spec: ScenarioSpec{
NodesPerUser: 0,
Users: []string{"user1", "user2"},
},
setup: func(t *testing.T, scenario *Scenario, headscale ControlServer) (TailscaleClient, TailscaleClient, uint64) {
t.Helper()
userMap, err := headscale.MapUsers()
require.NoError(t, err)
// Create user1's node with tag:team-a (user2 has NO ACL for this)
taggedKey, err := scenario.CreatePreAuthKeyWithTags(
userMap["user1"].GetId(), false, false, []string{"tag:team-a"},
)
require.NoError(t, err)
user1Node, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
tsic.WithDockerEntrypoint([]string{
"/bin/sh", "-c",
"/bin/sleep 3 ; apk add python3 curl ; update-ca-certificates ; python3 -m http.server --bind :: 80 & tailscaled --tun=tsdev",
}),
tsic.WithDockerWorkdir("/"),
tsic.WithNetfilter("off"),
)
require.NoError(t, err)
err = user1Node.Login(headscale.GetEndpoint(), taggedKey.GetKey())
require.NoError(t, err)
// Create user2's node
untaggedKey, err := scenario.CreatePreAuthKey(userMap["user2"].GetId(), false, false)
require.NoError(t, err)
user2Node, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
tsic.WithDockerEntrypoint([]string{
"/bin/sh", "-c",
"/bin/sleep 3 ; apk add python3 curl ; update-ca-certificates ; python3 -m http.server --bind :: 80 & tailscaled --tun=tsdev",
}),
tsic.WithDockerWorkdir("/"),
tsic.WithNetfilter("off"),
)
require.NoError(t, err)
err = user2Node.Login(headscale.GetEndpoint(), untaggedKey.GetKey())
require.NoError(t, err)
err = scenario.WaitForTailscaleSync()
require.NoError(t, err)
nodes, err := headscale.ListNodes("user1")
require.NoError(t, err)
return user2Node, user1Node, nodes[0].GetId()
},
initialAccess: false, // user2 cannot access (tag:team-a not in ACL)
tagChange: []string{"tag:team-b"}, // change to tag:team-b
finalAccess: true, // user2 can now access (tag:team-b in ACL)
},
{
name: "multiple-tags-partial-removal",
policy: &policyv2.Policy{
TagOwners: policyv2.TagOwners{
"tag:web": policyv2.Owners{usernameOwner("user1@")},
"tag:internal": policyv2.Owners{usernameOwner("user1@")},
},
ACLs: []policyv2.ACL{
// user2 self-access
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
// user2 can access tag:web
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(tagp("tag:web"), tailcfg.PortRangeAny),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{tagp("tag:web")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
},
},
spec: ScenarioSpec{
NodesPerUser: 0,
Users: []string{"user1", "user2"},
},
setup: func(t *testing.T, scenario *Scenario, headscale ControlServer) (TailscaleClient, TailscaleClient, uint64) {
t.Helper()
userMap, err := headscale.MapUsers()
require.NoError(t, err)
// Create user1's node with BOTH tags
taggedKey, err := scenario.CreatePreAuthKeyWithTags(
userMap["user1"].GetId(), false, false, []string{"tag:web", "tag:internal"},
)
require.NoError(t, err)
user1Node, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
tsic.WithDockerEntrypoint([]string{
"/bin/sh", "-c",
"/bin/sleep 3 ; apk add python3 curl ; update-ca-certificates ; python3 -m http.server --bind :: 80 & tailscaled --tun=tsdev",
}),
tsic.WithDockerWorkdir("/"),
tsic.WithNetfilter("off"),
)
require.NoError(t, err)
err = user1Node.Login(headscale.GetEndpoint(), taggedKey.GetKey())
require.NoError(t, err)
// Create user2's node
untaggedKey, err := scenario.CreatePreAuthKey(userMap["user2"].GetId(), false, false)
require.NoError(t, err)
user2Node, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
tsic.WithDockerEntrypoint([]string{
"/bin/sh", "-c",
"/bin/sleep 3 ; apk add python3 curl ; update-ca-certificates ; python3 -m http.server --bind :: 80 & tailscaled --tun=tsdev",
}),
tsic.WithDockerWorkdir("/"),
tsic.WithNetfilter("off"),
)
require.NoError(t, err)
err = user2Node.Login(headscale.GetEndpoint(), untaggedKey.GetKey())
require.NoError(t, err)
err = scenario.WaitForTailscaleSync()
require.NoError(t, err)
nodes, err := headscale.ListNodes("user1")
require.NoError(t, err)
return user2Node, user1Node, nodes[0].GetId()
},
initialAccess: true, // user2 can access (has tag:web)
tagChange: []string{"tag:internal"}, // remove tag:web, keep tag:internal
finalAccess: false, // user2 cannot access (no ACL for tag:internal)
},
{
name: "tag-change-updates-peer-identity",
policy: &policyv2.Policy{
TagOwners: policyv2.TagOwners{
"tag:server": policyv2.Owners{usernameOwner("user1@")},
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(tagp("tag:server"), tailcfg.PortRangeAny),
},
},
{
Action: "accept",
Sources: []policyv2.Alias{tagp("tag:server")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
},
},
spec: ScenarioSpec{
NodesPerUser: 1,
Users: []string{"user1", "user2"},
},
setup: func(t *testing.T, scenario *Scenario, headscale ControlServer) (TailscaleClient, TailscaleClient, uint64) {
t.Helper()
user1Clients, err := scenario.ListTailscaleClients("user1")
require.NoError(t, err)
user2Clients, err := scenario.ListTailscaleClients("user2")
require.NoError(t, err)
nodes, err := headscale.ListNodes("user1")
require.NoError(t, err)
return user2Clients[0], user1Clients[0], nodes[0].GetId()
},
initialAccess: false, // user2 cannot access user1 (no tag yet)
tagChange: []string{"tag:server"}, // assign tag:server
finalAccess: true, // user2 can now access via tag:server
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
scenario, err := NewScenario(tt.spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{
tsic.WithNetfilter("off"),
tsic.WithDockerEntrypoint([]string{
"/bin/sh", "-c",
"/bin/sleep 3 ; apk add python3 curl ; update-ca-certificates ; python3 -m http.server --bind :: 80 & tailscaled --tun=tsdev",
}),
tsic.WithDockerWorkdir("/"),
},
hsic.WithACLPolicy(tt.policy),
hsic.WithTestName("acl-tag-"+tt.name),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
)
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
// Run test-specific setup
sourceClient, targetClient, targetNodeID := tt.setup(t, scenario, headscale)
targetFQDN, err := targetClient.FQDN()
require.NoError(t, err)
targetURL := fmt.Sprintf("http://%s/etc/hostname", targetFQDN)
// Step 1: Verify initial access state
t.Logf("Step 1: Verifying initial access (expect success=%v)", tt.initialAccess)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := sourceClient.Curl(targetURL)
if tt.initialAccess {
assert.NoError(c, err, "Initial access should succeed")
assert.NotEmpty(c, result, "Initial access should return content")
} else {
assert.Error(c, err, "Initial access should fail")
}
}, 30*time.Second, 500*time.Millisecond, "verifying initial access state")
// Step 1b: Verify initial NetMap visibility
t.Logf("Step 1b: Verifying initial NetMap visibility (expect visible=%v)", tt.initialAccess)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
status, err := sourceClient.Status()
assert.NoError(c, err)
targetHostname := targetClient.Hostname()
found := false
for _, peer := range status.Peer {
if strings.Contains(peer.HostName, targetHostname) {
found = true
break
}
}
if tt.initialAccess {
assert.True(c, found, "Target should be visible in NetMap initially")
} else {
assert.False(c, found, "Target should NOT be visible in NetMap initially")
}
}, 30*time.Second, 500*time.Millisecond, "verifying initial NetMap visibility")
// Step 2: Apply tag change
t.Logf("Step 2: Setting tags on node %d to %v", targetNodeID, tt.tagChange)
err = headscale.SetNodeTags(targetNodeID, tt.tagChange)
require.NoError(t, err)
// Verify tag was applied
assert.EventuallyWithT(t, func(c *assert.CollectT) {
// List nodes by iterating through all users since tagged nodes may "move"
var node *v1.Node
for _, user := range tt.spec.Users {
nodes, err := headscale.ListNodes(user)
if err != nil {
continue
}
for _, n := range nodes {
if n.GetId() == targetNodeID {
node = n
break
}
}
}
// Also check nodes without user filter
if node == nil {
// Try listing all nodes
allNodes, _ := headscale.ListNodes("")
for _, n := range allNodes {
if n.GetId() == targetNodeID {
node = n
break
}
}
}
assert.NotNil(c, node, "Node should still exist")
if node != nil {
assert.ElementsMatch(c, tt.tagChange, node.GetTags(), "Tags should be updated")
}
}, 10*time.Second, 500*time.Millisecond, "verifying tag change applied")
// Step 3: Verify final access state (this is the key test for #2389)
t.Logf("Step 3: Verifying final access after tag change (expect success=%v)", tt.finalAccess)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := sourceClient.Curl(targetURL)
if tt.finalAccess {
assert.NoError(c, err, "Final access should succeed after tag change")
assert.NotEmpty(c, result, "Final access should return content")
} else {
assert.Error(c, err, "Final access should fail after tag change")
}
}, 30*time.Second, 500*time.Millisecond, "verifying access propagated after tag change")
// Step 3b: Verify final NetMap visibility
t.Logf("Step 3b: Verifying final NetMap visibility (expect visible=%v)", tt.finalAccess)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
status, err := sourceClient.Status()
assert.NoError(c, err)
targetHostname := targetClient.Hostname()
found := false
for _, peer := range status.Peer {
if strings.Contains(peer.HostName, targetHostname) {
found = true
break
}
}
if tt.finalAccess {
assert.True(c, found, "Target should be visible in NetMap after tag change")
} else {
assert.False(c, found, "Target should NOT be visible in NetMap after tag change")
}
}, 60*time.Second, 500*time.Millisecond, "verifying NetMap visibility propagated after tag change")
t.Logf("Test %s PASSED: Tag change propagated correctly", tt.name)
})
}
}
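// Steps 1b and 3b above determine NetMap visibility by scanning the source
// node's status for the target's hostname. A small sketch of that lookup,
// using only the Status() and Hostname() accessors already exercised in this
// file; the helper name is illustrative and the tests above inline this logic.
func peerVisibleInNetMap(source, target TailscaleClient) (bool, error) {
	status, err := source.Status()
	if err != nil {
		return false, err
	}
	for _, peer := range status.Peer {
		if strings.Contains(peer.HostName, target.Hostname()) {
			return true, nil
		}
	}
	return false, nil
}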
// TestACLTagPropagationPortSpecific validates that tag changes correctly update
// port-specific ACLs. When a tag change restricts access to specific ports,
// the peer should remain visible but only the allowed ports should be accessible.
func TestACLTagPropagationPortSpecific(t *testing.T) {
IntegrationSkip(t)
	// Policy: tag:webserver allows port 80, tag:sshonly allows port 22.
	// When we change from tag:webserver to tag:sshonly, HTTP on port 80 should fail,
	// but the peer should stay visible in the NetMap (port 22 and ICMP remain allowed).
policy := &policyv2.Policy{
TagOwners: policyv2.TagOwners{
"tag:webserver": policyv2.Owners{usernameOwner("user1@")},
"tag:sshonly": policyv2.Owners{usernameOwner("user1@")},
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
// user2 can access tag:webserver on port 80 only
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(tagp("tag:webserver"), tailcfg.PortRange{First: 80, Last: 80}),
},
},
// user2 can access tag:sshonly on port 22 only
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(tagp("tag:sshonly"), tailcfg.PortRange{First: 22, Last: 22}),
},
},
// Allow ICMP for ping tests
{
Action: "accept",
Sources: []policyv2.Alias{usernamep("user2@")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(tagp("tag:webserver"), tailcfg.PortRangeAny),
aliasWithPorts(tagp("tag:sshonly"), tailcfg.PortRangeAny),
},
Protocol: "icmp",
},
// Return path
{
Action: "accept",
Sources: []policyv2.Alias{tagp("tag:webserver"), tagp("tag:sshonly")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
},
},
},
}
spec := ScenarioSpec{
NodesPerUser: 0,
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{
tsic.WithNetfilter("off"),
tsic.WithDockerEntrypoint([]string{
"/bin/sh", "-c",
"/bin/sleep 3 ; apk add python3 curl ; update-ca-certificates ; python3 -m http.server --bind :: 80 & tailscaled --tun=tsdev",
}),
tsic.WithDockerWorkdir("/"),
},
hsic.WithACLPolicy(policy),
hsic.WithTestName("acl-tag-port-specific"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
)
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
userMap, err := headscale.MapUsers()
require.NoError(t, err)
// Create user1's node WITH tag:webserver
taggedKey, err := scenario.CreatePreAuthKeyWithTags(
userMap["user1"].GetId(), false, false, []string{"tag:webserver"},
)
require.NoError(t, err)
user1Node, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
tsic.WithDockerEntrypoint([]string{
"/bin/sh", "-c",
"/bin/sleep 3 ; apk add python3 curl ; update-ca-certificates ; python3 -m http.server --bind :: 80 & tailscaled --tun=tsdev",
}),
tsic.WithDockerWorkdir("/"),
tsic.WithNetfilter("off"),
)
require.NoError(t, err)
err = user1Node.Login(headscale.GetEndpoint(), taggedKey.GetKey())
require.NoError(t, err)
// Create user2's node
untaggedKey, err := scenario.CreatePreAuthKey(userMap["user2"].GetId(), false, false)
require.NoError(t, err)
user2Node, err := scenario.CreateTailscaleNode(
"head",
tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]),
tsic.WithDockerEntrypoint([]string{
"/bin/sh", "-c",
"/bin/sleep 3 ; apk add python3 curl ; update-ca-certificates ; tailscaled --tun=tsdev",
}),
tsic.WithDockerWorkdir("/"),
tsic.WithNetfilter("off"),
)
require.NoError(t, err)
err = user2Node.Login(headscale.GetEndpoint(), untaggedKey.GetKey())
require.NoError(t, err)
err = scenario.WaitForTailscaleSync()
require.NoError(t, err)
nodes, err := headscale.ListNodes("user1")
require.NoError(t, err)
targetNodeID := nodes[0].GetId()
targetFQDN, err := user1Node.FQDN()
require.NoError(t, err)
targetURL := fmt.Sprintf("http://%s/etc/hostname", targetFQDN)
// Step 1: Verify initial state - HTTP on port 80 should work with tag:webserver
t.Log("Step 1: Verifying HTTP access with tag:webserver (should succeed)")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
result, err := user2Node.Curl(targetURL)
assert.NoError(c, err, "HTTP should work with tag:webserver")
assert.NotEmpty(c, result)
}, 30*time.Second, 500*time.Millisecond, "initial HTTP access with tag:webserver")
// Step 2: Change tag from webserver to sshonly
t.Logf("Step 2: Changing tag from webserver to sshonly on node %d", targetNodeID)
err = headscale.SetNodeTags(targetNodeID, []string{"tag:sshonly"})
require.NoError(t, err)
// Step 3: Verify peer is still visible in NetMap (partial access, not full removal)
t.Log("Step 3: Verifying peer remains visible in NetMap after tag change")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
status, err := user2Node.Status()
assert.NoError(c, err)
targetHostname := user1Node.Hostname()
found := false
for _, peer := range status.Peer {
if strings.Contains(peer.HostName, targetHostname) {
found = true
break
}
}
assert.True(c, found, "Peer should still be visible with tag:sshonly (port 22 access)")
}, 60*time.Second, 500*time.Millisecond, "peer visibility after tag change")
// Step 4: Verify HTTP on port 80 now fails (tag:sshonly only allows port 22)
t.Log("Step 4: Verifying HTTP access is now blocked (tag:sshonly only allows port 22)")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
_, err := user2Node.Curl(targetURL)
assert.Error(c, err, "HTTP should fail with tag:sshonly (only port 22 allowed)")
}, 60*time.Second, 500*time.Millisecond, "HTTP blocked after tag change to sshonly")
t.Log("Test PASSED: Port-specific ACL changes propagated correctly")
}
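// The port-specific test above only probes the default HTTP port; probing an
// arbitrary port works the same way by encoding the port in the URL. A sketch
// only - the tests in this file always target the webserver on port 80 and do
// not use this helper.
func curlPeerOnPort(client, peer TailscaleClient, port int) (string, error) {
	fqdn, err := peer.FQDN()
	if err != nil {
		return "", err
	}
	return client.Curl(fmt.Sprintf("http://%s:%d/etc/hostname", fqdn, port))
}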
// TestACLGroupWithUnknownUser tests issue #2967 where a group containing
// a reference to a non-existent user should not break connectivity for
// valid users in the same group. The expected behavior is that unknown
// users are silently ignored during group resolution.
func TestACLGroupWithUnknownUser(t *testing.T) {
IntegrationSkip(t)
// This test verifies that when a group contains a reference to a
// non-existent user (e.g., "nonexistent@"), the valid users in
// the group should still be able to connect to each other.
//
// Issue: https://github.com/juanfont/headscale/issues/2967
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{"user1", "user2"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
// Create a policy with a group that includes a non-existent user
// alongside valid users. The group should still work for valid users.
policy := &policyv2.Policy{
Groups: policyv2.Groups{
// This group contains a reference to "nonexistent@" which does not exist
policyv2.Group("group:test"): []policyv2.Username{
policyv2.Username("user1@"),
policyv2.Username("user2@"),
policyv2.Username("nonexistent@"), // This user does not exist
},
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{groupp("group:test")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(groupp("group:test"), tailcfg.PortRangeAny),
},
},
},
}
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{
tsic.WithNetfilter("off"),
tsic.WithPackages("curl"),
tsic.WithWebserver(80),
tsic.WithDockerWorkdir("/"),
},
hsic.WithACLPolicy(policy),
hsic.WithTestName("acl-unknown-user"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
)
require.NoError(t, err)
_, err = scenario.ListTailscaleClientsFQDNs()
require.NoError(t, err)
err = scenario.WaitForTailscaleSync()
require.NoError(t, err)
user1Clients, err := scenario.ListTailscaleClients("user1")
require.NoError(t, err)
require.Len(t, user1Clients, 1)
user2Clients, err := scenario.ListTailscaleClients("user2")
require.NoError(t, err)
require.Len(t, user2Clients, 1)
user1 := user1Clients[0]
user2 := user2Clients[0]
// Get FQDNs for connectivity test
user1FQDN, err := user1.FQDN()
require.NoError(t, err)
user2FQDN, err := user2.FQDN()
require.NoError(t, err)
// Test that user1 can reach user2 (valid users should be able to communicate)
// This is the key assertion for issue #2967: valid users should work
// even if the group contains references to non-existent users.
t.Log("Testing connectivity: user1 -> user2 (should succeed despite unknown user in group)")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
url := fmt.Sprintf("http://%s/etc/hostname", user2FQDN)
result, err := user1.Curl(url)
assert.NoError(c, err, "user1 should be able to reach user2")
assert.Len(c, result, 13, "expected hostname response")
}, 30*time.Second, 500*time.Millisecond, "user1 should reach user2")
// Test that user2 can reach user1 (bidirectional)
t.Log("Testing connectivity: user2 -> user1 (should succeed despite unknown user in group)")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
url := fmt.Sprintf("http://%s/etc/hostname", user1FQDN)
result, err := user2.Curl(url)
assert.NoError(c, err, "user2 should be able to reach user1")
assert.Len(c, result, 13, "expected hostname response")
}, 30*time.Second, 500*time.Millisecond, "user2 should reach user1")
t.Log("Test PASSED: Valid users can communicate despite unknown user reference in group")
}
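// The policy above mixes valid and unknown users in one group. A sketch of
// building such a group policy programmatically, using only the policyv2
// constructors already used in this file (e.g. groupPolicyFor("group:test",
// "user1@", "user2@", "nonexistent@")); the helper name is illustrative.
func groupPolicyFor(group string, usernames ...string) *policyv2.Policy {
	members := make([]policyv2.Username, 0, len(usernames))
	for _, name := range usernames {
		members = append(members, policyv2.Username(name))
	}
	return &policyv2.Policy{
		Groups: policyv2.Groups{
			policyv2.Group(group): members,
		},
		ACLs: []policyv2.ACL{
			{
				Action:  "accept",
				Sources: []policyv2.Alias{groupp(group)},
				Destinations: []policyv2.AliasWithPorts{
					aliasWithPorts(groupp(group), tailcfg.PortRangeAny),
				},
			},
		},
	}
}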
// TestACLGroupAfterUserDeletion tests issue #2967 scenario where a user
// is deleted but their reference remains in an ACL group. The remaining
// valid users should still be able to communicate.
func TestACLGroupAfterUserDeletion(t *testing.T) {
IntegrationSkip(t)
// This test verifies that when a user is deleted from headscale but
// their reference remains in an ACL group, the remaining valid users
// in the group should still be able to connect to each other.
//
// Issue: https://github.com/juanfont/headscale/issues/2967
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{"user1", "user2", "user3"},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
// Create a policy with a group containing all three users
policy := &policyv2.Policy{
Groups: policyv2.Groups{
policyv2.Group("group:all"): []policyv2.Username{
policyv2.Username("user1@"),
policyv2.Username("user2@"),
policyv2.Username("user3@"),
},
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{groupp("group:all")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(groupp("group:all"), tailcfg.PortRangeAny),
},
},
},
}
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{
tsic.WithNetfilter("off"),
tsic.WithPackages("curl"),
tsic.WithWebserver(80),
tsic.WithDockerWorkdir("/"),
},
hsic.WithACLPolicy(policy),
hsic.WithTestName("acl-deleted-user"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
hsic.WithPolicyMode(types.PolicyModeDB), // Use DB mode so policy persists after user deletion
)
require.NoError(t, err)
_, err = scenario.ListTailscaleClientsFQDNs()
require.NoError(t, err)
err = scenario.WaitForTailscaleSync()
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
user1Clients, err := scenario.ListTailscaleClients("user1")
require.NoError(t, err)
require.Len(t, user1Clients, 1)
user2Clients, err := scenario.ListTailscaleClients("user2")
require.NoError(t, err)
require.Len(t, user2Clients, 1)
user3Clients, err := scenario.ListTailscaleClients("user3")
require.NoError(t, err)
require.Len(t, user3Clients, 1)
user1 := user1Clients[0]
user2 := user2Clients[0]
// Get FQDNs for connectivity test
user1FQDN, err := user1.FQDN()
require.NoError(t, err)
user2FQDN, err := user2.FQDN()
require.NoError(t, err)
// Step 1: Verify initial connectivity - all users can reach each other
t.Log("Step 1: Verifying initial connectivity between all users")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
url := fmt.Sprintf("http://%s/etc/hostname", user2FQDN)
result, err := user1.Curl(url)
assert.NoError(c, err, "user1 should be able to reach user2 initially")
assert.Len(c, result, 13, "expected hostname response")
}, 30*time.Second, 500*time.Millisecond, "initial user1 -> user2 connectivity")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
url := fmt.Sprintf("http://%s/etc/hostname", user1FQDN)
result, err := user2.Curl(url)
assert.NoError(c, err, "user2 should be able to reach user1 initially")
assert.Len(c, result, 13, "expected hostname response")
}, 30*time.Second, 500*time.Millisecond, "initial user2 -> user1 connectivity")
// Step 2: Get user3's node and user, then delete them
t.Log("Step 2: Deleting user3's node and user from headscale")
// First, get user3's node ID
nodes, err := headscale.ListNodes("user3")
require.NoError(t, err)
require.Len(t, nodes, 1, "user3 should have exactly one node")
user3NodeID := nodes[0].GetId()
// Delete user3's node first (required before deleting the user)
err = headscale.DeleteNode(user3NodeID)
require.NoError(t, err, "failed to delete user3's node")
	// Now look up user3's user record
user3, err := GetUserByName(headscale, "user3")
require.NoError(t, err, "user3 should exist")
// Now delete user3 (after their nodes are deleted)
err = headscale.DeleteUser(user3.GetId())
require.NoError(t, err)
// Verify user3 is deleted
_, err = GetUserByName(headscale, "user3")
require.Error(t, err, "user3 should be deleted")
// Step 3: Verify that user1 and user2 can still communicate (before triggering policy refresh)
// The policy still references "user3@" in the group, but since user3 is deleted,
// connectivity may still work due to cached/stale policy state.
t.Log("Step 3: Verifying connectivity still works immediately after user3 deletion (stale cache)")
// Test that user1 can still reach user2
assert.EventuallyWithT(t, func(c *assert.CollectT) {
url := fmt.Sprintf("http://%s/etc/hostname", user2FQDN)
result, err := user1.Curl(url)
assert.NoError(c, err, "user1 should still be able to reach user2 after user3 deletion (stale cache)")
assert.Len(c, result, 13, "expected hostname response")
}, 60*time.Second, 500*time.Millisecond, "user1 -> user2 after user3 deletion")
// Step 4: Create a NEW user - this triggers updatePolicyManagerUsers() which
// re-evaluates the policy. According to issue #2967, this is when the bug manifests:
// the deleted user3@ in the group causes the entire group to fail resolution.
t.Log("Step 4: Creating a new user (user4) to trigger policy re-evaluation")
_, err = headscale.CreateUser("user4")
require.NoError(t, err, "failed to create user4")
// Verify user4 was created
_, err = GetUserByName(headscale, "user4")
require.NoError(t, err, "user4 should exist after creation")
// Step 5: THIS IS THE CRITICAL TEST - verify connectivity STILL works after
// creating a new user. Without the fix, the group containing the deleted user3@
// would fail to resolve, breaking connectivity for user1 and user2.
t.Log("Step 5: Verifying connectivity AFTER creating new user (this triggers the bug)")
// Test that user1 can still reach user2 AFTER the policy refresh triggered by user creation
assert.EventuallyWithT(t, func(c *assert.CollectT) {
url := fmt.Sprintf("http://%s/etc/hostname", user2FQDN)
result, err := user1.Curl(url)
assert.NoError(c, err, "user1 should still reach user2 after policy refresh (BUG if this fails)")
assert.Len(c, result, 13, "expected hostname response")
}, 60*time.Second, 500*time.Millisecond, "user1 -> user2 after policy refresh (issue #2967)")
// Test that user2 can still reach user1
assert.EventuallyWithT(t, func(c *assert.CollectT) {
url := fmt.Sprintf("http://%s/etc/hostname", user1FQDN)
result, err := user2.Curl(url)
assert.NoError(c, err, "user2 should still reach user1 after policy refresh (BUG if this fails)")
assert.Len(c, result, 13, "expected hostname response")
}, 60*time.Second, 500*time.Millisecond, "user2 -> user1 after policy refresh (issue #2967)")
t.Log("Test PASSED: Remaining users can communicate after deleted user and policy refresh")
}
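// Step 2 above relies on headscale's ordering requirement: a user's nodes must
// be deleted before the user record itself. A sketch of that sequence as one
// helper, using only ControlServer methods already called in this file; the
// helper name is illustrative and the tests above perform the steps inline.
func deleteUserAndNodes(headscale ControlServer, name string) error {
	nodes, err := headscale.ListNodes(name)
	if err != nil {
		return err
	}
	for _, node := range nodes {
		if err := headscale.DeleteNode(node.GetId()); err != nil {
			return err
		}
	}
	user, err := GetUserByName(headscale, name)
	if err != nil {
		return err
	}
	return headscale.DeleteUser(user.GetId())
}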
// TestACLGroupDeletionExactReproduction reproduces issue #2967:
// The bug is that deleting a user from the database (while keeping their
// reference in the ACL) and then creating a new user causes the policy
// to break for everyone in the group.
//
// Steps (step numbers match the logs in the test body):
//  0. Create policy with user1@, user2@ in group - connectivity works
//  1. DELETE user2 from DB (not ACL) - connectivity may still work (stale)
//  2. Observe user2 is gone from the DB while the ACL still references user2@
//  3. MODIFY ACL: Add user3@ to group - connectivity continues
//  4. CREATE new user user4 - connectivity STOPS (BUG)
//  5. Verify the filter rules and user1's node remain healthy.
func TestACLGroupDeletionExactReproduction(t *testing.T) {
IntegrationSkip(t)
// Issue: https://github.com/juanfont/headscale/issues/2967
const userToDelete = "user2"
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{"user1", userToDelete},
}
scenario, err := NewScenario(spec)
require.NoError(t, err)
defer scenario.ShutdownAssertNoPanics(t)
// Step 0: Create initial policy with BOTH user1@ and user2@ in group
initialPolicy := &policyv2.Policy{
Groups: policyv2.Groups{
policyv2.Group("group:all"): []policyv2.Username{
policyv2.Username("user1@"),
policyv2.Username("user2@"),
},
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{groupp("group:all")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(groupp("group:all"), tailcfg.PortRangeAny),
},
},
},
}
err = scenario.CreateHeadscaleEnv(
[]tsic.Option{
tsic.WithNetfilter("off"),
tsic.WithPackages("curl"),
tsic.WithWebserver(80),
tsic.WithDockerWorkdir("/"),
},
hsic.WithACLPolicy(initialPolicy),
hsic.WithTestName("acl-exact-repro"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
hsic.WithPolicyMode(types.PolicyModeDB),
)
require.NoError(t, err)
_, err = scenario.ListTailscaleClientsFQDNs()
require.NoError(t, err)
err = scenario.WaitForTailscaleSync()
require.NoError(t, err)
headscale, err := scenario.Headscale()
require.NoError(t, err)
user1Clients, err := scenario.ListTailscaleClients("user1")
require.NoError(t, err)
require.Len(t, user1Clients, 1)
user1 := user1Clients[0]
user2Clients, err := scenario.ListTailscaleClients(userToDelete)
require.NoError(t, err)
require.Len(t, user2Clients, 1)
user2Client := user2Clients[0]
user2FQDN, err := user2Client.FQDN()
require.NoError(t, err)
// Verify initial connectivity
t.Log("Step 0: Verifying initial connectivity")
assert.EventuallyWithT(t, func(c *assert.CollectT) {
url := fmt.Sprintf("http://%s/etc/hostname", user2FQDN)
result, err := user1.Curl(url)
assert.NoError(c, err, "Step 0: user1 should reach user2 initially")
assert.Len(c, result, 13, "expected hostname response")
}, 60*time.Second, 500*time.Millisecond, "Step 0: user1 -> user2")
t.Log("Step 0: PASSED - initial connectivity works")
// Step 1: DELETE user2 from DB (but NOT from ACL)
t.Log("Step 1: Deleting user2 from database (NOT from ACL)")
// First delete user2's node
nodes, err := headscale.ListNodes(userToDelete)
require.NoError(t, err)
require.Len(t, nodes, 1)
err = headscale.DeleteNode(nodes[0].GetId())
require.NoError(t, err)
// Get user2's ID and delete the user
users, err := headscale.ListUsers()
require.NoError(t, err)
var user2ID uint64
for _, u := range users {
if u.GetName() == userToDelete {
user2ID = u.GetId()
break
}
}
require.NotZero(t, user2ID)
err = headscale.DeleteUser(user2ID)
require.NoError(t, err)
t.Log("Step 2: user2 deleted from DB - ACL still references user2@")
// Step 3: MODIFY ACL - Add user3@ to the group (user3 exists in DB)
t.Log("Step 3: Creating user3 and modifying ACL to add user3@ to group:all")
// Create user3 first
user3, err := headscale.CreateUser("user3")
require.NoError(t, err)
require.NotNil(t, user3)
// Now modify ACL to include user3
modifiedPolicy2 := &policyv2.Policy{
Groups: policyv2.Groups{
policyv2.Group("group:all"): []policyv2.Username{
policyv2.Username("user1@"),
policyv2.Username("user2@"), // Still here - user2 is DELETED but still in ACL
policyv2.Username("user3@"), // Added user3
},
},
ACLs: []policyv2.ACL{
{
Action: "accept",
Sources: []policyv2.Alias{groupp("group:all")},
Destinations: []policyv2.AliasWithPorts{
aliasWithPorts(groupp("group:all"), tailcfg.PortRangeAny),
},
},
},
}
err = headscale.SetPolicy(modifiedPolicy2)
require.NoError(t, err)
t.Log("Step 3: ACL modified - group:all now has user1@, user2@ (deleted), user3@")
// Step 4: CREATE a completely new user (user4) - this should trigger the bug
t.Log("Step 4: Creating new user (user4) - THIS TRIGGERS THE BUG according to issue #2967")
user4, err := headscale.CreateUser("user4")
require.NoError(t, err)
require.NotNil(t, user4)
	// Step 5: Verify user1's node and the packet filter survive (the bug would break this)
	// According to the issue, after creating a new user, connectivity STOPS
	t.Log("Step 5: Verifying user1 is still healthy (BUG: this should FAIL without fix)")
// We need to test that user1 can still function
// Since user2 is gone, we test that user1's node is still healthy
// by checking if we can list nodes and user1 is there
assert.EventuallyWithT(t, func(c *assert.CollectT) {
nodesAfter, err := headscale.ListNodes("user1")
assert.NoError(c, err)
assert.Len(c, nodesAfter, 1, "user1 should still have a node after user4 creation")
}, 30*time.Second, 500*time.Millisecond, "user1 node should exist")
// Check the filter rules - if the bug occurs, the filter becomes EMPTY (0 rules)
// This is the key assertion that would fail without the fix
filter, err := headscale.DebugFilter()
require.NoError(t, err)
t.Logf("Filter rules after user4 creation: %d rules", len(filter))
// BUG MANIFESTATION: Without the fix, filter rules drop to 0
// because the policy manager fails to resolve "user2@" which no longer exists
require.NotEmpty(t, filter, "BUG #2967: Filter rules should NOT be empty - if this fails, the bug is reproduced")
// The real test: user1's node should still be able to accept connections
// Since user2 is deleted and user3/user4 have no nodes yet, we verify
// that user1's status is healthy
status, err := user1.Status()
require.NoError(t, err)
require.NotNil(t, status.Self, "user1 should have valid self status")
t.Log("Test PASSED: Filter rules are intact and user1 is healthy after user4 creation")
t.Log("NOTE: The original bug caused 'pinging to stop' after creating a new user")
}
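// The final assertion above uses the debug filter endpoint to detect the #2967
// failure mode, where the compiled packet filter collapses to zero rules. A
// sketch of that check as a reusable assertion, calling DebugFilter() exactly
// as the test does; the helper name is illustrative and unused here.
func requireNonEmptyFilter(t *testing.T, headscale ControlServer) {
	t.Helper()
	filter, err := headscale.DebugFilter()
	require.NoError(t, err)
	require.NotEmpty(t, filter, "compiled packet filter should contain at least one rule")
}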