diff --git a/.github/workflows/publish-image.yaml b/.github/workflows/publish-image.yaml
index d34954cf8f57e..363d5c661a297 100644
--- a/.github/workflows/publish-image.yaml
+++ b/.github/workflows/publish-image.yaml
@@ -57,7 +57,7 @@ jobs:
- name: Publish k8s-operator
shell: bash
run: |
- REPOS="ghcr.io/${{ github.repository }}/k8s-operator" TARGET="operator" ./build_docker.sh
+ REPOS="ghcr.io/${{ github.repository }}/k8s-operator" TARGET="k8s-operator" ./build_docker.sh
- name: Publish k8s-nameserver
shell: bash
diff --git a/Makefile b/Makefile
index 98c3d36cc1c9e..960f13885c11c 100644
--- a/Makefile
+++ b/Makefile
@@ -100,7 +100,7 @@ publishdevoperator: ## Build and publish k8s-operator image to location specifie
@test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1)
@test "${REPO}" != "tailscale/k8s-operator" || (echo "REPO=... must not be tailscale/k8s-operator" && exit 1)
@test "${REPO}" != "ghcr.io/tailscale/k8s-operator" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-operator" && exit 1)
- TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=operator ./build_docker.sh
+ TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=k8s-operator ./build_docker.sh
publishdevnameserver: ## Build and publish k8s-nameserver image to location specified by ${REPO}
@test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1)
diff --git a/VERSION.txt b/VERSION.txt
index 294b29f8a603f..2ea5ecd85abfc 100644
--- a/VERSION.txt
+++ b/VERSION.txt
@@ -1 +1 @@
-1.76.6
+1.78.3
diff --git a/build_docker.sh b/build_docker.sh
index 1cbdc4b9ef8e8..f9632ea0a06d3 100755
--- a/build_docker.sh
+++ b/build_docker.sh
@@ -17,12 +17,20 @@ eval "$(./build_dist.sh shellvars)"
DEFAULT_TARGET="client"
DEFAULT_TAGS="v${VERSION_SHORT},v${VERSION_MINOR}"
DEFAULT_BASE="tailscale/alpine-base:3.18"
+# Set a few pre-defined OCI annotations. The source annotation is used by tools such as Renovate that scan the linked
+# GitHub repo to find release notes for any new image tags. Note that for official Tailscale images the default
+# annotations defined here will be overridden by release scripts that call this script.
+# https://github.com/opencontainers/image-spec/blob/main/annotations.md#pre-defined-annotation-keys
+DEFAULT_ANNOTATIONS="org.opencontainers.image.source=https://github.com/tailscale/tailscale/blob/main/build_docker.sh,org.opencontainers.image.vendor=Tailscale"
PUSH="${PUSH:-false}"
TARGET="${TARGET:-${DEFAULT_TARGET}}"
TAGS="${TAGS:-${DEFAULT_TAGS}}"
BASE="${BASE:-${DEFAULT_BASE}}"
PLATFORM="${PLATFORM:-}" # default to all platforms
+# OCI annotations that will be added to the image.
+# https://github.com/opencontainers/image-spec/blob/main/annotations.md
+ANNOTATIONS="${ANNOTATIONS:-${DEFAULT_ANNOTATIONS}}"
case "$TARGET" in
client)
@@ -43,9 +51,10 @@ case "$TARGET" in
--repos="${REPOS}" \
--push="${PUSH}" \
--target="${PLATFORM}" \
+ --annotations="${ANNOTATIONS}" \
/usr/local/bin/containerboot
;;
- operator)
+ k8s-operator)
DEFAULT_REPOS="tailscale/k8s-operator"
REPOS="${REPOS:-${DEFAULT_REPOS}}"
go run github.com/tailscale/mkctr \
@@ -56,9 +65,11 @@ case "$TARGET" in
-X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \
--base="${BASE}" \
--tags="${TAGS}" \
+ --gotags="ts_kube,ts_package_container" \
--repos="${REPOS}" \
--push="${PUSH}" \
--target="${PLATFORM}" \
+ --annotations="${ANNOTATIONS}" \
/usr/local/bin/operator
;;
k8s-nameserver)
@@ -72,9 +83,11 @@ case "$TARGET" in
-X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \
--base="${BASE}" \
--tags="${TAGS}" \
+ --gotags="ts_kube,ts_package_container" \
--repos="${REPOS}" \
--push="${PUSH}" \
--target="${PLATFORM}" \
+ --annotations="${ANNOTATIONS}" \
/usr/local/bin/k8s-nameserver
;;
*)
diff --git a/client/tailscale/localclient.go b/client/tailscale/localclient.go
index df51dc1cab52c..34c094a63fbf9 100644
--- a/client/tailscale/localclient.go
+++ b/client/tailscale/localclient.go
@@ -40,6 +40,7 @@ import (
"tailscale.com/types/dnstype"
"tailscale.com/types/key"
"tailscale.com/types/tkatype"
+ "tailscale.com/util/syspolicy/setting"
)
// defaultLocalClient is the default LocalClient when using the legacy
@@ -492,6 +493,17 @@ func (lc *LocalClient) DebugAction(ctx context.Context, action string) error {
return nil
}
+// DebugActionBody invokes a debug action with a body parameter, such as
+// "debug-force-prefer-derp".
+// These are development tools and subject to change or removal over time.
+func (lc *LocalClient) DebugActionBody(ctx context.Context, action string, rbody io.Reader) error {
+ body, err := lc.send(ctx, "POST", "/localapi/v0/debug?action="+url.QueryEscape(action), 200, rbody)
+ if err != nil {
+ return fmt.Errorf("error %w: %s", err, body)
+ }
+ return nil
+}
+
// DebugResultJSON invokes a debug action and returns its result as something JSON-able.
// These are development tools and subject to change or removal over time.
func (lc *LocalClient) DebugResultJSON(ctx context.Context, action string) (any, error) {
@@ -814,6 +826,33 @@ func (lc *LocalClient) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn
return decodeJSON[*ipn.Prefs](body)
}
+// GetEffectivePolicy returns the effective policy for the specified scope.
+func (lc *LocalClient) GetEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) {
+ scopeID, err := scope.MarshalText()
+ if err != nil {
+ return nil, err
+ }
+ body, err := lc.get200(ctx, "/localapi/v0/policy/"+string(scopeID))
+ if err != nil {
+ return nil, err
+ }
+ return decodeJSON[*setting.Snapshot](body)
+}
+
+// ReloadEffectivePolicy reloads the effective policy for the specified scope
+// by reading and merging policy settings from all applicable policy sources.
+func (lc *LocalClient) ReloadEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) {
+ scopeID, err := scope.MarshalText()
+ if err != nil {
+ return nil, err
+ }
+ body, err := lc.send(ctx, "POST", "/localapi/v0/policy/"+string(scopeID), 200, http.NoBody)
+ if err != nil {
+ return nil, err
+ }
+ return decodeJSON[*setting.Snapshot](body)
+}
+
// GetDNSOSConfig returns the system DNS configuration for the current device.
// That is, it returns the DNS configuration that the system would use if Tailscale weren't being used.
func (lc *LocalClient) GetDNSOSConfig(ctx context.Context) (*apitype.DNSOSConfig, error) {
@@ -1299,6 +1338,17 @@ func (lc *LocalClient) SetServeConfig(ctx context.Context, config *ipn.ServeConf
return nil
}
+// DisconnectControl shuts down all connections to control, thus making control consider this node inactive. This can be
+// run on HA subnet router or app connector replicas before shutting them down to ensure peers get told to switch over
+// to another replica whilst there is still some grace period for the existing connections to terminate.
+func (lc *LocalClient) DisconnectControl(ctx context.Context) error {
+ _, _, err := lc.sendWithHeaders(ctx, "POST", "/localapi/v0/disconnect-control", 200, nil, nil)
+ if err != nil {
+ return fmt.Errorf("error disconnecting control: %w", err)
+ }
+ return nil
+}
+
// NetworkLockDisable shuts down network-lock across the tailnet.
func (lc *LocalClient) NetworkLockDisable(ctx context.Context, secret []byte) error {
if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/disable", 200, bytes.NewReader(secret)); err != nil {
diff --git a/client/tailscale/tailscale.go b/client/tailscale/tailscale.go
index 8945619653c5d..8533b47129e01 100644
--- a/client/tailscale/tailscale.go
+++ b/client/tailscale/tailscale.go
@@ -51,6 +51,9 @@ type Client struct {
// HTTPClient optionally specifies an alternate HTTP client to use.
// If nil, http.DefaultClient is used.
HTTPClient *http.Client
+
+ // UserAgent optionally specifies an alternate User-Agent header
+ UserAgent string
}
func (c *Client) httpClient() *http.Client {
@@ -97,8 +100,9 @@ func (c *Client) setAuth(r *http.Request) {
// and can be changed manually by the user.
func NewClient(tailnet string, auth AuthMethod) *Client {
return &Client{
- tailnet: tailnet,
- auth: auth,
+ tailnet: tailnet,
+ auth: auth,
+ UserAgent: "tailscale-client-oss",
}
}
@@ -110,17 +114,16 @@ func (c *Client) Do(req *http.Request) (*http.Response, error) {
return nil, errors.New("use of Client without setting I_Acknowledge_This_API_Is_Unstable")
}
c.setAuth(req)
+ if c.UserAgent != "" {
+ req.Header.Set("User-Agent", c.UserAgent)
+ }
return c.httpClient().Do(req)
}
// sendRequest add the authentication key to the request and sends it. It
// receives the response and reads up to 10MB of it.
func (c *Client) sendRequest(req *http.Request) ([]byte, *http.Response, error) {
- if !I_Acknowledge_This_API_Is_Unstable {
- return nil, nil, errors.New("use of Client without setting I_Acknowledge_This_API_Is_Unstable")
- }
- c.setAuth(req)
- resp, err := c.httpClient().Do(req)
+ resp, err := c.Do(req)
if err != nil {
return nil, resp, err
}
diff --git a/client/web/web.go b/client/web/web.go
index 04ba2d086334a..56c5c92e808bb 100644
--- a/client/web/web.go
+++ b/client/web/web.go
@@ -26,6 +26,7 @@ import (
"tailscale.com/client/tailscale/apitype"
"tailscale.com/clientupdate"
"tailscale.com/envknob"
+ "tailscale.com/envknob/featureknob"
"tailscale.com/hostinfo"
"tailscale.com/ipn"
"tailscale.com/ipn/ipnstate"
@@ -960,37 +961,16 @@ func (s *Server) serveGetNodeData(w http.ResponseWriter, r *http.Request) {
}
func availableFeatures() map[string]bool {
- env := hostinfo.GetEnvType()
features := map[string]bool{
"advertise-exit-node": true, // available on all platforms
"advertise-routes": true, // available on all platforms
- "use-exit-node": canUseExitNode(env) == nil,
- "ssh": envknob.CanRunTailscaleSSH() == nil,
+ "use-exit-node": featureknob.CanUseExitNode() == nil,
+ "ssh": featureknob.CanRunTailscaleSSH() == nil,
"auto-update": version.IsUnstableBuild() && clientupdate.CanAutoUpdate(),
}
- if env == hostinfo.HomeAssistantAddOn {
- // Setting SSH on Home Assistant causes trouble on startup
- // (since the flag is not being passed to `tailscale up`).
- // Although Tailscale SSH does work here,
- // it's not terribly useful since it's running in a separate container.
- features["ssh"] = false
- }
return features
}
-func canUseExitNode(env hostinfo.EnvType) error {
- switch dist := distro.Get(); dist {
- case distro.Synology, // see https://github.com/tailscale/tailscale/issues/1995
- distro.QNAP,
- distro.Unraid:
- return fmt.Errorf("Tailscale exit nodes cannot be used on %s.", dist)
- }
- if env == hostinfo.HomeAssistantAddOn {
- return errors.New("Tailscale exit nodes cannot be used on Home Assistant.")
- }
- return nil
-}
-
// aclsAllowAccess returns whether tailnet ACLs (as expressed in the provided filter rules)
// permit any devices to access the local web client.
// This does not currently check whether a specific device can connect, just any device.
diff --git a/cmd/checkmetrics/checkmetrics.go b/cmd/checkmetrics/checkmetrics.go
new file mode 100644
index 0000000000000..fb9e8ab4c61ec
--- /dev/null
+++ b/cmd/checkmetrics/checkmetrics.go
@@ -0,0 +1,131 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+// checkmetrics validates that all metrics in the tailscale client-metrics
+// are documented in a given path or URL.
+package main
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "strings"
+ "time"
+
+ "tailscale.com/ipn/store/mem"
+ "tailscale.com/tsnet"
+ "tailscale.com/tstest/integration/testcontrol"
+ "tailscale.com/util/httpm"
+)
+
+var (
+ kbPath = flag.String("kb-path", "", "filepath to the client-metrics knowledge base")
+ kbUrl = flag.String("kb-url", "", "URL to the client-metrics knowledge base page")
+)
+
+func main() {
+ flag.Parse()
+ if *kbPath == "" && *kbUrl == "" {
+ log.Fatalf("either -kb-path or -kb-url must be set")
+ }
+
+ var control testcontrol.Server
+ ts := httptest.NewServer(&control)
+ defer ts.Close()
+
+ td, err := os.MkdirTemp("", "testcontrol")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(td)
+
+	// tsnet is not used as a Tailscale client, but as a way to
+ // boot up Tailscale, have all the metrics registered, and then
+	// verify that all the metrics are documented.
+ tsn := &tsnet.Server{
+ Dir: td,
+ Store: new(mem.Store),
+ UserLogf: log.Printf,
+ Ephemeral: true,
+ ControlURL: ts.URL,
+ }
+ if err := tsn.Start(); err != nil {
+ log.Fatal(err)
+ }
+ defer tsn.Close()
+
+ log.Printf("checking that all metrics are documented, looking for: %s", tsn.Sys().UserMetricsRegistry().MetricNames())
+
+ if *kbPath != "" {
+ kb, err := readKB(*kbPath)
+ if err != nil {
+ log.Fatalf("reading kb: %v", err)
+ }
+ missing := undocumentedMetrics(kb, tsn.Sys().UserMetricsRegistry().MetricNames())
+
+ if len(missing) > 0 {
+ log.Fatalf("found undocumented metrics in %q: %v", *kbPath, missing)
+ }
+ }
+
+ if *kbUrl != "" {
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ kb, err := getKB(ctx, *kbUrl)
+ if err != nil {
+ log.Fatalf("getting kb: %v", err)
+ }
+ missing := undocumentedMetrics(kb, tsn.Sys().UserMetricsRegistry().MetricNames())
+
+ if len(missing) > 0 {
+ log.Fatalf("found undocumented metrics in %q: %v", *kbUrl, missing)
+ }
+ }
+}
+
+func readKB(path string) (string, error) {
+ b, err := os.ReadFile(path)
+ if err != nil {
+ return "", fmt.Errorf("reading file: %w", err)
+ }
+
+ return string(b), nil
+}
+
+func getKB(ctx context.Context, url string) (string, error) {
+ req, err := http.NewRequestWithContext(ctx, httpm.GET, url, nil)
+ if err != nil {
+ return "", fmt.Errorf("creating request: %w", err)
+ }
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return "", fmt.Errorf("getting kb page: %w", err)
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("unexpected status code: %d", resp.StatusCode)
+ }
+
+ b, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return "", fmt.Errorf("reading body: %w", err)
+ }
+ return string(b), nil
+}
+
+func undocumentedMetrics(b string, metrics []string) []string {
+ var missing []string
+ for _, metric := range metrics {
+ if !strings.Contains(b, metric) {
+ missing = append(missing, metric)
+ }
+ }
+ return missing
+}
diff --git a/cmd/containerboot/healthz.go b/cmd/containerboot/healthz.go
index fb7fccd968816..895290733cf5f 100644
--- a/cmd/containerboot/healthz.go
+++ b/cmd/containerboot/healthz.go
@@ -7,7 +7,6 @@ package main
import (
"log"
- "net"
"net/http"
"sync"
)
@@ -23,29 +22,29 @@ type healthz struct {
func (h *healthz) ServeHTTP(w http.ResponseWriter, r *http.Request) {
h.Lock()
defer h.Unlock()
+
if h.hasAddrs {
w.Write([]byte("ok"))
} else {
- http.Error(w, "node currently has no tailscale IPs", http.StatusInternalServerError)
+ http.Error(w, "node currently has no tailscale IPs", http.StatusServiceUnavailable)
}
}
-// runHealthz runs a simple HTTP health endpoint on /healthz, listening on the
-// provided address. A containerized tailscale instance is considered healthy if
-// it has at least one tailnet IP address.
-func runHealthz(addr string, h *healthz) {
- lis, err := net.Listen("tcp", addr)
- if err != nil {
- log.Fatalf("error listening on the provided health endpoint address %q: %v", addr, err)
+func (h *healthz) update(healthy bool) {
+ h.Lock()
+ defer h.Unlock()
+
+ if h.hasAddrs != healthy {
+ log.Println("Setting healthy", healthy)
}
- mux := http.NewServeMux()
- mux.Handle("/healthz", h)
- log.Printf("Running healthcheck endpoint at %s/healthz", addr)
- hs := &http.Server{Handler: mux}
-
- go func() {
- if err := hs.Serve(lis); err != nil {
- log.Fatalf("failed running health endpoint: %v", err)
- }
- }()
+ h.hasAddrs = healthy
+}
+
+// healthHandlers registers a simple health handler at /healthz.
+// A containerized tailscale instance is considered healthy if
+// it has at least one tailnet IP address.
+func healthHandlers(mux *http.ServeMux) *healthz {
+ h := &healthz{}
+ mux.Handle("GET /healthz", h)
+ return h
}
diff --git a/cmd/containerboot/kube.go b/cmd/containerboot/kube.go
index 908cc01efc25a..4d00687ee4566 100644
--- a/cmd/containerboot/kube.go
+++ b/cmd/containerboot/kube.go
@@ -9,30 +9,56 @@ import (
"context"
"encoding/json"
"fmt"
- "log"
"net/http"
"net/netip"
"os"
"tailscale.com/kube/kubeapi"
"tailscale.com/kube/kubeclient"
+ "tailscale.com/kube/kubetypes"
"tailscale.com/tailcfg"
)
-// storeDeviceID writes deviceID to 'device_id' data field of the named
-// Kubernetes Secret.
-func storeDeviceID(ctx context.Context, secretName string, deviceID tailcfg.StableNodeID) error {
+// kubeClient is a wrapper around Tailscale's internal kube client that knows how to talk to the kube API server. We use
+// this rather than any of the upstream Kubernetes client libraries to avoid extra imports.
+type kubeClient struct {
+ kubeclient.Client
+ stateSecret string
+ canPatch bool // whether the client has permissions to patch Kubernetes Secrets
+}
+
+func newKubeClient(root string, stateSecret string) (*kubeClient, error) {
+ if root != "/" {
+ // If we are running in a test, we need to set the root path to the fake
+ // service account directory.
+ kubeclient.SetRootPathForTesting(root)
+ }
+ var err error
+ kc, err := kubeclient.New("tailscale-container")
+ if err != nil {
+ return nil, fmt.Errorf("Error creating kube client: %w", err)
+ }
+ if (root != "/") || os.Getenv("TS_KUBERNETES_READ_API_SERVER_ADDRESS_FROM_ENV") == "true" {
+ // Derive the API server address from the environment variables
+ // Used to set http server in tests, or optionally enabled by flag
+ kc.SetURL(fmt.Sprintf("https://%s:%s", os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS")))
+ }
+ return &kubeClient{Client: kc, stateSecret: stateSecret}, nil
+}
+
+// storeDeviceID writes deviceID to 'device_id' data field of the client's state Secret.
+func (kc *kubeClient) storeDeviceID(ctx context.Context, deviceID tailcfg.StableNodeID) error {
s := &kubeapi.Secret{
Data: map[string][]byte{
- "device_id": []byte(deviceID),
+ kubetypes.KeyDeviceID: []byte(deviceID),
},
}
- return kc.StrategicMergePatchSecret(ctx, secretName, s, "tailscale-container")
+ return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container")
}
-// storeDeviceEndpoints writes device's tailnet IPs and MagicDNS name to fields
-// 'device_ips', 'device_fqdn' of the named Kubernetes Secret.
-func storeDeviceEndpoints(ctx context.Context, secretName string, fqdn string, addresses []netip.Prefix) error {
+// storeDeviceEndpoints writes device's tailnet IPs and MagicDNS name to fields 'device_ips', 'device_fqdn' of client's
+// state Secret.
+func (kc *kubeClient) storeDeviceEndpoints(ctx context.Context, fqdn string, addresses []netip.Prefix) error {
var ips []string
for _, addr := range addresses {
ips = append(ips, addr.Addr().String())
@@ -44,16 +70,28 @@ func storeDeviceEndpoints(ctx context.Context, secretName string, fqdn string, a
s := &kubeapi.Secret{
Data: map[string][]byte{
- "device_fqdn": []byte(fqdn),
- "device_ips": deviceIPs,
+ kubetypes.KeyDeviceFQDN: []byte(fqdn),
+ kubetypes.KeyDeviceIPs: deviceIPs,
},
}
- return kc.StrategicMergePatchSecret(ctx, secretName, s, "tailscale-container")
+ return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container")
+}
+
+// storeHTTPSEndpoint writes an HTTPS endpoint exposed by this device via 'tailscale serve' to the client's state
+// Secret. In practice this will be the same value that gets written to 'device_fqdn', but this should only be called
+// when the serve config has been successfully set up.
+func (kc *kubeClient) storeHTTPSEndpoint(ctx context.Context, ep string) error {
+ s := &kubeapi.Secret{
+ Data: map[string][]byte{
+ kubetypes.KeyHTTPSEndpoint: []byte(ep),
+ },
+ }
+ return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container")
}
// deleteAuthKey deletes the 'authkey' field of the given kube
// secret. No-op if there is no authkey in the secret.
-func deleteAuthKey(ctx context.Context, secretName string) error {
+func (kc *kubeClient) deleteAuthKey(ctx context.Context) error {
// m is a JSON Patch data structure, see https://jsonpatch.com/ or RFC 6902.
m := []kubeclient.JSONPatch{
{
@@ -61,7 +99,7 @@ func deleteAuthKey(ctx context.Context, secretName string) error {
Path: "/data/authkey",
},
}
- if err := kc.JSONPatchSecret(ctx, secretName, m); err != nil {
+ if err := kc.JSONPatchResource(ctx, kc.stateSecret, kubeclient.TypeSecrets, m); err != nil {
if s, ok := err.(*kubeapi.Status); ok && s.Code == http.StatusUnprocessableEntity {
// This is kubernetes-ese for "the field you asked to
// delete already doesn't exist", aka no-op.
@@ -72,22 +110,19 @@ func deleteAuthKey(ctx context.Context, secretName string) error {
return nil
}
-var kc kubeclient.Client
-
-func initKubeClient(root string) {
- if root != "/" {
- // If we are running in a test, we need to set the root path to the fake
- // service account directory.
- kubeclient.SetRootPathForTesting(root)
+// storeCapVerUID stores the current capability version of tailscale and, if provided, UID of the Pod in the tailscale
+// state Secret.
+// These two fields are used by the Kubernetes Operator to observe the current capability version of tailscaled running in this container.
+func (kc *kubeClient) storeCapVerUID(ctx context.Context, podUID string) error {
+ capVerS := fmt.Sprintf("%d", tailcfg.CurrentCapabilityVersion)
+ d := map[string][]byte{
+ kubetypes.KeyCapVer: []byte(capVerS),
}
- var err error
- kc, err = kubeclient.New()
- if err != nil {
- log.Fatalf("Error creating kube client: %v", err)
+ if podUID != "" {
+ d[kubetypes.KeyPodUID] = []byte(podUID)
}
- if (root != "/") || os.Getenv("TS_KUBERNETES_READ_API_SERVER_ADDRESS_FROM_ENV") == "true" {
- // Derive the API server address from the environment variables
- // Used to set http server in tests, or optionally enabled by flag
- kc.SetURL(fmt.Sprintf("https://%s:%s", os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS")))
+ s := &kubeapi.Secret{
+ Data: d,
}
+ return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container")
}
diff --git a/cmd/containerboot/kube_test.go b/cmd/containerboot/kube_test.go
index 1a5730548838f..2ba69af7c0f57 100644
--- a/cmd/containerboot/kube_test.go
+++ b/cmd/containerboot/kube_test.go
@@ -21,7 +21,7 @@ func TestSetupKube(t *testing.T) {
cfg *settings
wantErr bool
wantCfg *settings
- kc kubeclient.Client
+ kc *kubeClient
}{
{
name: "TS_AUTHKEY set, state Secret exists",
@@ -29,14 +29,14 @@ func TestSetupKube(t *testing.T) {
AuthKey: "foo",
KubeSecret: "foo",
},
- kc: &kubeclient.FakeClient{
+ kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{
CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) {
return false, false, nil
},
GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) {
return nil, nil
},
- },
+ }},
wantCfg: &settings{
AuthKey: "foo",
KubeSecret: "foo",
@@ -48,14 +48,14 @@ func TestSetupKube(t *testing.T) {
AuthKey: "foo",
KubeSecret: "foo",
},
- kc: &kubeclient.FakeClient{
+ kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{
CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) {
return false, true, nil
},
GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) {
return nil, &kubeapi.Status{Code: 404}
},
- },
+ }},
wantCfg: &settings{
AuthKey: "foo",
KubeSecret: "foo",
@@ -67,14 +67,14 @@ func TestSetupKube(t *testing.T) {
AuthKey: "foo",
KubeSecret: "foo",
},
- kc: &kubeclient.FakeClient{
+ kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{
CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) {
return false, false, nil
},
GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) {
return nil, &kubeapi.Status{Code: 404}
},
- },
+ }},
wantCfg: &settings{
AuthKey: "foo",
KubeSecret: "foo",
@@ -87,14 +87,14 @@ func TestSetupKube(t *testing.T) {
AuthKey: "foo",
KubeSecret: "foo",
},
- kc: &kubeclient.FakeClient{
+ kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{
CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) {
return false, false, nil
},
GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) {
return nil, &kubeapi.Status{Code: 403}
},
- },
+ }},
wantCfg: &settings{
AuthKey: "foo",
KubeSecret: "foo",
@@ -111,11 +111,11 @@ func TestSetupKube(t *testing.T) {
AuthKey: "foo",
KubeSecret: "foo",
},
- kc: &kubeclient.FakeClient{
+ kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{
CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) {
return false, false, errors.New("broken")
},
- },
+ }},
wantErr: true,
},
{
@@ -127,14 +127,14 @@ func TestSetupKube(t *testing.T) {
wantCfg: &settings{
KubeSecret: "foo",
},
- kc: &kubeclient.FakeClient{
+ kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{
CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) {
return false, true, nil
},
GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) {
return nil, &kubeapi.Status{Code: 404}
},
- },
+ }},
},
{
// Interactive login using URL in Pod logs
@@ -145,28 +145,28 @@ func TestSetupKube(t *testing.T) {
wantCfg: &settings{
KubeSecret: "foo",
},
- kc: &kubeclient.FakeClient{
+ kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{
CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) {
return false, false, nil
},
GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) {
return &kubeapi.Secret{}, nil
},
- },
+ }},
},
{
name: "TS_AUTHKEY not set, state Secret contains auth key, we do not have RBAC to patch it",
cfg: &settings{
KubeSecret: "foo",
},
- kc: &kubeclient.FakeClient{
+ kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{
CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) {
return false, false, nil
},
GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) {
return &kubeapi.Secret{Data: map[string][]byte{"authkey": []byte("foo")}}, nil
},
- },
+ }},
wantCfg: &settings{
KubeSecret: "foo",
},
@@ -177,14 +177,14 @@ func TestSetupKube(t *testing.T) {
cfg: &settings{
KubeSecret: "foo",
},
- kc: &kubeclient.FakeClient{
+ kc: &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{
CheckSecretPermissionsImpl: func(context.Context, string) (bool, bool, error) {
return true, false, nil
},
GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) {
return &kubeapi.Secret{Data: map[string][]byte{"authkey": []byte("foo")}}, nil
},
- },
+ }},
wantCfg: &settings{
KubeSecret: "foo",
AuthKey: "foo",
@@ -194,9 +194,9 @@ func TestSetupKube(t *testing.T) {
}
for _, tt := range tests {
- kc = tt.kc
+ kc := tt.kc
t.Run(tt.name, func(t *testing.T) {
- if err := tt.cfg.setupKube(context.Background()); (err != nil) != tt.wantErr {
+ if err := tt.cfg.setupKube(context.Background(), kc); (err != nil) != tt.wantErr {
t.Errorf("settings.setupKube() error = %v, wantErr %v", err, tt.wantErr)
}
if diff := cmp.Diff(*tt.cfg, *tt.wantCfg); diff != "" {
diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go
index 4c8ba58073c69..7411ea9496cfd 100644
--- a/cmd/containerboot/main.go
+++ b/cmd/containerboot/main.go
@@ -52,11 +52,17 @@
// ${TS_CERT_DOMAIN}, it will be replaced with the value of the available FQDN.
// It cannot be used in conjunction with TS_DEST_IP. The file is watched for changes,
// and will be re-applied when it changes.
-// - TS_HEALTHCHECK_ADDR_PORT: if specified, an HTTP health endpoint will be
-// served at /healthz at the provided address, which should be in form [
]:.
-// If not set, no health check will be run. If set to :, addr will default to 0.0.0.0
-// The health endpoint will return 200 OK if this node has at least one tailnet IP address,
-// otherwise returns 503.
+// - TS_HEALTHCHECK_ADDR_PORT: deprecated, use TS_ENABLE_HEALTH_CHECK instead and optionally
+// set TS_LOCAL_ADDR_PORT. Will be removed in 1.82.0.
+// - TS_LOCAL_ADDR_PORT: the address and port to serve local metrics and health
+// check endpoints if enabled via TS_ENABLE_METRICS and/or TS_ENABLE_HEALTH_CHECK.
+// Defaults to [::]:9002, serving on all available interfaces.
+// - TS_ENABLE_METRICS: if true, a metrics endpoint will be served at /metrics on
+// the address specified by TS_LOCAL_ADDR_PORT. See https://tailscale.com/kb/1482/client-metrics
+// for more information on the metrics exposed.
+// - TS_ENABLE_HEALTH_CHECK: if true, a health check endpoint will be served at /healthz on
+// the address specified by TS_LOCAL_ADDR_PORT. The health endpoint will return 200
+// OK if this node has at least one tailnet IP address, otherwise returns 503.
// NB: the health criteria might change in the future.
// - TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR: if specified, a path to a
// directory that containers tailscaled config in file. The config file needs to be
@@ -99,10 +105,10 @@ import (
"log"
"math"
"net"
+ "net/http"
"net/netip"
"os"
"os/signal"
- "path"
"path/filepath"
"slices"
"strings"
@@ -115,6 +121,7 @@ import (
"tailscale.com/client/tailscale"
"tailscale.com/ipn"
kubeutils "tailscale.com/k8s-operator"
+ "tailscale.com/kube/kubetypes"
"tailscale.com/tailcfg"
"tailscale.com/types/logger"
"tailscale.com/types/ptr"
@@ -161,9 +168,13 @@ func main() {
bootCtx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
+ var kc *kubeClient
if cfg.InKubernetes {
- initKubeClient(cfg.Root)
- if err := cfg.setupKube(bootCtx); err != nil {
+ kc, err = newKubeClient(cfg.Root, cfg.KubeSecret)
+ if err != nil {
+ log.Fatalf("error initializing kube client: %v", err)
+ }
+ if err := cfg.setupKube(bootCtx, kc); err != nil {
log.Fatalf("error setting up for running on Kubernetes: %v", err)
}
}
@@ -179,6 +190,34 @@ func main() {
}
defer killTailscaled()
+ var healthCheck *healthz
+ if cfg.HealthCheckAddrPort != "" {
+ mux := http.NewServeMux()
+
+ log.Printf("Running healthcheck endpoint at %s/healthz", cfg.HealthCheckAddrPort)
+ healthCheck = healthHandlers(mux)
+
+ close := runHTTPServer(mux, cfg.HealthCheckAddrPort)
+ defer close()
+ }
+
+ if cfg.localMetricsEnabled() || cfg.localHealthEnabled() {
+ mux := http.NewServeMux()
+
+ if cfg.localMetricsEnabled() {
+ log.Printf("Running metrics endpoint at %s/metrics", cfg.LocalAddrPort)
+ metricsHandlers(mux, client, cfg.DebugAddrPort)
+ }
+
+ if cfg.localHealthEnabled() {
+ log.Printf("Running healthcheck endpoint at %s/healthz", cfg.LocalAddrPort)
+ healthCheck = healthHandlers(mux)
+ }
+
+ close := runHTTPServer(mux, cfg.LocalAddrPort)
+ defer close()
+ }
+
if cfg.EnableForwardingOptimizations {
if err := client.SetUDPGROForwarding(bootCtx); err != nil {
log.Printf("[unexpected] error enabling UDP GRO forwarding: %v", err)
@@ -285,12 +324,18 @@ authLoop:
}
}
+ // Remove any serve config and advertised HTTPS endpoint that may have been set by a previous run of
+ // containerboot, but only if we're providing a new one.
if cfg.ServeConfigPath != "" {
- // Remove any serve config that may have been set by a previous run of
- // containerboot, but only if we're providing a new one.
+ log.Printf("serve proxy: unsetting previous config")
if err := client.SetServeConfig(ctx, new(ipn.ServeConfig)); err != nil {
log.Fatalf("failed to unset serve config: %v", err)
}
+ if hasKubeStateStore(cfg) {
+ if err := kc.storeHTTPSEndpoint(ctx, ""); err != nil {
+ log.Fatalf("failed to update HTTPS endpoint in tailscale state: %v", err)
+ }
+ }
}
if hasKubeStateStore(cfg) && isTwoStepConfigAuthOnce(cfg) {
@@ -298,11 +343,17 @@ authLoop:
// authkey is no longer needed. We don't strictly need to
// wipe it, but it's good hygiene.
log.Printf("Deleting authkey from kube secret")
- if err := deleteAuthKey(ctx, cfg.KubeSecret); err != nil {
+ if err := kc.deleteAuthKey(ctx); err != nil {
log.Fatalf("deleting authkey from kube secret: %v", err)
}
}
+ if hasKubeStateStore(cfg) {
+ if err := kc.storeCapVerUID(ctx, cfg.PodUID); err != nil {
+ log.Fatalf("storing capability version and UID: %v", err)
+ }
+ }
+
w, err = client.WatchIPNBus(ctx, ipn.NotifyInitialNetMap|ipn.NotifyInitialState)
if err != nil {
log.Fatalf("rewatching tailscaled for updates after auth: %v", err)
@@ -322,12 +373,9 @@ authLoop:
certDomain = new(atomic.Pointer[string])
certDomainChanged = make(chan bool, 1)
- h = &healthz{} // http server for the healthz endpoint
- healthzRunner = sync.OnceFunc(func() { runHealthz(cfg.HealthCheckAddrPort, h) })
+ triggerWatchServeConfigChanges sync.Once
)
- if cfg.ServeConfigPath != "" {
- go watchServeConfigChanges(ctx, cfg.ServeConfigPath, certDomainChanged, certDomain, client)
- }
+
var nfr linuxfw.NetfilterRunner
if isL3Proxy(cfg) {
nfr, err = newNetfilterRunner(log.Printf)
@@ -428,7 +476,7 @@ runLoop:
// fails.
deviceID := n.NetMap.SelfNode.StableID()
if hasKubeStateStore(cfg) && deephash.Update(¤tDeviceID, &deviceID) {
- if err := storeDeviceID(ctx, cfg.KubeSecret, n.NetMap.SelfNode.StableID()); err != nil {
+ if err := kc.storeDeviceID(ctx, n.NetMap.SelfNode.StableID()); err != nil {
log.Fatalf("storing device ID in Kubernetes Secret: %v", err)
}
}
@@ -501,8 +549,11 @@ runLoop:
resetTimer(false)
backendAddrs = newBackendAddrs
}
- if cfg.ServeConfigPath != "" && len(n.NetMap.DNS.CertDomains) != 0 {
- cd := n.NetMap.DNS.CertDomains[0]
+ if cfg.ServeConfigPath != "" {
+ cd := certDomainFromNetmap(n.NetMap)
+ if cd == "" {
+ cd = kubetypes.ValueNoHTTPS
+ }
prev := certDomain.Swap(ptr.To(cd))
if prev == nil || *prev != cd {
select {
@@ -544,17 +595,21 @@ runLoop:
// TODO (irbekrm): instead of using the IP and FQDN, have some other mechanism for the proxy signal that it is 'Ready'.
deviceEndpoints := []any{n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses()}
if hasKubeStateStore(cfg) && deephash.Update(¤tDeviceEndpoints, &deviceEndpoints) {
- if err := storeDeviceEndpoints(ctx, cfg.KubeSecret, n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses().AsSlice()); err != nil {
+ if err := kc.storeDeviceEndpoints(ctx, n.NetMap.SelfNode.Name(), n.NetMap.SelfNode.Addresses().AsSlice()); err != nil {
log.Fatalf("storing device IPs and FQDN in Kubernetes Secret: %v", err)
}
}
- if cfg.HealthCheckAddrPort != "" {
- h.Lock()
- h.hasAddrs = len(addrs) != 0
- h.Unlock()
- healthzRunner()
+ if healthCheck != nil {
+ healthCheck.update(len(addrs) != 0)
}
+
+ if cfg.ServeConfigPath != "" {
+ triggerWatchServeConfigChanges.Do(func() {
+ go watchServeConfigChanges(ctx, cfg.ServeConfigPath, certDomainChanged, certDomain, client, kc)
+ })
+ }
+
if egressSvcsNotify != nil {
egressSvcsNotify <- n
}
@@ -731,7 +786,6 @@ func tailscaledConfigFilePath() string {
}
cv, err := kubeutils.CapVerFromFileName(e.Name())
if err != nil {
- log.Printf("skipping file %q in tailscaled config directory %q: %v", e.Name(), dir, err)
continue
}
if cv > maxCompatVer && cv <= tailcfg.CurrentCapabilityVersion {
@@ -739,8 +793,28 @@ func tailscaledConfigFilePath() string {
}
}
if maxCompatVer == -1 {
- log.Fatalf("no tailscaled config file found in %q for current capability version %q", dir, tailcfg.CurrentCapabilityVersion)
+ log.Fatalf("no tailscaled config file found in %q for current capability version %d", dir, tailcfg.CurrentCapabilityVersion)
+ }
+ filePath := filepath.Join(dir, kubeutils.TailscaledConfigFileName(maxCompatVer))
+ log.Printf("Using tailscaled config file %q to match current capability version %d", filePath, tailcfg.CurrentCapabilityVersion)
+ return filePath
+}
+
+func runHTTPServer(mux *http.ServeMux, addr string) (close func() error) {
+ ln, err := net.Listen("tcp", addr)
+ if err != nil {
+ log.Fatalf("failed to listen on addr %q: %v", addr, err)
+ }
+ srv := &http.Server{Handler: mux}
+
+ go func() {
+ if err := srv.Serve(ln); err != nil {
+ log.Fatalf("failed running server: %v", err)
+ }
+ }()
+
+ return func() error {
+ err := srv.Shutdown(context.Background())
+ return errors.Join(err, ln.Close())
}
- log.Printf("Using tailscaled config file %q for capability version %q", maxCompatVer, tailcfg.CurrentCapabilityVersion)
- return path.Join(dir, kubeutils.TailscaledConfigFileName(maxCompatVer))
}
diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go
index 5c92787ce6079..83e001b62c09e 100644
--- a/cmd/containerboot/main_test.go
+++ b/cmd/containerboot/main_test.go
@@ -101,6 +101,26 @@ func TestContainerBoot(t *testing.T) {
argFile := filepath.Join(d, "args")
runningSockPath := filepath.Join(d, "tmp/tailscaled.sock")
+ var localAddrPort, healthAddrPort int
+ for _, p := range []*int{&localAddrPort, &healthAddrPort} {
+ ln, err := net.Listen("tcp", ":0")
+ if err != nil {
+ t.Fatalf("Failed to open listener: %v", err)
+ }
+ if err := ln.Close(); err != nil {
+ t.Fatalf("Failed to close listener: %v", err)
+ }
+ port := ln.Addr().(*net.TCPAddr).Port
+ *p = port
+ }
+ metricsURL := func(port int) string {
+ return fmt.Sprintf("http://127.0.0.1:%d/metrics", port)
+ }
+ healthURL := func(port int) string {
+ return fmt.Sprintf("http://127.0.0.1:%d/healthz", port)
+ }
+
+ capver := fmt.Sprintf("%d", tailcfg.CurrentCapabilityVersion)
type phase struct {
// If non-nil, send this IPN bus notification (and remember it as the
@@ -119,6 +139,8 @@ func TestContainerBoot(t *testing.T) {
// WantFatalLog is the fatal log message we expect from containerboot.
// If set for a phase, the test will finish on that phase.
WantFatalLog string
+
+ EndpointStatuses map[string]int
}
runningNotify := &ipn.Notify{
State: ptr.To(ipn.Running),
@@ -147,6 +169,11 @@ func TestContainerBoot(t *testing.T) {
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking",
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false",
},
+ // No metrics or health by default.
+ EndpointStatuses: map[string]int{
+ metricsURL(9002): -1,
+ healthURL(9002): -1,
+ },
},
{
Notify: runningNotify,
@@ -453,10 +480,11 @@ func TestContainerBoot(t *testing.T) {
{
Notify: runningNotify,
WantKubeSecret: map[string]string{
- "authkey": "tskey-key",
- "device_fqdn": "test-node.test.ts.net",
- "device_id": "myID",
- "device_ips": `["100.64.0.1"]`,
+ "authkey": "tskey-key",
+ "device_fqdn": "test-node.test.ts.net",
+ "device_id": "myID",
+ "device_ips": `["100.64.0.1"]`,
+ "tailscale_capver": capver,
},
},
},
@@ -546,9 +574,10 @@ func TestContainerBoot(t *testing.T) {
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false",
},
WantKubeSecret: map[string]string{
- "device_fqdn": "test-node.test.ts.net",
- "device_id": "myID",
- "device_ips": `["100.64.0.1"]`,
+ "device_fqdn": "test-node.test.ts.net",
+ "device_id": "myID",
+ "device_ips": `["100.64.0.1"]`,
+ "tailscale_capver": capver,
},
},
},
@@ -575,10 +604,11 @@ func TestContainerBoot(t *testing.T) {
{
Notify: runningNotify,
WantKubeSecret: map[string]string{
- "authkey": "tskey-key",
- "device_fqdn": "test-node.test.ts.net",
- "device_id": "myID",
- "device_ips": `["100.64.0.1"]`,
+ "authkey": "tskey-key",
+ "device_fqdn": "test-node.test.ts.net",
+ "device_id": "myID",
+ "device_ips": `["100.64.0.1"]`,
+ "tailscale_capver": capver,
},
},
{
@@ -593,10 +623,11 @@ func TestContainerBoot(t *testing.T) {
},
},
WantKubeSecret: map[string]string{
- "authkey": "tskey-key",
- "device_fqdn": "new-name.test.ts.net",
- "device_id": "newID",
- "device_ips": `["100.64.0.1"]`,
+ "authkey": "tskey-key",
+ "device_fqdn": "new-name.test.ts.net",
+ "device_id": "newID",
+ "device_ips": `["100.64.0.1"]`,
+ "tailscale_capver": capver,
},
},
},
@@ -700,6 +731,104 @@ func TestContainerBoot(t *testing.T) {
},
},
},
+ {
+ Name: "metrics_enabled",
+ Env: map[string]string{
+ "TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", localAddrPort),
+ "TS_ENABLE_METRICS": "true",
+ },
+ Phases: []phase{
+ {
+ WantCmds: []string{
+ "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking",
+ "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false",
+ },
+ EndpointStatuses: map[string]int{
+ metricsURL(localAddrPort): 200,
+ healthURL(localAddrPort): -1,
+ },
+ }, {
+ Notify: runningNotify,
+ },
+ },
+ },
+ {
+ Name: "health_enabled",
+ Env: map[string]string{
+ "TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", localAddrPort),
+ "TS_ENABLE_HEALTH_CHECK": "true",
+ },
+ Phases: []phase{
+ {
+ WantCmds: []string{
+ "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking",
+ "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false",
+ },
+ EndpointStatuses: map[string]int{
+ metricsURL(localAddrPort): -1,
+ healthURL(localAddrPort): 503, // Doesn't start passing until the next phase.
+ },
+ }, {
+ Notify: runningNotify,
+ EndpointStatuses: map[string]int{
+ metricsURL(localAddrPort): -1,
+ healthURL(localAddrPort): 200,
+ },
+ },
+ },
+ },
+ {
+ Name: "metrics_and_health_on_same_port",
+ Env: map[string]string{
+ "TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", localAddrPort),
+ "TS_ENABLE_METRICS": "true",
+ "TS_ENABLE_HEALTH_CHECK": "true",
+ },
+ Phases: []phase{
+ {
+ WantCmds: []string{
+ "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking",
+ "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false",
+ },
+ EndpointStatuses: map[string]int{
+ metricsURL(localAddrPort): 200,
+ healthURL(localAddrPort): 503, // Doesn't start passing until the next phase.
+ },
+ }, {
+ Notify: runningNotify,
+ EndpointStatuses: map[string]int{
+ metricsURL(localAddrPort): 200,
+ healthURL(localAddrPort): 200,
+ },
+ },
+ },
+ },
+ {
+ Name: "local_metrics_and_deprecated_health",
+ Env: map[string]string{
+ "TS_LOCAL_ADDR_PORT": fmt.Sprintf("[::]:%d", localAddrPort),
+ "TS_ENABLE_METRICS": "true",
+ "TS_HEALTHCHECK_ADDR_PORT": fmt.Sprintf("[::]:%d", healthAddrPort),
+ },
+ Phases: []phase{
+ {
+ WantCmds: []string{
+ "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking",
+ "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false",
+ },
+ EndpointStatuses: map[string]int{
+ metricsURL(localAddrPort): 200,
+ healthURL(healthAddrPort): 503, // Doesn't start passing until the next phase.
+ },
+ }, {
+ Notify: runningNotify,
+ EndpointStatuses: map[string]int{
+ metricsURL(localAddrPort): 200,
+ healthURL(healthAddrPort): 200,
+ },
+ },
+ },
+ },
}
for _, test := range tests {
@@ -796,7 +925,26 @@ func TestContainerBoot(t *testing.T) {
return nil
})
if err != nil {
- t.Fatal(err)
+ t.Fatalf("phase %d: %v", i, err)
+ }
+
+ for url, want := range p.EndpointStatuses {
+ err := tstest.WaitFor(2*time.Second, func() error {
+ resp, err := http.Get(url)
+ if err != nil && want != -1 {
+ return fmt.Errorf("GET %s: %v", url, err)
+ }
+ if want > 0 && resp.StatusCode != want {
+ defer resp.Body.Close()
+ body, _ := io.ReadAll(resp.Body)
+ return fmt.Errorf("GET %s, want %d, got %d\n%s", url, want, resp.StatusCode, string(body))
+ }
+
+ return nil
+ })
+ if err != nil {
+ t.Fatalf("phase %d: %v", i, err)
+ }
}
}
waitLogLine(t, 2*time.Second, cbOut, "Startup complete, waiting for shutdown signal")
@@ -955,6 +1103,12 @@ func (l *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if r.Method != "GET" {
panic(fmt.Sprintf("unsupported method %q", r.Method))
}
+ case "/localapi/v0/usermetrics":
+ if r.Method != "GET" {
+ panic(fmt.Sprintf("unsupported method %q", r.Method))
+ }
+ w.Write([]byte("fake metrics"))
+ return
default:
panic(fmt.Sprintf("unsupported path %q", r.URL.Path))
}
diff --git a/cmd/containerboot/metrics.go b/cmd/containerboot/metrics.go
new file mode 100644
index 0000000000000..a8b9222a5ab1f
--- /dev/null
+++ b/cmd/containerboot/metrics.go
@@ -0,0 +1,79 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build linux
+
+package main
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+
+ "tailscale.com/client/tailscale"
+ "tailscale.com/client/tailscale/apitype"
+)
+
+// metrics is a simple metrics HTTP server; if enabled, it forwards requests to
+// tailscaled's LocalAPI usermetrics endpoint at /localapi/v0/usermetrics.
+type metrics struct {
+ debugEndpoint string
+ lc *tailscale.LocalClient
+}
+
+func proxy(w http.ResponseWriter, r *http.Request, url string, do func(*http.Request) (*http.Response, error)) {
+ req, err := http.NewRequestWithContext(r.Context(), r.Method, url, r.Body)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("failed to construct request: %s", err), http.StatusInternalServerError)
+ return
+ }
+ req.Header = r.Header.Clone()
+
+ resp, err := do(req)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("failed to proxy request: %s", err), http.StatusInternalServerError)
+ return
+ }
+ defer resp.Body.Close()
+
+ for key, val := range resp.Header {
+ for _, v := range val {
+ w.Header().Add(key, v)
+ }
+ }
+ w.WriteHeader(resp.StatusCode)
+ if _, err := io.Copy(w, resp.Body); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+
+func (m *metrics) handleMetrics(w http.ResponseWriter, r *http.Request) {
+ localAPIURL := "http://" + apitype.LocalAPIHost + "/localapi/v0/usermetrics"
+ proxy(w, r, localAPIURL, m.lc.DoLocalRequest)
+}
+
+func (m *metrics) handleDebug(w http.ResponseWriter, r *http.Request) {
+ if m.debugEndpoint == "" {
+ http.Error(w, "debug endpoint not configured", http.StatusNotFound)
+ return
+ }
+
+ debugURL := "http://" + m.debugEndpoint + r.URL.Path
+ proxy(w, r, debugURL, http.DefaultClient.Do)
+}
+
+// metricsHandlers registers a simple HTTP metrics handler at /metrics, forwarding
+// requests to tailscaled's /localapi/v0/usermetrics API.
+//
+// In 1.78.x and 1.80.x, it also proxies debug paths to tailscaled's debug
+// endpoint if configured to ease migration for a breaking change serving user
+// metrics instead of debug metrics on the "metrics" port.
+func metricsHandlers(mux *http.ServeMux, lc *tailscale.LocalClient, debugAddrPort string) {
+ m := &metrics{
+ lc: lc,
+ debugEndpoint: debugAddrPort,
+ }
+
+ mux.HandleFunc("GET /metrics", m.handleMetrics)
+ mux.HandleFunc("/debug/", m.handleDebug) // TODO(tomhjp): Remove for 1.82.0 release.
+}
diff --git a/cmd/containerboot/serve.go b/cmd/containerboot/serve.go
index 6c22b3eeb651e..14c7f00d7450f 100644
--- a/cmd/containerboot/serve.go
+++ b/cmd/containerboot/serve.go
@@ -19,6 +19,8 @@ import (
"github.com/fsnotify/fsnotify"
"tailscale.com/client/tailscale"
"tailscale.com/ipn"
+ "tailscale.com/kube/kubetypes"
+ "tailscale.com/types/netmap"
)
// watchServeConfigChanges watches path for changes, and when it sees one, reads
@@ -26,21 +28,21 @@ import (
// applies it to lc. It exits when ctx is canceled. cdChanged is a channel that
// is written to when the certDomain changes, causing the serve config to be
// re-read and applied.
-func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan bool, certDomainAtomic *atomic.Pointer[string], lc *tailscale.LocalClient) {
+func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan bool, certDomainAtomic *atomic.Pointer[string], lc *tailscale.LocalClient, kc *kubeClient) {
if certDomainAtomic == nil {
- panic("cd must not be nil")
+ panic("certDomainAtomic must not be nil")
}
var tickChan <-chan time.Time
var eventChan <-chan fsnotify.Event
if w, err := fsnotify.NewWatcher(); err != nil {
- log.Printf("failed to create fsnotify watcher, timer-only mode: %v", err)
+ log.Printf("serve proxy: failed to create fsnotify watcher, timer-only mode: %v", err)
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
tickChan = ticker.C
} else {
defer w.Close()
if err := w.Add(filepath.Dir(path)); err != nil {
- log.Fatalf("failed to add fsnotify watch: %v", err)
+ log.Fatalf("serve proxy: failed to add fsnotify watch: %v", err)
}
eventChan = w.Events
}
@@ -59,24 +61,62 @@ func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan
// k8s handles these mounts. So just re-read the file and apply it
// if it's changed.
}
- if certDomain == "" {
- continue
- }
sc, err := readServeConfig(path, certDomain)
if err != nil {
- log.Fatalf("failed to read serve config: %v", err)
+ log.Fatalf("serve proxy: failed to read serve config: %v", err)
}
if prevServeConfig != nil && reflect.DeepEqual(sc, prevServeConfig) {
continue
}
- log.Printf("Applying serve config")
- if err := lc.SetServeConfig(ctx, sc); err != nil {
- log.Fatalf("failed to set serve config: %v", err)
+ validateHTTPSServe(certDomain, sc)
+ if err := updateServeConfig(ctx, sc, certDomain, lc); err != nil {
+ log.Fatalf("serve proxy: error updating serve config: %v", err)
+ }
+ if kc != nil && kc.canPatch {
+ if err := kc.storeHTTPSEndpoint(ctx, certDomain); err != nil {
+ log.Fatalf("serve proxy: error storing HTTPS endpoint: %v", err)
+ }
}
prevServeConfig = sc
}
}
+func certDomainFromNetmap(nm *netmap.NetworkMap) string {
+ if len(nm.DNS.CertDomains) == 0 {
+ return ""
+ }
+ return nm.DNS.CertDomains[0]
+}
+
+func updateServeConfig(ctx context.Context, sc *ipn.ServeConfig, certDomain string, lc *tailscale.LocalClient) error {
+ // TODO(irbekrm): This means that serve config that does not expose HTTPS endpoint will not be set for a tailnet
+ // that does not have HTTPS enabled. We probably want to fix this.
+ if certDomain == kubetypes.ValueNoHTTPS {
+ return nil
+ }
+ log.Printf("serve proxy: applying serve config")
+ return lc.SetServeConfig(ctx, sc)
+}
+
+func validateHTTPSServe(certDomain string, sc *ipn.ServeConfig) {
+ if certDomain != kubetypes.ValueNoHTTPS || !hasHTTPSEndpoint(sc) {
+ return
+ }
+ log.Printf(
+ `serve proxy: this node is configured as a proxy that exposes an HTTPS endpoint to tailnet,
+ (perhaps a Kubernetes operator Ingress proxy) but it is not able to issue TLS certs, so this will likely not work.
+ To make it work, ensure that HTTPS is enabled for your tailnet, see https://tailscale.com/kb/1153/enabling-https for more details.`)
+}
+
+func hasHTTPSEndpoint(cfg *ipn.ServeConfig) bool {
+ for _, tcpCfg := range cfg.TCP {
+ if tcpCfg.HTTPS {
+ return true
+ }
+ }
+ return false
+}
+
// readServeConfig reads the ipn.ServeConfig from path, replacing
// ${TS_CERT_DOMAIN} with certDomain.
func readServeConfig(path, certDomain string) (*ipn.ServeConfig, error) {
diff --git a/cmd/containerboot/services.go b/cmd/containerboot/services.go
index 4da7286b7ca0a..aed00250d001e 100644
--- a/cmd/containerboot/services.go
+++ b/cmd/containerboot/services.go
@@ -389,7 +389,7 @@ func (ep *egressProxy) setStatus(ctx context.Context, status *egressservices.Sta
Path: fmt.Sprintf("/data/%s", egressservices.KeyEgressServices),
Value: bs,
}
- if err := ep.kc.JSONPatchSecret(ctx, ep.stateSecret, []kubeclient.JSONPatch{patch}); err != nil {
+ if err := ep.kc.JSONPatchResource(ctx, ep.stateSecret, kubeclient.TypeSecrets, []kubeclient.JSONPatch{patch}); err != nil {
return fmt.Errorf("error patching state Secret: %w", err)
}
ep.tailnetAddrs = n.NetMap.SelfNode.Addresses().AsSlice()
diff --git a/cmd/containerboot/settings.go b/cmd/containerboot/settings.go
index 742713e7700de..cc8641909dafe 100644
--- a/cmd/containerboot/settings.go
+++ b/cmd/containerboot/settings.go
@@ -67,7 +67,12 @@ type settings struct {
PodIP string
PodIPv4 string
PodIPv6 string
+ PodUID string
HealthCheckAddrPort string
+ LocalAddrPort string
+ MetricsEnabled bool
+ HealthCheckEnabled bool
+ DebugAddrPort string
EgressSvcsCfgPath string
}
@@ -98,7 +103,12 @@ func configFromEnv() (*settings, error) {
PodIP: defaultEnv("POD_IP", ""),
EnableForwardingOptimizations: defaultBool("TS_EXPERIMENTAL_ENABLE_FORWARDING_OPTIMIZATIONS", false),
HealthCheckAddrPort: defaultEnv("TS_HEALTHCHECK_ADDR_PORT", ""),
+ LocalAddrPort: defaultEnv("TS_LOCAL_ADDR_PORT", "[::]:9002"),
+ MetricsEnabled: defaultBool("TS_ENABLE_METRICS", false),
+ HealthCheckEnabled: defaultBool("TS_ENABLE_HEALTH_CHECK", false),
+ DebugAddrPort: defaultEnv("TS_DEBUG_ADDR_PORT", ""),
EgressSvcsCfgPath: defaultEnv("TS_EGRESS_SERVICES_CONFIG_PATH", ""),
+ PodUID: defaultEnv("POD_UID", ""),
}
podIPs, ok := os.LookupEnv("POD_IPS")
if ok {
@@ -171,17 +181,31 @@ func (s *settings) validate() error {
return errors.New("TS_EXPERIMENTAL_ENABLE_FORWARDING_OPTIMIZATIONS is not supported in userspace mode")
}
if s.HealthCheckAddrPort != "" {
+ log.Printf("[warning] TS_HEALTHCHECK_ADDR_PORT is deprecated and will be removed in 1.82.0. Please use TS_ENABLE_HEALTH_CHECK and optionally TS_LOCAL_ADDR_PORT instead.")
if _, err := netip.ParseAddrPort(s.HealthCheckAddrPort); err != nil {
- return fmt.Errorf("error parsing TS_HEALTH_CHECK_ADDR_PORT value %q: %w", s.HealthCheckAddrPort, err)
+ return fmt.Errorf("error parsing TS_HEALTHCHECK_ADDR_PORT value %q: %w", s.HealthCheckAddrPort, err)
}
}
+ if s.localMetricsEnabled() || s.localHealthEnabled() {
+ if _, err := netip.ParseAddrPort(s.LocalAddrPort); err != nil {
+ return fmt.Errorf("error parsing TS_LOCAL_ADDR_PORT value %q: %w", s.LocalAddrPort, err)
+ }
+ }
+ if s.DebugAddrPort != "" {
+ if _, err := netip.ParseAddrPort(s.DebugAddrPort); err != nil {
+ return fmt.Errorf("error parsing TS_DEBUG_ADDR_PORT value %q: %w", s.DebugAddrPort, err)
+ }
+ }
+ if s.HealthCheckEnabled && s.HealthCheckAddrPort != "" {
+ return errors.New("TS_HEALTHCHECK_ADDR_PORT is deprecated and will be removed in 1.82.0, use TS_ENABLE_HEALTH_CHECK and optionally TS_LOCAL_ADDR_PORT")
+ }
return nil
}
// setupKube is responsible for doing any necessary configuration and checks to
// ensure that tailscale state storage and authentication mechanism will work on
// Kubernetes.
-func (cfg *settings) setupKube(ctx context.Context) error {
+func (cfg *settings) setupKube(ctx context.Context, kc *kubeClient) error {
if cfg.KubeSecret == "" {
return nil
}
@@ -190,6 +214,7 @@ func (cfg *settings) setupKube(ctx context.Context) error {
return fmt.Errorf("some Kubernetes permissions are missing, please check your RBAC configuration: %v", err)
}
cfg.KubernetesCanPatch = canPatch
+ kc.canPatch = canPatch
s, err := kc.GetSecret(ctx, cfg.KubeSecret)
if err != nil {
@@ -272,6 +297,14 @@ func hasKubeStateStore(cfg *settings) bool {
return cfg.InKubernetes && cfg.KubernetesCanPatch && cfg.KubeSecret != ""
}
+func (cfg *settings) localMetricsEnabled() bool {
+ return cfg.LocalAddrPort != "" && cfg.MetricsEnabled
+}
+
+func (cfg *settings) localHealthEnabled() bool {
+ return cfg.LocalAddrPort != "" && cfg.HealthCheckEnabled
+}
+
// defaultEnv returns the value of the given envvar name, or defVal if
// unset.
func defaultEnv(name, defVal string) string {
diff --git a/cmd/containerboot/tailscaled.go b/cmd/containerboot/tailscaled.go
index 53fb7e703be45..d8da49b033d06 100644
--- a/cmd/containerboot/tailscaled.go
+++ b/cmd/containerboot/tailscaled.go
@@ -90,6 +90,12 @@ func tailscaledArgs(cfg *settings) []string {
if cfg.TailscaledConfigFilePath != "" {
args = append(args, "--config="+cfg.TailscaledConfigFilePath)
}
+ // Once enough proxy versions have been released for all the supported
+ // versions to understand this cfg setting, the operator can stop
+ // setting TS_TAILSCALED_EXTRA_ARGS for the debug flag.
+ if cfg.DebugAddrPort != "" && !strings.Contains(cfg.DaemonExtraArgs, cfg.DebugAddrPort) {
+ args = append(args, "--debug="+cfg.DebugAddrPort)
+ }
if cfg.DaemonExtraArgs != "" {
args = append(args, strings.Fields(cfg.DaemonExtraArgs)...)
}
diff --git a/cmd/derper/cert.go b/cmd/derper/cert.go
index db84aa515d257..623fa376f452c 100644
--- a/cmd/derper/cert.go
+++ b/cmd/derper/cert.go
@@ -8,6 +8,7 @@ import (
"crypto/x509"
"errors"
"fmt"
+ "net"
"net/http"
"path/filepath"
"regexp"
@@ -53,8 +54,9 @@ func certProviderByCertMode(mode, dir, hostname string) (certProvider, error) {
}
type manualCertManager struct {
- cert *tls.Certificate
- hostname string
+ cert *tls.Certificate
+ hostname string // hostname or IP address of server
+ noHostname bool // whether hostname is an IP address
}
// NewManualCertManager returns a cert provider which read certificate by given hostname on create.
@@ -74,7 +76,11 @@ func NewManualCertManager(certdir, hostname string) (certProvider, error) {
if err := x509Cert.VerifyHostname(hostname); err != nil {
return nil, fmt.Errorf("cert invalid for hostname %q: %w", hostname, err)
}
- return &manualCertManager{cert: &cert, hostname: hostname}, nil
+ return &manualCertManager{
+ cert: &cert,
+ hostname: hostname,
+ noHostname: net.ParseIP(hostname) != nil,
+ }, nil
}
func (m *manualCertManager) TLSConfig() *tls.Config {
@@ -88,7 +94,7 @@ func (m *manualCertManager) TLSConfig() *tls.Config {
}
func (m *manualCertManager) getCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) {
- if hi.ServerName != m.hostname {
+ if hi.ServerName != m.hostname && !m.noHostname {
return nil, fmt.Errorf("cert mismatch with hostname: %q", hi.ServerName)
}
diff --git a/cmd/derper/cert_test.go b/cmd/derper/cert_test.go
new file mode 100644
index 0000000000000..a379e5c04c32e
--- /dev/null
+++ b/cmd/derper/cert_test.go
@@ -0,0 +1,97 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package main
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "math/big"
+ "net"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+)
+
+// Verify that in --certmode=manual mode, we can use a bare IP address
+// as the --hostname and that GetCertificate will return it.
+func TestCertIP(t *testing.T) {
+ dir := t.TempDir()
+ const hostname = "1.2.3.4"
+
+ priv, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
+ serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ip := net.ParseIP(hostname)
+ if ip == nil {
+ t.Fatalf("invalid IP address %q", hostname)
+ }
+ template := &x509.Certificate{
+ SerialNumber: serialNumber,
+ Subject: pkix.Name{
+ Organization: []string{"Tailscale Test Corp"},
+ },
+ NotBefore: time.Now(),
+ NotAfter: time.Now().Add(30 * 24 * time.Hour),
+
+ KeyUsage: x509.KeyUsageDigitalSignature,
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+ BasicConstraintsValid: true,
+ IPAddresses: []net.IP{ip},
+ }
+ derBytes, err := x509.CreateCertificate(rand.Reader, template, template, &priv.PublicKey, priv)
+ if err != nil {
+ t.Fatal(err)
+ }
+ certOut, err := os.Create(filepath.Join(dir, hostname+".crt"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
+ t.Fatalf("Failed to write data to cert.pem: %v", err)
+ }
+ if err := certOut.Close(); err != nil {
+ t.Fatalf("Error closing cert.pem: %v", err)
+ }
+
+ keyOut, err := os.OpenFile(filepath.Join(dir, hostname+".key"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+ if err != nil {
+ t.Fatal(err)
+ }
+ privBytes, err := x509.MarshalPKCS8PrivateKey(priv)
+ if err != nil {
+ t.Fatalf("Unable to marshal private key: %v", err)
+ }
+ if err := pem.Encode(keyOut, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}); err != nil {
+ t.Fatalf("Failed to write data to key.pem: %v", err)
+ }
+ if err := keyOut.Close(); err != nil {
+ t.Fatalf("Error closing key.pem: %v", err)
+ }
+
+ cp, err := certProviderByCertMode("manual", dir, hostname)
+ if err != nil {
+ t.Fatal(err)
+ }
+ back, err := cp.TLSConfig().GetCertificate(&tls.ClientHelloInfo{
+ ServerName: "", // no SNI
+ })
+ if err != nil {
+ t.Fatalf("GetCertificate: %v", err)
+ }
+ if back == nil {
+ t.Fatalf("GetCertificate returned nil")
+ }
+}
diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt
index 417dbcfb0deb7..076074f2554a1 100644
--- a/cmd/derper/depaware.txt
+++ b/cmd/derper/depaware.txt
@@ -27,7 +27,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
L github.com/google/nftables/expr from github.com/google/nftables+
L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+
L github.com/google/nftables/xt from github.com/google/nftables/expr+
- github.com/google/uuid from tailscale.com/util/fastuuid
github.com/hdevalence/ed25519consensus from tailscale.com/tka
L github.com/josharian/native from github.com/mdlayher/netlink+
L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon
@@ -113,9 +112,10 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
tailscale.com/net/stunserver from tailscale.com/cmd/derper
L tailscale.com/net/tcpinfo from tailscale.com/derp
tailscale.com/net/tlsdial from tailscale.com/derp/derphttp
+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial
tailscale.com/net/tsaddr from tailscale.com/ipn+
💣 tailscale.com/net/tshttpproxy from tailscale.com/derp/derphttp+
- tailscale.com/net/wsconn from tailscale.com/cmd/derper+
+ tailscale.com/net/wsconn from tailscale.com/cmd/derper
tailscale.com/paths from tailscale.com/client/tailscale
💣 tailscale.com/safesocket from tailscale.com/client/tailscale
tailscale.com/syncs from tailscale.com/cmd/derper+
@@ -139,6 +139,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
tailscale.com/types/persist from tailscale.com/ipn
tailscale.com/types/preftype from tailscale.com/ipn
tailscale.com/types/ptr from tailscale.com/hostinfo+
+ tailscale.com/types/result from tailscale.com/util/lineiter
tailscale.com/types/structs from tailscale.com/ipn+
tailscale.com/types/tkatype from tailscale.com/client/tailscale+
tailscale.com/types/views from tailscale.com/ipn+
@@ -150,24 +151,29 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting
L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics
tailscale.com/util/dnsname from tailscale.com/hostinfo+
- tailscale.com/util/fastuuid from tailscale.com/tsweb
💣 tailscale.com/util/hashx from tailscale.com/util/deephash
tailscale.com/util/httpm from tailscale.com/client/tailscale
- tailscale.com/util/lineread from tailscale.com/hostinfo+
+ tailscale.com/util/lineiter from tailscale.com/hostinfo+
L tailscale.com/util/linuxfw from tailscale.com/net/netns
tailscale.com/util/mak from tailscale.com/health+
tailscale.com/util/multierr from tailscale.com/health+
tailscale.com/util/nocasemaps from tailscale.com/types/ipproto
+ tailscale.com/util/rands from tailscale.com/tsweb
tailscale.com/util/set from tailscale.com/derp+
tailscale.com/util/singleflight from tailscale.com/net/dnscache
tailscale.com/util/slicesx from tailscale.com/cmd/derper+
tailscale.com/util/syspolicy from tailscale.com/ipn
tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+
- tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy
- tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy
+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+
+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source
+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy
+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+
+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+
+ tailscale.com/util/testenv from tailscale.com/util/syspolicy+
tailscale.com/util/usermetric from tailscale.com/health
tailscale.com/util/vizerror from tailscale.com/tailcfg+
W 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+
+ W 💣 tailscale.com/util/winutil/gp from tailscale.com/util/syspolicy/source
W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+
tailscale.com/version from tailscale.com/derp+
tailscale.com/version/distro from tailscale.com/envknob+
@@ -188,7 +194,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+
golang.org/x/crypto/sha3 from crypto/internal/mlkem768+
W golang.org/x/exp/constraints from tailscale.com/util/winutil
- golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting
+ golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting+
L golang.org/x/net/bpf from github.com/mdlayher/netlink+
golang.org/x/net/dns/dnsmessage from net+
golang.org/x/net/http/httpguts from net/http
@@ -237,7 +243,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
crypto/tls from golang.org/x/crypto/acme+
crypto/x509 from crypto/tls+
crypto/x509/pkix from crypto/x509+
- database/sql/driver from github.com/google/uuid
embed from crypto/internal/nistec+
encoding from encoding/json+
encoding/asn1 from crypto/x509+
@@ -249,7 +254,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
encoding/pem from crypto/tls+
errors from bufio+
expvar from github.com/prometheus/client_golang/prometheus+
- flag from tailscale.com/cmd/derper
+ flag from tailscale.com/cmd/derper+
fmt from compress/flate+
go/token from google.golang.org/protobuf/internal/strs
hash from crypto+
@@ -257,6 +262,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
hash/fnv from google.golang.org/protobuf/internal/detrand
hash/maphash from go4.org/mem
html from net/http/pprof+
+ html/template from tailscale.com/cmd/derper
io from bufio+
io/fs from crypto/x509+
io/ioutil from github.com/mitchellh/go-ps+
@@ -268,7 +274,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
math/big from crypto/dsa+
math/bits from compress/flate+
math/rand from github.com/mdlayher/netlink+
- math/rand/v2 from tailscale.com/util/fastuuid+
+ math/rand/v2 from internal/concurrent+
mime from github.com/prometheus/common/expfmt+
mime/multipart from net/http
mime/quotedprintable from mime/multipart
@@ -283,7 +289,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
os from crypto/rand+
os/exec from github.com/coreos/go-iptables/iptables+
os/signal from tailscale.com/cmd/derper
- W os/user from tailscale.com/util/winutil
+ W os/user from tailscale.com/util/winutil+
path from github.com/prometheus/client_golang/prometheus/internal+
path/filepath from crypto/x509+
reflect from crypto/x509+
@@ -301,6 +307,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
sync/atomic from context+
syscall from crypto/rand+
text/tabwriter from runtime/pprof
+ text/template from html/template
+ text/template/parse from html/template+
time from compress/gzip+
unicode from bytes+
unicode/utf16 from crypto/x509+
diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go
index 80c9dc44f138f..6e24e0ab14b3d 100644
--- a/cmd/derper/derper.go
+++ b/cmd/derper/derper.go
@@ -19,6 +19,7 @@ import (
"expvar"
"flag"
"fmt"
+ "html/template"
"io"
"log"
"math"
@@ -57,7 +58,7 @@ var (
configPath = flag.String("c", "", "config file path")
certMode = flag.String("certmode", "letsencrypt", "mode for getting a cert. possible options: manual, letsencrypt")
certDir = flag.String("certdir", tsweb.DefaultCertDir("derper-certs"), "directory to store LetsEncrypt certs, if addr's port is :443")
- hostname = flag.String("hostname", "derp.tailscale.com", "LetsEncrypt host name, if addr's port is :443")
+ hostname = flag.String("hostname", "derp.tailscale.com", "LetsEncrypt host name, if addr's port is :443. When --certmode=manual, this can be an IP address to avoid SNI checks")
runSTUN = flag.Bool("stun", true, "whether to run a STUN server. It will bind to the same IP (if any) as the --addr flag value.")
runDERP = flag.Bool("derp", true, "whether to run a DERP server. The only reason to set this false is if you're decommissioning a server but want to keep its bootstrap DNS functionality still running.")
@@ -212,25 +213,16 @@ func main() {
tsweb.AddBrowserHeaders(w)
w.Header().Set("Content-Type", "text/html; charset=utf-8")
w.WriteHeader(200)
- io.WriteString(w, `
-DERP
-
- This is a Tailscale DERP server.
-
-
- Documentation:
-
-
-`)
- if !*runDERP {
- io.WriteString(w, `Status: disabled
`)
- }
- if tsweb.AllowDebugAccess(r) {
- io.WriteString(w, "Debug info at /debug/.
\n")
+ err := homePageTemplate.Execute(w, templateData{
+ ShowAbuseInfo: validProdHostname.MatchString(*hostname),
+ Disabled: !*runDERP,
+ AllowDebug: tsweb.AllowDebugAccess(r),
+ })
+ if err != nil {
+ if r.Context().Err() == nil {
+ log.Printf("homePageTemplate.Execute: %v", err)
+ }
+ return
}
}))
mux.Handle("/robots.txt", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -468,3 +460,52 @@ func init() {
return 0
}))
}
+
+type templateData struct {
+ ShowAbuseInfo bool
+ Disabled bool
+ AllowDebug bool
+}
+
+// homePageTemplate renders the home page using [templateData].
+var homePageTemplate = template.Must(template.New("home").Parse(`
+DERP
+
+ This is a Tailscale DERP server.
+
+
+
+ It provides STUN, interactive connectivity establishment, and relaying of end-to-end encrypted traffic
+ for Tailscale clients.
+
+
+{{if .ShowAbuseInfo }}
+
+ If you suspect abuse, please contact security@tailscale.com.
+
+{{end}}
+
+
+ Documentation:
+
+
+
+
+{{if .Disabled}}
+Status: disabled
+{{end}}
+
+{{if .AllowDebug}}
+Debug info at /debug/.
+{{end}}
+
+
+`))
diff --git a/cmd/derper/derper_test.go b/cmd/derper/derper_test.go
index 553a78f9f6426..6dce1fcdfebdd 100644
--- a/cmd/derper/derper_test.go
+++ b/cmd/derper/derper_test.go
@@ -4,6 +4,7 @@
package main
import (
+ "bytes"
"context"
"net/http"
"net/http/httptest"
@@ -107,6 +108,33 @@ func TestDeps(t *testing.T) {
"gvisor.dev/gvisor/pkg/tcpip/header": "https://github.com/tailscale/tailscale/issues/9756",
"tailscale.com/net/packet": "not needed in derper",
"github.com/gaissmai/bart": "not needed in derper",
+ "database/sql/driver": "not needed in derper", // previously came in via github.com/google/uuid
},
}.Check(t)
}
+
+func TestTemplate(t *testing.T) {
+ buf := &bytes.Buffer{}
+ err := homePageTemplate.Execute(buf, templateData{
+ ShowAbuseInfo: true,
+ Disabled: true,
+ AllowDebug: true,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ str := buf.String()
+ if !strings.Contains(str, "If you suspect abuse") {
+ t.Error("Output is missing abuse mailto")
+ }
+ if !strings.Contains(str, "Tailscale Security Policies") {
+ t.Error("Output is missing Tailscale Security Policies link")
+ }
+ if !strings.Contains(str, "Status:") {
+ t.Error("Output is missing disabled status")
+ }
+ if !strings.Contains(str, "Debug info") {
+ t.Error("Output is missing debug info")
+ }
+}
diff --git a/cmd/derpprobe/derpprobe.go b/cmd/derpprobe/derpprobe.go
index 1d0ec32c3c064..8f04326b03980 100644
--- a/cmd/derpprobe/derpprobe.go
+++ b/cmd/derpprobe/derpprobe.go
@@ -29,6 +29,7 @@ var (
tlsInterval = flag.Duration("tls-interval", 15*time.Second, "TLS probe interval")
bwInterval = flag.Duration("bw-interval", 0, "bandwidth probe interval (0 = no bandwidth probing)")
bwSize = flag.Int64("bw-probe-size-bytes", 1_000_000, "bandwidth probe size")
+ regionCode = flag.String("region-code", "", "probe only this region (e.g. 'lax'); if left blank, all regions will be probed")
)
func main() {
@@ -47,6 +48,9 @@ func main() {
if *bwInterval > 0 {
opts = append(opts, prober.WithBandwidthProbing(*bwInterval, *bwSize))
}
+ if *regionCode != "" {
+ opts = append(opts, prober.WithRegion(*regionCode))
+ }
dp, err := prober.DERP(p, *derpMapURL, opts...)
if err != nil {
log.Fatal(err)
@@ -75,6 +79,11 @@ func main() {
prober.WithPageLink("Prober metrics", "/debug/varz"),
prober.WithProbeLink("Run Probe", "/debug/probe-run?name={{.Name}}"),
), tsweb.HandlerOptions{Logf: log.Printf}))
+ mux.Handle("/healthz", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "text/plain")
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte("ok\n"))
+ }))
log.Printf("Listening on %s", *listen)
log.Fatal(http.ListenAndServe(*listen, mux))
}
diff --git a/cmd/get-authkey/main.go b/cmd/get-authkey/main.go
index d8030252c0081..777258d64b21b 100644
--- a/cmd/get-authkey/main.go
+++ b/cmd/get-authkey/main.go
@@ -51,6 +51,7 @@ func main() {
ctx := context.Background()
tsClient := tailscale.NewClient("-", nil)
+ tsClient.UserAgent = "tailscale-get-authkey"
tsClient.HTTPClient = credentials.Client(ctx)
tsClient.BaseURL = baseURL
diff --git a/cmd/k8s-operator/connector.go b/cmd/k8s-operator/connector.go
index 016166b4cda29..c243036cbabd9 100644
--- a/cmd/k8s-operator/connector.go
+++ b/cmd/k8s-operator/connector.go
@@ -10,10 +10,12 @@ import (
"fmt"
"net/netip"
"slices"
+ "strings"
"sync"
"time"
- "github.com/pkg/errors"
+ "errors"
+
"go.uber.org/zap"
xslices "golang.org/x/exp/slices"
corev1 "k8s.io/api/core/v1"
@@ -34,6 +36,7 @@ import (
const (
reasonConnectorCreationFailed = "ConnectorCreationFailed"
+ reasonConnectorCreating = "ConnectorCreating"
reasonConnectorCreated = "ConnectorCreated"
reasonConnectorInvalid = "ConnectorInvalid"
@@ -58,6 +61,7 @@ type ConnectorReconciler struct {
subnetRouters set.Slice[types.UID] // for subnet routers gauge
exitNodes set.Slice[types.UID] // for exit nodes gauge
+ appConnectors set.Slice[types.UID] // for app connectors gauge
}
var (
@@ -67,6 +71,8 @@ var (
gaugeConnectorSubnetRouterResources = clientmetric.NewGauge(kubetypes.MetricConnectorWithSubnetRouterCount)
// gaugeConnectorExitNodeResources tracks the number of Connectors currently managed by this operator instance that are exit nodes.
gaugeConnectorExitNodeResources = clientmetric.NewGauge(kubetypes.MetricConnectorWithExitNodeCount)
+ // gaugeConnectorAppConnectorResources tracks the number of Connectors currently managed by this operator instance that are app connectors.
+ gaugeConnectorAppConnectorResources = clientmetric.NewGauge(kubetypes.MetricConnectorWithAppConnectorCount)
)
func (a *ConnectorReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
@@ -108,13 +114,12 @@ func (a *ConnectorReconciler) Reconcile(ctx context.Context, req reconcile.Reque
oldCnStatus := cn.Status.DeepCopy()
setStatus := func(cn *tsapi.Connector, _ tsapi.ConditionType, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) {
tsoperator.SetConnectorCondition(cn, tsapi.ConnectorReady, status, reason, message, cn.Generation, a.clock, logger)
- if !apiequality.Semantic.DeepEqual(oldCnStatus, cn.Status) {
+ var updateErr error
+ if !apiequality.Semantic.DeepEqual(oldCnStatus, &cn.Status) {
// An error encountered here should get returned by the Reconcile function.
- if updateErr := a.Client.Status().Update(ctx, cn); updateErr != nil {
- err = errors.Wrap(err, updateErr.Error())
- }
+ updateErr = a.Client.Status().Update(ctx, cn)
}
- return res, err
+ return res, errors.Join(err, updateErr)
}
if !slices.Contains(cn.Finalizers, FinalizerName) {
@@ -131,17 +136,24 @@ func (a *ConnectorReconciler) Reconcile(ctx context.Context, req reconcile.Reque
}
if err := a.validate(cn); err != nil {
- logger.Errorf("error validating Connector spec: %w", err)
message := fmt.Sprintf(messageConnectorInvalid, err)
a.recorder.Eventf(cn, corev1.EventTypeWarning, reasonConnectorInvalid, message)
return setStatus(cn, tsapi.ConnectorReady, metav1.ConditionFalse, reasonConnectorInvalid, message)
}
if err = a.maybeProvisionConnector(ctx, logger, cn); err != nil {
- logger.Errorf("error creating Connector resources: %w", err)
+ reason := reasonConnectorCreationFailed
message := fmt.Sprintf(messageConnectorCreationFailed, err)
- a.recorder.Eventf(cn, corev1.EventTypeWarning, reasonConnectorCreationFailed, message)
- return setStatus(cn, tsapi.ConnectorReady, metav1.ConditionFalse, reasonConnectorCreationFailed, message)
+ if strings.Contains(err.Error(), optimisticLockErrorMsg) {
+ reason = reasonConnectorCreating
+ message = fmt.Sprintf("optimistic lock error, retrying: %s", err)
+ err = nil
+ logger.Info(message)
+ } else {
+ a.recorder.Eventf(cn, corev1.EventTypeWarning, reason, message)
+ }
+
+ return setStatus(cn, tsapi.ConnectorReady, metav1.ConditionFalse, reason, message)
}
logger.Info("Connector resources synced")
@@ -150,6 +162,9 @@ func (a *ConnectorReconciler) Reconcile(ctx context.Context, req reconcile.Reque
cn.Status.SubnetRoutes = cn.Spec.SubnetRouter.AdvertiseRoutes.Stringify()
return setStatus(cn, tsapi.ConnectorReady, metav1.ConditionTrue, reasonConnectorCreated, reasonConnectorCreated)
}
+ if cn.Spec.AppConnector != nil {
+ cn.Status.IsAppConnector = true
+ }
cn.Status.SubnetRoutes = ""
return setStatus(cn, tsapi.ConnectorReady, metav1.ConditionTrue, reasonConnectorCreated, reasonConnectorCreated)
}
@@ -183,29 +198,44 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge
isExitNode: cn.Spec.ExitNode,
},
ProxyClassName: proxyClass,
+ proxyType: proxyTypeConnector,
}
if cn.Spec.SubnetRouter != nil && len(cn.Spec.SubnetRouter.AdvertiseRoutes) > 0 {
sts.Connector.routes = cn.Spec.SubnetRouter.AdvertiseRoutes.Stringify()
}
+ if cn.Spec.AppConnector != nil {
+ sts.Connector.isAppConnector = true
+ if len(cn.Spec.AppConnector.Routes) != 0 {
+ sts.Connector.routes = cn.Spec.AppConnector.Routes.Stringify()
+ }
+ }
+
a.mu.Lock()
- if sts.Connector.isExitNode {
+ if cn.Spec.ExitNode {
a.exitNodes.Add(cn.UID)
} else {
a.exitNodes.Remove(cn.UID)
}
- if sts.Connector.routes != "" {
+ if cn.Spec.SubnetRouter != nil {
a.subnetRouters.Add(cn.GetUID())
} else {
a.subnetRouters.Remove(cn.GetUID())
}
+ if cn.Spec.AppConnector != nil {
+ a.appConnectors.Add(cn.GetUID())
+ } else {
+ a.appConnectors.Remove(cn.GetUID())
+ }
a.mu.Unlock()
gaugeConnectorSubnetRouterResources.Set(int64(a.subnetRouters.Len()))
gaugeConnectorExitNodeResources.Set(int64(a.exitNodes.Len()))
+ gaugeConnectorAppConnectorResources.Set(int64(a.appConnectors.Len()))
var connectors set.Slice[types.UID]
connectors.AddSlice(a.exitNodes.Slice())
connectors.AddSlice(a.subnetRouters.Slice())
+ connectors.AddSlice(a.appConnectors.Slice())
gaugeConnectorResources.Set(int64(connectors.Len()))
_, err := a.ssr.Provision(ctx, logger, sts)
@@ -213,27 +243,27 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge
return err
}
- _, tsHost, ips, err := a.ssr.DeviceInfo(ctx, crl)
+ dev, err := a.ssr.DeviceInfo(ctx, crl, logger)
if err != nil {
return err
}
- if tsHost == "" {
- logger.Debugf("no Tailscale hostname known yet, waiting for connector pod to finish auth")
+ if dev == nil || dev.hostname == "" {
+ logger.Debugf("no Tailscale hostname known yet, waiting for Connector Pod to finish auth")
// No hostname yet. Wait for the connector pod to auth.
cn.Status.TailnetIPs = nil
cn.Status.Hostname = ""
return nil
}
- cn.Status.TailnetIPs = ips
- cn.Status.Hostname = tsHost
+ cn.Status.TailnetIPs = dev.ips
+ cn.Status.Hostname = dev.hostname
return nil
}
func (a *ConnectorReconciler) maybeCleanupConnector(ctx context.Context, logger *zap.SugaredLogger, cn *tsapi.Connector) (bool, error) {
- if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(cn.Name, a.tsnamespace, "connector")); err != nil {
+ if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(cn.Name, a.tsnamespace, "connector"), proxyTypeConnector); err != nil {
return false, fmt.Errorf("failed to cleanup Connector resources: %w", err)
} else if !done {
logger.Debugf("Connector cleanup not done yet, waiting for next reconcile")
@@ -248,12 +278,15 @@ func (a *ConnectorReconciler) maybeCleanupConnector(ctx context.Context, logger
a.mu.Lock()
a.subnetRouters.Remove(cn.UID)
a.exitNodes.Remove(cn.UID)
+ a.appConnectors.Remove(cn.UID)
a.mu.Unlock()
gaugeConnectorExitNodeResources.Set(int64(a.exitNodes.Len()))
gaugeConnectorSubnetRouterResources.Set(int64(a.subnetRouters.Len()))
+ gaugeConnectorAppConnectorResources.Set(int64(a.appConnectors.Len()))
var connectors set.Slice[types.UID]
connectors.AddSlice(a.exitNodes.Slice())
connectors.AddSlice(a.subnetRouters.Slice())
+ connectors.AddSlice(a.appConnectors.Slice())
gaugeConnectorResources.Set(int64(connectors.Len()))
return true, nil
}
@@ -262,8 +295,14 @@ func (a *ConnectorReconciler) validate(cn *tsapi.Connector) error {
// Connector fields are already validated at apply time with CEL validation
// on custom resource fields. The checks here are a backup in case the
// CEL validation breaks without us noticing.
- if !(cn.Spec.SubnetRouter != nil || cn.Spec.ExitNode) {
- return errors.New("invalid spec: a Connector must expose subnet routes or act as an exit node (or both)")
+ if cn.Spec.SubnetRouter == nil && !cn.Spec.ExitNode && cn.Spec.AppConnector == nil {
+ return errors.New("invalid spec: a Connector must be configured as at least one of subnet router, exit node or app connector")
+ }
+ if (cn.Spec.SubnetRouter != nil || cn.Spec.ExitNode) && cn.Spec.AppConnector != nil {
+ return errors.New("invalid spec: a Connector that is configured as an app connector must not be also configured as a subnet router or exit node")
+ }
+ if cn.Spec.AppConnector != nil {
+ return validateAppConnector(cn.Spec.AppConnector)
}
if cn.Spec.SubnetRouter == nil {
return nil
@@ -272,19 +311,27 @@ func (a *ConnectorReconciler) validate(cn *tsapi.Connector) error {
}
func validateSubnetRouter(sb *tsapi.SubnetRouter) error {
- if len(sb.AdvertiseRoutes) < 1 {
+ if len(sb.AdvertiseRoutes) == 0 {
return errors.New("invalid subnet router spec: no routes defined")
}
- var err error
- for _, route := range sb.AdvertiseRoutes {
+ return validateRoutes(sb.AdvertiseRoutes)
+}
+
+func validateAppConnector(ac *tsapi.AppConnector) error {
+ return validateRoutes(ac.Routes)
+}
+
+func validateRoutes(routes tsapi.Routes) error {
+ var errs []error
+ for _, route := range routes {
pfx, e := netip.ParsePrefix(string(route))
if e != nil {
- err = errors.Wrap(err, fmt.Sprintf("route %s is invalid: %v", route, err))
+ errs = append(errs, fmt.Errorf("route %v is invalid: %v", route, e))
continue
}
if pfx.Masked() != pfx {
- err = errors.Wrap(err, fmt.Sprintf("route %s has non-address bits set; expected %s", pfx, pfx.Masked()))
+ errs = append(errs, fmt.Errorf("route %s has non-address bits set; expected %s", pfx, pfx.Masked()))
}
}
- return err
+ return errors.Join(errs...)
}
diff --git a/cmd/k8s-operator/connector_test.go b/cmd/k8s-operator/connector_test.go
index a4ba90d3d6683..7cdd83115e877 100644
--- a/cmd/k8s-operator/connector_test.go
+++ b/cmd/k8s-operator/connector_test.go
@@ -8,12 +8,14 @@ package main
import (
"context"
"testing"
+ "time"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/kube/kubetypes"
@@ -296,3 +298,100 @@ func TestConnectorWithProxyClass(t *testing.T) {
expectReconciled(t, cr, "", "test")
expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation)
}
+
+func TestConnectorWithAppConnector(t *testing.T) {
+ // Setup
+ cn := &tsapi.Connector{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ UID: types.UID("1234-UID"),
+ },
+ TypeMeta: metav1.TypeMeta{
+ Kind: tsapi.ConnectorKind,
+ APIVersion: "tailscale.io/v1alpha1",
+ },
+ Spec: tsapi.ConnectorSpec{
+ AppConnector: &tsapi.AppConnector{},
+ },
+ }
+ fc := fake.NewClientBuilder().
+ WithScheme(tsapi.GlobalScheme).
+ WithObjects(cn).
+ WithStatusSubresource(cn).
+ Build()
+ ft := &fakeTSClient{}
+ zl, err := zap.NewDevelopment()
+ if err != nil {
+ t.Fatal(err)
+ }
+ cl := tstest.NewClock(tstest.ClockOpts{})
+ fr := record.NewFakeRecorder(1)
+ cr := &ConnectorReconciler{
+ Client: fc,
+ clock: cl,
+ ssr: &tailscaleSTSReconciler{
+ Client: fc,
+ tsClient: ft,
+ defaultTags: []string{"tag:k8s"},
+ operatorNamespace: "operator-ns",
+ proxyImage: "tailscale/tailscale",
+ },
+ logger: zl.Sugar(),
+ recorder: fr,
+ }
+
+ // 1. Connector with app connector is created and becomes ready
+ expectReconciled(t, cr, "", "test")
+ fullName, shortName := findGenName(t, fc, "", "test", "connector")
+ opts := configOpts{
+ stsName: shortName,
+ secretName: fullName,
+ parentType: "connector",
+ hostname: "test-connector",
+ app: kubetypes.AppConnector,
+ isAppConnector: true,
+ }
+ expectEqual(t, fc, expectedSecret(t, fc, opts), nil)
+ expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation)
+ // Connector's ready condition should be set to true
+
+ cn.ObjectMeta.Finalizers = append(cn.ObjectMeta.Finalizers, "tailscale.com/finalizer")
+ cn.Status.IsAppConnector = true
+ cn.Status.Conditions = []metav1.Condition{{
+ Type: string(tsapi.ConnectorReady),
+ Status: metav1.ConditionTrue,
+ LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)},
+ Reason: reasonConnectorCreated,
+ Message: reasonConnectorCreated,
+ }}
+ expectEqual(t, fc, cn, nil)
+
+ // 2. Connector with invalid app connector routes has status set to invalid
+ mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) {
+ conn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("1.2.3.4/5")}
+ })
+ cn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("1.2.3.4/5")}
+ expectReconciled(t, cr, "", "test")
+ cn.Status.Conditions = []metav1.Condition{{
+ Type: string(tsapi.ConnectorReady),
+ Status: metav1.ConditionFalse,
+ LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)},
+ Reason: reasonConnectorInvalid,
+ Message: "Connector is invalid: route 1.2.3.4/5 has non-address bits set; expected 0.0.0.0/5",
+ }}
+ expectEqual(t, fc, cn, nil)
+
+ // 3. Connector with valid app connector routes becomes ready
+ mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) {
+ conn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("10.88.2.21/32")}
+ })
+ cn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("10.88.2.21/32")}
+ cn.Status.Conditions = []metav1.Condition{{
+ Type: string(tsapi.ConnectorReady),
+ Status: metav1.ConditionTrue,
+ LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)},
+ Reason: reasonConnectorCreated,
+ Message: reasonConnectorCreated,
+ }}
+ expectReconciled(t, cr, "", "test")
+}
diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt
index b77ea22ef5297..d1d687432863b 100644
--- a/cmd/k8s-operator/depaware.txt
+++ b/cmd/k8s-operator/depaware.txt
@@ -80,10 +80,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus
github.com/bits-and-blooms/bitset from github.com/gaissmai/bart
💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus
- github.com/coder/websocket from tailscale.com/control/controlhttp+
- github.com/coder/websocket/internal/errd from github.com/coder/websocket
- github.com/coder/websocket/internal/util from github.com/coder/websocket
- github.com/coder/websocket/internal/xsync from github.com/coder/websocket
L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw
💣 github.com/davecgh/go-spew/spew from k8s.io/apimachinery/pkg/util/dump
W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+
@@ -310,7 +306,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
gvisor.dev/gvisor/pkg/tcpip/network/internal/ip from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+
gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+
gvisor.dev/gvisor/pkg/tcpip/network/ipv4 from tailscale.com/net/tstun+
- gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from tailscale.com/wgengine/netstack
+ gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from tailscale.com/wgengine/netstack+
gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+
gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+
💣 gvisor.dev/gvisor/pkg/tcpip/stack from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+
@@ -382,7 +378,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
k8s.io/api/storage/v1beta1 from k8s.io/client-go/applyconfigurations/storage/v1beta1+
k8s.io/api/storagemigration/v1alpha1 from k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1+
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions from k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
- 💣 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 from sigs.k8s.io/controller-runtime/pkg/webhook/conversion
+ 💣 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 from sigs.k8s.io/controller-runtime/pkg/webhook/conversion+
k8s.io/apimachinery/pkg/api/equality from k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1+
k8s.io/apimachinery/pkg/api/errors from k8s.io/apimachinery/pkg/util/managedfields/internal+
k8s.io/apimachinery/pkg/api/meta from k8s.io/apimachinery/pkg/api/validation+
@@ -658,6 +654,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com/control/controlbase from tailscale.com/control/controlhttp+
tailscale.com/control/controlclient from tailscale.com/ipn/ipnlocal+
tailscale.com/control/controlhttp from tailscale.com/control/controlclient
+ tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp
tailscale.com/control/controlknobs from tailscale.com/control/controlclient+
tailscale.com/derp from tailscale.com/derp/derphttp+
tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+
@@ -668,6 +665,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal
tailscale.com/drive from tailscale.com/client/tailscale+
tailscale.com/envknob from tailscale.com/client/tailscale+
+ tailscale.com/envknob/featureknob from tailscale.com/client/web+
tailscale.com/health from tailscale.com/control/controlclient+
tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal
tailscale.com/hostinfo from tailscale.com/client/web+
@@ -734,11 +732,11 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com/net/stun from tailscale.com/ipn/localapi+
L tailscale.com/net/tcpinfo from tailscale.com/derp
tailscale.com/net/tlsdial from tailscale.com/control/controlclient+
+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial
tailscale.com/net/tsaddr from tailscale.com/client/web+
tailscale.com/net/tsdial from tailscale.com/control/controlclient+
💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+
tailscale.com/net/tstun from tailscale.com/tsd+
- tailscale.com/net/wsconn from tailscale.com/control/controlhttp+
tailscale.com/omit from tailscale.com/ipn/conffile
tailscale.com/paths from tailscale.com/client/tailscale+
💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal
@@ -773,6 +771,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com/types/persist from tailscale.com/control/controlclient+
tailscale.com/types/preftype from tailscale.com/ipn+
tailscale.com/types/ptr from tailscale.com/cmd/k8s-operator+
+ tailscale.com/types/result from tailscale.com/util/lineiter
tailscale.com/types/structs from tailscale.com/control/controlclient+
tailscale.com/types/tkatype from tailscale.com/client/tailscale+
tailscale.com/types/views from tailscale.com/appc+
@@ -790,7 +789,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
💣 tailscale.com/util/hashx from tailscale.com/util/deephash
tailscale.com/util/httphdr from tailscale.com/ipn/ipnlocal+
tailscale.com/util/httpm from tailscale.com/client/tailscale+
- tailscale.com/util/lineread from tailscale.com/hostinfo+
+ tailscale.com/util/lineiter from tailscale.com/hostinfo+
L tailscale.com/util/linuxfw from tailscale.com/net/netns+
tailscale.com/util/mak from tailscale.com/appc+
tailscale.com/util/multierr from tailscale.com/control/controlclient+
@@ -810,8 +809,11 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com/util/slicesx from tailscale.com/appc+
tailscale.com/util/syspolicy from tailscale.com/control/controlclient+
tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+
- tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy
- tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy
+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+
+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source
+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+
+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+
+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+
tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock
tailscale.com/util/systemd from tailscale.com/control/controlclient+
tailscale.com/util/testenv from tailscale.com/control/controlclient+
@@ -821,7 +823,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com/util/vizerror from tailscale.com/tailcfg+
💣 tailscale.com/util/winutil from tailscale.com/clientupdate+
W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+
- W 💣 tailscale.com/util/winutil/gp from tailscale.com/net/dns
+ W 💣 tailscale.com/util/winutil/gp from tailscale.com/net/dns+
W tailscale.com/util/winutil/policy from tailscale.com/ipn/ipnlocal
W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+
tailscale.com/util/zstdframe from tailscale.com/control/controlclient+
diff --git a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml
index c428d5d1e751e..1b9b97186b6ca 100644
--- a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml
+++ b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml
@@ -35,9 +35,13 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- - name: oauth
- secret:
- secretName: operator-oauth
+ - name: oauth
+ {{- with .Values.oauthSecretVolume }}
+ {{- toYaml . | nindent 10 }}
+ {{- else }}
+ secret:
+ secretName: operator-oauth
+ {{- end }}
containers:
- name: operator
{{- with .Values.operatorConfig.securityContext }}
@@ -81,6 +85,14 @@ spec:
- name: PROXY_DEFAULT_CLASS
value: {{ .Values.proxyConfig.defaultProxyClass }}
{{- end }}
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
{{- with .Values.operatorConfig.extraEnv }}
{{- toYaml . | nindent 12 }}
{{- end }}
diff --git a/cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml b/cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml
index 2a1fa81b42793..208d58ee10f08 100644
--- a/cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml
+++ b/cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml
@@ -1,3 +1,4 @@
+{{- if .Values.ingressClass.enabled }}
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
@@ -6,3 +7,4 @@ metadata:
spec:
controller: tailscale.com/ts-ingress # controller name currently can not be changed
# parameters: {} # currently no parameters are supported
+{{- end }}
diff --git a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml
index ede61070b4399..637bdf793c2b9 100644
--- a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml
+++ b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml
@@ -6,6 +6,10 @@ kind: ServiceAccount
metadata:
name: operator
namespace: {{ .Release.Namespace }}
+ {{- with .Values.operatorConfig.serviceAccountAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
@@ -30,6 +34,10 @@ rules:
- apiGroups: ["tailscale.com"]
resources: ["recorders", "recorders/status"]
verbs: ["get", "list", "watch", "update"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["get", "list", "watch"]
+ resourceNames: ["servicemonitors.monitoring.coreos.com"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
@@ -65,6 +73,9 @@ rules:
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["roles", "rolebindings"]
verbs: ["get", "create", "patch", "update", "list", "watch"]
+- apiGroups: ["monitoring.coreos.com"]
+ resources: ["servicemonitors"]
+ verbs: ["get", "list", "update", "create", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
diff --git a/cmd/k8s-operator/deploy/chart/templates/proxy-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/proxy-rbac.yaml
index 1c15c9119f971..fa552a7c7e39a 100644
--- a/cmd/k8s-operator/deploy/chart/templates/proxy-rbac.yaml
+++ b/cmd/k8s-operator/deploy/chart/templates/proxy-rbac.yaml
@@ -16,6 +16,9 @@ rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create","delete","deletecollection","get","list","patch","update","watch"]
+- apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "patch", "get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
diff --git a/cmd/k8s-operator/deploy/chart/values.yaml b/cmd/k8s-operator/deploy/chart/values.yaml
index f5778eee76433..24e94c0493d75 100644
--- a/cmd/k8s-operator/deploy/chart/values.yaml
+++ b/cmd/k8s-operator/deploy/chart/values.yaml
@@ -3,11 +3,26 @@
# Operator oauth credentials. If set a Kubernetes Secret with the provided
# values will be created in the operator namespace. If unset a Secret named
-# operator-oauth must be precreated.
+# operator-oauth must be precreated or oauthSecretVolume needs to be adjusted.
+# This block will be overridden by oauthSecretVolume, if set.
oauth: {}
# clientId: ""
# clientSecret: ""
+# Secret volume.
+# If set it defines the volume the oauth secrets will be mounted from.
+# The volume needs to contain two files named `client_id` and `client_secret`.
+# If unset the volume will reference the Secret named operator-oauth.
+# This block will override the oauth block.
+oauthSecretVolume: {}
+ # csi:
+ # driver: secrets-store.csi.k8s.io
+ # readOnly: true
+ # volumeAttributes:
+ # secretProviderClass: tailscale-oauth
+ #
+ ## NAME is pre-defined!
+
# installCRDs determines whether tailscale.com CRDs should be installed as part
# of chart installation. We do not use Helm's CRD installation mechanism as that
# does not allow for upgrading CRDs.
@@ -40,6 +55,9 @@ operatorConfig:
podAnnotations: {}
podLabels: {}
+ serviceAccountAnnotations: {}
+ # eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/tailscale-operator-role
+
tolerations: []
affinity: {}
@@ -54,6 +72,9 @@ operatorConfig:
# - name: EXTRA_VAR2
# value: "value2"
+# In the case that you already have a tailscale ingressclass in your cluster (or vcluster), you can disable the creation here
+ingressClass:
+ enabled: true
# proxyConfig contains configuraton that will be applied to any ingress/egress
# proxies created by the operator.
diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml
index 9614f74e6b162..4434c12835ba1 100644
--- a/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml
+++ b/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml
@@ -24,6 +24,10 @@ spec:
jsonPath: .status.isExitNode
name: IsExitNode
type: string
+ - description: Whether this Connector instance is an app connector.
+ jsonPath: .status.isAppConnector
+ name: IsAppConnector
+ type: string
- description: Status of the deployed Connector resources.
jsonPath: .status.conditions[?(@.type == "ConnectorReady")].reason
name: Status
@@ -66,10 +70,40 @@ spec:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
type: object
properties:
+ appConnector:
+ description: |-
+ AppConnector defines whether the Connector device should act as a Tailscale app connector. A Connector that is
+ configured as an app connector cannot be a subnet router or an exit node. If this field is unset, the
+ Connector does not act as an app connector.
+ Note that you will need to manually configure the permissions and the domains for the app connector via the
+ Admin panel.
+ Note also that the main tested and supported use case of this config option is to deploy an app connector on
+ Kubernetes to access SaaS applications available on the public internet. Using the app connector to expose
+ cluster workloads or other internal workloads to tailnet might work, but this is not a use case that we have
+ tested or optimised for.
+ If you are using the app connector to access SaaS applications because you need a predictable egress IP that
+ can be whitelisted, it is also your responsibility to ensure that cluster traffic from the connector flows
+ via that predictable IP, for example by enforcing that cluster egress traffic is routed via an egress NAT
+ device with a static IP address.
+ https://tailscale.com/kb/1281/app-connectors
+ type: object
+ properties:
+ routes:
+ description: |-
+ Routes are optional preconfigured routes for the domains routed via the app connector.
+ If not set, routes for the domains will be discovered dynamically.
+ If set, the app connector will immediately be able to route traffic using the preconfigured routes, but may
+ also dynamically discover other routes.
+ https://tailscale.com/kb/1332/apps-best-practices#preconfiguration
+ type: array
+ minItems: 1
+ items:
+ type: string
+ format: cidr
exitNode:
description: |-
- ExitNode defines whether the Connector node should act as a
- Tailscale exit node. Defaults to false.
+ ExitNode defines whether the Connector device should act as a Tailscale exit node. Defaults to false.
+ This field is mutually exclusive with the appConnector field.
https://tailscale.com/kb/1103/exit-nodes
type: boolean
hostname:
@@ -90,9 +124,11 @@ spec:
type: string
subnetRouter:
description: |-
- SubnetRouter defines subnet routes that the Connector node should
- expose to tailnet. If unset, none are exposed.
+ SubnetRouter defines subnet routes that the Connector device should
+ expose to tailnet as a Tailscale subnet router.
https://tailscale.com/kb/1019/subnets/
+ If this field is unset, the device does not get configured as a Tailscale subnet router.
+ This field is mutually exclusive with the appConnector field.
type: object
required:
- advertiseRoutes
@@ -125,8 +161,10 @@ spec:
type: string
pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$
x-kubernetes-validations:
- - rule: has(self.subnetRouter) || self.exitNode == true
- message: A Connector needs to be either an exit node or a subnet router, or both.
+ - rule: has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true) || has(self.appConnector)
+ message: A Connector needs to have at least one of exit node, subnet router or app connector configured.
+ - rule: '!((has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true)) && has(self.appConnector))'
+ message: The appConnector field is mutually exclusive with exitNode and subnetRouter fields.
status:
description: |-
ConnectorStatus describes the status of the Connector. This is set
@@ -200,6 +238,9 @@ spec:
If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
node.
type: string
+ isAppConnector:
+ description: IsAppConnector is set to true if the Connector acts as an app connector.
+ type: boolean
isExitNode:
description: IsExitNode is set to true if the Connector acts as an exit node.
type: boolean
diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml
index 0fff30516a132..9b45deedb62b7 100644
--- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml
+++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml
@@ -73,9 +73,35 @@ spec:
enable:
description: |-
Setting enable to true will make the proxy serve Tailscale metrics
- at <pod-ip>:9001/debug/metrics.
+ at <pod-ip>:9002/metrics.
+ A metrics Service named <proxy-name>-metrics will also be created in the operator's namespace and will
+ serve the metrics at <service-ip>:9002/metrics.
+
+ In 1.78.x and 1.80.x, this field also serves as the default value for
+ .spec.statefulSet.pod.tailscaleContainer.debug.enable. From 1.82.0, both
+ fields will independently default to false.
+
Defaults to false.
type: boolean
+ serviceMonitor:
+ description: |-
+ Enable to create a Prometheus ServiceMonitor for scraping the proxy's Tailscale metrics.
+ The ServiceMonitor will select the metrics Service that gets created when metrics are enabled.
+ The ingested metrics for each Service monitor will have labels to identify the proxy:
+ ts_proxy_type: ingress_service|ingress_resource|connector|proxygroup
+ ts_proxy_parent_name: name of the parent resource (i.e name of the Connector, Tailscale Ingress, Tailscale Service or ProxyGroup)
+ ts_proxy_parent_namespace: namespace of the parent resource (if the parent resource is not cluster scoped)
+ job: ts_<proxy type>_[<proxy namespace>]_<proxy name>
+ type: object
+ required:
+ - enable
+ properties:
+ enable:
+ description: If Enable is set to true, a Prometheus ServiceMonitor will be created. Enable can only be set to true if metrics are enabled.
+ type: boolean
+ x-kubernetes-validations:
+ - rule: '!(has(self.serviceMonitor) && self.serviceMonitor.enable && !self.enable)'
+ message: ServiceMonitor can only be enabled if metrics are enabled
statefulSet:
description: |-
Configuration parameters for the proxy's StatefulSet. Tailscale
@@ -1249,6 +1275,25 @@ spec:
description: Configuration for the proxy container running tailscale.
type: object
properties:
+ debug:
+ description: |-
+ Configuration for enabling extra debug information in the container.
+ Not recommended for production use.
+ type: object
+ properties:
+ enable:
+ description: |-
+ Enable tailscaled's HTTP pprof endpoints at <pod-ip>:9001/debug/pprof/
+ and internal debug metrics endpoint at <pod-ip>:9001/debug/metrics, where
+ 9001 is a container port named "debug". The endpoints and their responses
+ may change in backwards incompatible ways in the future, and should not
+ be considered stable.
+
+ In 1.78.x and 1.80.x, this setting will default to the value of
+ .spec.metrics.enable, and requests to the "metrics" port matching the
+ mux pattern /debug/ will be forwarded to the "debug" port. In 1.82.x,
+ this setting will default to false, and no requests will be proxied.
+ type: boolean
env:
description: |-
List of environment variables to set in the container.
@@ -1360,11 +1405,12 @@ spec:
securityContext:
description: |-
Container security context.
- Security context specified here will override the security context by the operator.
- By default the operator:
- - sets 'privileged: true' for the init container
- - set NET_ADMIN capability for tailscale container for proxies that
- are created for Services or Connector.
+ Security context specified here will override the security context set by the operator.
+ By default the operator sets the Tailscale container and the Tailscale init container to privileged
+ for proxies created for Tailscale ingress and egress Service, Connector and ProxyGroup.
+ You can reduce the permissions of the Tailscale container to cap NET_ADMIN by
+ installing device plugin in your cluster and configuring the proxies tun device to be created
+ by the device plugin, see https://github.com/tailscale/tailscale/issues/10814#issuecomment-2479977752
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context
type: object
properties:
@@ -1553,6 +1599,25 @@ spec:
description: Configuration for the proxy init container that enables forwarding.
type: object
properties:
+ debug:
+ description: |-
+ Configuration for enabling extra debug information in the container.
+ Not recommended for production use.
+ type: object
+ properties:
+ enable:
+ description: |-
+ Enable tailscaled's HTTP pprof endpoints at <pod-ip>:9001/debug/pprof/
+ and internal debug metrics endpoint at <pod-ip>:9001/debug/metrics, where
+ 9001 is a container port named "debug". The endpoints and their responses
+ may change in backwards incompatible ways in the future, and should not
+ be considered stable.
+
+ In 1.78.x and 1.80.x, this setting will default to the value of
+ .spec.metrics.enable, and requests to the "metrics" port matching the
+ mux pattern /debug/ will be forwarded to the "debug" port. In 1.82.x,
+ this setting will default to false, and no requests will be proxied.
+ type: boolean
env:
description: |-
List of environment variables to set in the container.
@@ -1664,11 +1729,12 @@ spec:
securityContext:
description: |-
Container security context.
- Security context specified here will override the security context by the operator.
- By default the operator:
- - sets 'privileged: true' for the init container
- - set NET_ADMIN capability for tailscale container for proxies that
- are created for Services or Connector.
+ Security context specified here will override the security context set by the operator.
+ By default the operator sets the Tailscale container and the Tailscale init container to privileged
+ for proxies created for Tailscale ingress and egress Service, Connector and ProxyGroup.
+ You can reduce the permissions of the Tailscale container to cap NET_ADMIN by
+ installing device plugin in your cluster and configuring the proxies tun device to be created
+ by the device plugin, see https://github.com/tailscale/tailscale/issues/10814#issuecomment-2479977752
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context
type: object
properties:
@@ -1896,6 +1962,182 @@ spec:
Value is the taint value the toleration matches to.
If the operator is Exists, the value should be empty, otherwise just a regular string.
type: string
+ topologySpreadConstraints:
+ description: |-
+ Proxy Pod's topology spread constraints.
+ By default Tailscale Kubernetes operator does not apply any topology spread constraints.
+ https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
+ type: array
+ items:
+ description: TopologySpreadConstraint specifies how to spread matching pods among the given topology.
+ type: object
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ properties:
+ labelSelector:
+ description: |-
+ LabelSelector is used to find matching pods.
+ Pods that match this label selector are counted to determine the number of pods
+ in their corresponding topology domain.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ type: array
+ items:
+ type: string
+ x-kubernetes-list-type: atomic
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ additionalProperties:
+ type: string
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select the pods over which
+ spreading will be calculated. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are ANDed with labelSelector
+ to select the group of existing pods over which spreading will be calculated
+ for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ MatchLabelKeys cannot be set when LabelSelector isn't set.
+ Keys that don't exist in the incoming pod labels will
+ be ignored. A null or empty list means only match against labelSelector.
+
+ This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).
+ type: array
+ items:
+ type: string
+ x-kubernetes-list-type: atomic
+ maxSkew:
+ description: |-
+ MaxSkew describes the degree to which pods may be unevenly distributed.
+ When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
+ between the number of matching pods in the target topology and the global minimum.
+ The global minimum is the minimum number of matching pods in an eligible domain
+ or zero if the number of eligible domains is less than MinDomains.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 2/2/1:
+ In this case, the global minimum is 1.
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P |
+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ violate MaxSkew(1).
+ - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ to topologies that satisfy it.
+ It's a required field. Default value is 1 and 0 is not allowed.
+ type: integer
+ format: int32
+ minDomains:
+ description: |-
+ MinDomains indicates a minimum number of eligible domains.
+ When the number of eligible domains with matching topology keys is less than minDomains,
+ Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+ And when the number of eligible domains with matching topology keys equals or greater than minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+ The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+ In this situation, new pod with the same labelSelector cannot be scheduled,
+ because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+ it will violate MaxSkew.
+ type: integer
+ format: int32
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+ We consider each as a "bucket", and try to put balanced number
+ of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: |-
+ WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+ the spread constraint.
+ - DoNotSchedule (default) tells the scheduler not to schedule it.
+ - ScheduleAnyway tells the scheduler to schedule the pod in any location,
+ but giving higher precedence to topologies that would help reduce the
+ skew.
+ A constraint is considered "Unsatisfiable" for an incoming pod
+ if and only if every possible node assignment for that pod would violate
+ "MaxSkew" on some topology.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 3/1/1:
+ | zone1 | zone2 | zone3 |
+ | P P P | P | P |
+ If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+ to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+ MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+ won't make it *more* imbalanced.
+ It's a required field.
+ type: string
tailscale:
description: |-
TailscaleConfig contains options to configure the tailscale-specific
diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml
index 1a812b7362757..210a7b43463e5 100644
--- a/cmd/k8s-operator/deploy/manifests/operator.yaml
+++ b/cmd/k8s-operator/deploy/manifests/operator.yaml
@@ -53,6 +53,10 @@ spec:
jsonPath: .status.isExitNode
name: IsExitNode
type: string
+ - description: Whether this Connector instance is an app connector.
+ jsonPath: .status.isAppConnector
+ name: IsAppConnector
+ type: string
- description: Status of the deployed Connector resources.
jsonPath: .status.conditions[?(@.type == "ConnectorReady")].reason
name: Status
@@ -91,10 +95,40 @@ spec:
More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
properties:
+ appConnector:
+ description: |-
+ AppConnector defines whether the Connector device should act as a Tailscale app connector. A Connector that is
+ configured as an app connector cannot be a subnet router or an exit node. If this field is unset, the
+ Connector does not act as an app connector.
+ Note that you will need to manually configure the permissions and the domains for the app connector via the
+ Admin panel.
+ Note also that the main tested and supported use case of this config option is to deploy an app connector on
+ Kubernetes to access SaaS applications available on the public internet. Using the app connector to expose
+ cluster workloads or other internal workloads to tailnet might work, but this is not a use case that we have
+ tested or optimised for.
+ If you are using the app connector to access SaaS applications because you need a predictable egress IP that
+ can be whitelisted, it is also your responsibility to ensure that cluster traffic from the connector flows
+ via that predictable IP, for example by enforcing that cluster egress traffic is routed via an egress NAT
+ device with a static IP address.
+ https://tailscale.com/kb/1281/app-connectors
+ properties:
+ routes:
+ description: |-
+ Routes are optional preconfigured routes for the domains routed via the app connector.
+ If not set, routes for the domains will be discovered dynamically.
+ If set, the app connector will immediately be able to route traffic using the preconfigured routes, but may
+ also dynamically discover other routes.
+ https://tailscale.com/kb/1332/apps-best-practices#preconfiguration
+ items:
+ format: cidr
+ type: string
+ minItems: 1
+ type: array
+ type: object
exitNode:
description: |-
- ExitNode defines whether the Connector node should act as a
- Tailscale exit node. Defaults to false.
+ ExitNode defines whether the Connector device should act as a Tailscale exit node. Defaults to false.
+ This field is mutually exclusive with the appConnector field.
https://tailscale.com/kb/1103/exit-nodes
type: boolean
hostname:
@@ -115,9 +149,11 @@ spec:
type: string
subnetRouter:
description: |-
- SubnetRouter defines subnet routes that the Connector node should
- expose to tailnet. If unset, none are exposed.
+ SubnetRouter defines subnet routes that the Connector device should
+ expose to tailnet as a Tailscale subnet router.
https://tailscale.com/kb/1019/subnets/
+ If this field is unset, the device does not get configured as a Tailscale subnet router.
+ This field is mutually exclusive with the appConnector field.
properties:
advertiseRoutes:
description: |-
@@ -151,8 +187,10 @@ spec:
type: array
type: object
x-kubernetes-validations:
- - message: A Connector needs to be either an exit node or a subnet router, or both.
- rule: has(self.subnetRouter) || self.exitNode == true
+ - message: A Connector needs to have at least one of exit node, subnet router or app connector configured.
+ rule: has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true) || has(self.appConnector)
+ - message: The appConnector field is mutually exclusive with exitNode and subnetRouter fields.
+ rule: '!((has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true)) && has(self.appConnector))'
status:
description: |-
ConnectorStatus describes the status of the Connector. This is set
@@ -225,6 +263,9 @@ spec:
If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
node.
type: string
+ isAppConnector:
+ description: IsAppConnector is set to true if the Connector acts as an app connector.
+ type: boolean
isExitNode:
description: IsExitNode is set to true if the Connector acts as an exit node.
type: boolean
@@ -499,12 +540,38 @@ spec:
enable:
description: |-
Setting enable to true will make the proxy serve Tailscale metrics
- at <pod-ip>:9001/debug/metrics.
+ at <pod-ip>:9002/metrics.
+ A metrics Service named <proxy-name>-metrics will also be created in the operator's namespace and will
+ serve the metrics at <service-ip>:9002/metrics.
+
+ In 1.78.x and 1.80.x, this field also serves as the default value for
+ .spec.statefulSet.pod.tailscaleContainer.debug.enable. From 1.82.0, both
+ fields will independently default to false.
+
Defaults to false.
type: boolean
+ serviceMonitor:
+ description: |-
+ Enable to create a Prometheus ServiceMonitor for scraping the proxy's Tailscale metrics.
+ The ServiceMonitor will select the metrics Service that gets created when metrics are enabled.
+ The ingested metrics for each Service monitor will have labels to identify the proxy:
+ ts_proxy_type: ingress_service|ingress_resource|connector|proxygroup
+ ts_proxy_parent_name: name of the parent resource (i.e name of the Connector, Tailscale Ingress, Tailscale Service or ProxyGroup)
+ ts_proxy_parent_namespace: namespace of the parent resource (if the parent resource is not cluster scoped)
+ job: ts_<proxy type>_[<proxy namespace>]_<proxy name>
+ properties:
+ enable:
+ description: If Enable is set to true, a Prometheus ServiceMonitor will be created. Enable can only be set to true if metrics are enabled.
+ type: boolean
+ required:
+ - enable
+ type: object
required:
- enable
type: object
+ x-kubernetes-validations:
+ - message: ServiceMonitor can only be enabled if metrics are enabled
+ rule: '!(has(self.serviceMonitor) && self.serviceMonitor.enable && !self.enable)'
statefulSet:
description: |-
Configuration parameters for the proxy's StatefulSet. Tailscale
@@ -1675,6 +1742,25 @@ spec:
tailscaleContainer:
description: Configuration for the proxy container running tailscale.
properties:
+ debug:
+ description: |-
+ Configuration for enabling extra debug information in the container.
+ Not recommended for production use.
+ properties:
+ enable:
+ description: |-
+ Enable tailscaled's HTTP pprof endpoints at <pod-ip>:9001/debug/pprof/
+ and internal debug metrics endpoint at <pod-ip>:9001/debug/metrics, where
+ 9001 is a container port named "debug". The endpoints and their responses
+ may change in backwards incompatible ways in the future, and should not
+ be considered stable.
+
+ In 1.78.x and 1.80.x, this setting will default to the value of
+ .spec.metrics.enable, and requests to the "metrics" port matching the
+ mux pattern /debug/ will be forwarded to the "debug" port. In 1.82.x,
+ this setting will default to false, and no requests will be proxied.
+ type: boolean
+ type: object
env:
description: |-
List of environment variables to set in the container.
@@ -1786,11 +1872,12 @@ spec:
securityContext:
description: |-
Container security context.
- Security context specified here will override the security context by the operator.
- By default the operator:
- - sets 'privileged: true' for the init container
- - set NET_ADMIN capability for tailscale container for proxies that
- are created for Services or Connector.
+ Security context specified here will override the security context set by the operator.
+ By default the operator sets the Tailscale container and the Tailscale init container to privileged
+ for proxies created for Tailscale ingress and egress Service, Connector and ProxyGroup.
+ You can reduce the permissions of the Tailscale container to cap NET_ADMIN by
+ installing device plugin in your cluster and configuring the proxies tun device to be created
+ by the device plugin, see https://github.com/tailscale/tailscale/issues/10814#issuecomment-2479977752
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context
properties:
allowPrivilegeEscalation:
@@ -1979,6 +2066,25 @@ spec:
tailscaleInitContainer:
description: Configuration for the proxy init container that enables forwarding.
properties:
+ debug:
+ description: |-
+ Configuration for enabling extra debug information in the container.
+ Not recommended for production use.
+ properties:
+ enable:
+ description: |-
+ Enable tailscaled's HTTP pprof endpoints at <pod-ip>:9001/debug/pprof/
+ and internal debug metrics endpoint at <pod-ip>:9001/debug/metrics, where
+ 9001 is a container port named "debug". The endpoints and their responses
+ may change in backwards incompatible ways in the future, and should not
+ be considered stable.
+
+ In 1.78.x and 1.80.x, this setting will default to the value of
+ .spec.metrics.enable, and requests to the "metrics" port matching the
+ mux pattern /debug/ will be forwarded to the "debug" port. In 1.82.x,
+ this setting will default to false, and no requests will be proxied.
+ type: boolean
+ type: object
env:
description: |-
List of environment variables to set in the container.
@@ -2090,11 +2196,12 @@ spec:
securityContext:
description: |-
Container security context.
- Security context specified here will override the security context by the operator.
- By default the operator:
- - sets 'privileged: true' for the init container
- - set NET_ADMIN capability for tailscale container for proxies that
- are created for Services or Connector.
+ Security context specified here will override the security context set by the operator.
+ By default the operator sets the Tailscale container and the Tailscale init container to privileged
+ for proxies created for Tailscale ingress and egress Service, Connector and ProxyGroup.
+ You can reduce the permissions of the Tailscale container to cap NET_ADMIN by
+ installing device plugin in your cluster and configuring the proxies tun device to be created
+ by the device plugin, see https://github.com/tailscale/tailscale/issues/10814#issuecomment-2479977752
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context
properties:
allowPrivilegeEscalation:
@@ -2323,6 +2430,182 @@ spec:
type: string
type: object
type: array
+ topologySpreadConstraints:
+ description: |-
+ Proxy Pod's topology spread constraints.
+ By default Tailscale Kubernetes operator does not apply any topology spread constraints.
+ https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
+ items:
+ description: TopologySpreadConstraint specifies how to spread matching pods among the given topology.
+ properties:
+ labelSelector:
+ description: |-
+ LabelSelector is used to find matching pods.
+ Pods that match this label selector are counted to determine the number of pods
+ in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select the pods over which
+ spreading will be calculated. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are ANDed with labelSelector
+ to select the group of existing pods over which spreading will be calculated
+ for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ MatchLabelKeys cannot be set when LabelSelector isn't set.
+ Keys that don't exist in the incoming pod labels will
+ be ignored. A null or empty list means only match against labelSelector.
+
+ This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ maxSkew:
+ description: |-
+ MaxSkew describes the degree to which pods may be unevenly distributed.
+ When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
+ between the number of matching pods in the target topology and the global minimum.
+ The global minimum is the minimum number of matching pods in an eligible domain
+ or zero if the number of eligible domains is less than MinDomains.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 2/2/1:
+ In this case, the global minimum is 1.
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P |
+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ violate MaxSkew(1).
+ - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ to topologies that satisfy it.
+ It's a required field. Default value is 1 and 0 is not allowed.
+ format: int32
+ type: integer
+ minDomains:
+ description: |-
+ MinDomains indicates a minimum number of eligible domains.
+ When the number of eligible domains with matching topology keys is less than minDomains,
+ Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+ And when the number of eligible domains with matching topology keys equals or greater than minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+ The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+ In this situation, new pod with the same labelSelector cannot be scheduled,
+ because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+ it will violate MaxSkew.
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+ We consider each as a "bucket", and try to put balanced number
+ of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: |-
+ WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+ the spread constraint.
+ - DoNotSchedule (default) tells the scheduler not to schedule it.
+ - ScheduleAnyway tells the scheduler to schedule the pod in any location,
+ but giving higher precedence to topologies that would help reduce the
+ skew.
+ A constraint is considered "Unsatisfiable" for an incoming pod
+ if and only if every possible node assignment for that pod would violate
+ "MaxSkew" on some topology.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 3/1/1:
+ | zone1 | zone2 | zone3 |
+ | P P P | P | P |
+ If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+ to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+ MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+ won't make it *more* imbalanced.
+ It's a required field.
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
type: object
type: object
tailscale:
@@ -4386,6 +4669,16 @@ rules:
- list
- watch
- update
+ - apiGroups:
+ - apiextensions.k8s.io
+ resourceNames:
+ - servicemonitors.monitoring.coreos.com
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
@@ -4466,6 +4759,16 @@ rules:
- update
- list
- watch
+ - apiGroups:
+ - monitoring.coreos.com
+ resources:
+ - servicemonitors
+ verbs:
+ - get
+ - list
+ - update
+ - create
+ - delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
@@ -4486,6 +4789,14 @@ rules:
- patch
- update
- watch
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
@@ -4558,6 +4869,14 @@ spec:
value: "false"
- name: PROXY_FIREWALL_MODE
value: auto
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
image: tailscale/k8s-operator:unstable
imagePullPolicy: Always
name: operator
diff --git a/cmd/k8s-operator/deploy/manifests/proxy.yaml b/cmd/k8s-operator/deploy/manifests/proxy.yaml
index a79d48d73ce0f..3c9a3eaa36c56 100644
--- a/cmd/k8s-operator/deploy/manifests/proxy.yaml
+++ b/cmd/k8s-operator/deploy/manifests/proxy.yaml
@@ -30,7 +30,13 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
securityContext:
- capabilities:
- add:
- - NET_ADMIN
+ privileged: true
diff --git a/cmd/k8s-operator/deploy/manifests/userspace-proxy.yaml b/cmd/k8s-operator/deploy/manifests/userspace-proxy.yaml
index 46b49a57b1909..6617f6d4b52fe 100644
--- a/cmd/k8s-operator/deploy/manifests/userspace-proxy.yaml
+++ b/cmd/k8s-operator/deploy/manifests/userspace-proxy.yaml
@@ -24,3 +24,11 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
diff --git a/cmd/k8s-operator/dnsrecords.go b/cmd/k8s-operator/dnsrecords.go
index bba87bf255910..f91dd49ec255e 100644
--- a/cmd/k8s-operator/dnsrecords.go
+++ b/cmd/k8s-operator/dnsrecords.go
@@ -10,6 +10,7 @@ import (
"encoding/json"
"fmt"
"slices"
+ "strings"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
@@ -98,7 +99,15 @@ func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile.
return reconcile.Result{}, nil
}
- return reconcile.Result{}, dnsRR.maybeProvision(ctx, headlessSvc, logger)
+ if err := dnsRR.maybeProvision(ctx, headlessSvc, logger); err != nil {
+ if strings.Contains(err.Error(), optimisticLockErrorMsg) {
+ logger.Infof("optimistic lock error, retrying: %s", err)
+ } else {
+ return reconcile.Result{}, err
+ }
+ }
+
+ return reconcile.Result{}, nil
}
// maybeProvision ensures that dnsrecords ConfigMap contains a record for the
diff --git a/cmd/k8s-operator/egress-services-readiness.go b/cmd/k8s-operator/egress-services-readiness.go
index f6991145f88fc..f1964d452633c 100644
--- a/cmd/k8s-operator/egress-services-readiness.go
+++ b/cmd/k8s-operator/egress-services-readiness.go
@@ -64,7 +64,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
oldStatus := svc.Status.DeepCopy()
defer func() {
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, st, reason, msg, esrr.clock, l)
- if !apiequality.Semantic.DeepEqual(oldStatus, svc.Status) {
+ if !apiequality.Semantic.DeepEqual(oldStatus, &svc.Status) {
err = errors.Join(err, esrr.Status().Update(ctx, svc))
}
}()
diff --git a/cmd/k8s-operator/egress-services.go b/cmd/k8s-operator/egress-services.go
index 98ed943669cd0..7544376fb2e65 100644
--- a/cmd/k8s-operator/egress-services.go
+++ b/cmd/k8s-operator/egress-services.go
@@ -51,12 +51,12 @@ const (
labelSvcType = "tailscale.com/svc-type" // ingress or egress
typeEgress = "egress"
// maxPorts is the maximum number of ports that can be exposed on a
- // container. In practice this will be ports in range [3000 - 4000). The
+ // container. In practice this will be ports in range [10000 - 11000). The
// high range should make it easier to distinguish container ports from
// the tailnet target ports for debugging purposes (i.e when reading
- // netfilter rules). The limit of 10000 is somewhat arbitrary, the
+ // netfilter rules). The limit of 1000 is somewhat arbitrary, the
// assumption is that this would not be hit in practice.
- maxPorts = 10000
+ maxPorts = 1000
indexEgressProxyGroup = ".metadata.annotations.egress-proxy-group"
)
@@ -123,7 +123,7 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
oldStatus := svc.Status.DeepCopy()
defer func() {
- if !apiequality.Semantic.DeepEqual(oldStatus, svc.Status) {
+ if !apiequality.Semantic.DeepEqual(oldStatus, &svc.Status) {
err = errors.Join(err, esr.Status().Update(ctx, svc))
}
}()
@@ -136,9 +136,8 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
}
if !slices.Contains(svc.Finalizers, FinalizerName) {
- l.Infof("configuring tailnet service") // logged exactly once
svc.Finalizers = append(svc.Finalizers, FinalizerName)
- if err := esr.Update(ctx, svc); err != nil {
+ if err := esr.updateSvcSpec(ctx, svc); err != nil {
err := fmt.Errorf("failed to add finalizer: %w", err)
r := svcConfiguredReason(svc, false, l)
tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, l)
@@ -157,7 +156,15 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re
return res, err
}
- return res, esr.maybeProvision(ctx, svc, l)
+ if err := esr.maybeProvision(ctx, svc, l); err != nil {
+ if strings.Contains(err.Error(), optimisticLockErrorMsg) {
+ l.Infof("optimistic lock error, retrying: %s", err)
+ } else {
+ return reconcile.Result{}, err
+ }
+ }
+
+ return res, nil
}
func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) (err error) {
@@ -198,7 +205,7 @@ func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1
if svc.Spec.ExternalName != clusterIPSvcFQDN {
l.Infof("Configuring ExternalName Service to point to ClusterIP Service %s", clusterIPSvcFQDN)
svc.Spec.ExternalName = clusterIPSvcFQDN
- if err = esr.Update(ctx, svc); err != nil {
+ if err = esr.updateSvcSpec(ctx, svc); err != nil {
err = fmt.Errorf("error updating ExternalName Service: %w", err)
return err
}
@@ -222,6 +229,15 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s
found := false
for _, wantsPM := range svc.Spec.Ports {
if wantsPM.Port == pm.Port && strings.EqualFold(string(wantsPM.Protocol), string(pm.Protocol)) {
+ // We don't use the port name to distinguish this port internally, but Kubernetes
+ // requires that, for Services with more than one port, each port is uniquely named.
+ // So we can always pick the port name from the ExternalName Service as at this point we
+ // know that those are valid names because Kubernetes already validated it once. Note
+ // that users could have changed an unnamed port to a named port and might have changed
+ // port names - this should still work.
+ // https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services
+ // See also https://github.com/tailscale/tailscale/issues/13406#issuecomment-2507230388
+ clusterIPSvc.Spec.Ports[i].Name = wantsPM.Name
found = true
break
}
@@ -246,7 +262,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s
if !found {
// Calculate a free port to expose on container and add
// a new PortMap to the ClusterIP Service.
- if usedPorts.Len() == maxPorts {
+ if usedPorts.Len() >= maxPorts {
// TODO(irbekrm): refactor to avoid extra reconciles here. Low priority as in practice,
// the limit should not be hit.
return nil, false, fmt.Errorf("unable to allocate additional ports on ProxyGroup %s, %d ports already used. Create another ProxyGroup or open an issue if you believe this is unexpected.", proxyGroupName, maxPorts)
@@ -540,13 +556,13 @@ func svcNameBase(s string) string {
}
}
-// unusedPort returns a port in range [3000 - 4000). The caller must ensure that
-// usedPorts does not contain all ports in range [3000 - 4000).
+// unusedPort returns a port in range [10000 - 11000). The caller must ensure that
+// usedPorts does not contain all ports in range [10000 - 11000).
func unusedPort(usedPorts sets.Set[int32]) int32 {
foundFreePort := false
var suggestPort int32
for !foundFreePort {
- suggestPort = rand.Int32N(maxPorts) + 3000
+ suggestPort = rand.Int32N(maxPorts) + 10000
if !usedPorts.Has(suggestPort) {
foundFreePort = true
}
@@ -714,3 +730,13 @@ func epsPortsFromSvc(svc *corev1.Service) (ep []discoveryv1.EndpointPort) {
}
return ep
}
+
+// updateSvcSpec ensures that the given Service's spec is updated in cluster, but the local Service object still retains
+// the not-yet-applied status.
+// TODO(irbekrm): once we do SSA for these patch updates, this will no longer be needed.
+func (esr *egressSvcsReconciler) updateSvcSpec(ctx context.Context, svc *corev1.Service) error {
+ st := svc.Status.DeepCopy()
+ err := esr.Update(ctx, svc)
+ svc.Status = *st
+ return err
+}
diff --git a/cmd/k8s-operator/egress-services_test.go b/cmd/k8s-operator/egress-services_test.go
index ac77339853ebe..06fe977ecc130 100644
--- a/cmd/k8s-operator/egress-services_test.go
+++ b/cmd/k8s-operator/egress-services_test.go
@@ -105,28 +105,40 @@ func TestTailscaleEgressServices(t *testing.T) {
condition(tsapi.ProxyGroupReady, metav1.ConditionTrue, "", "", clock),
}
})
- // Quirks of the fake client.
- mustUpdateStatus(t, fc, "default", "test", func(svc *corev1.Service) {
- svc.Status.Conditions = []metav1.Condition{}
+ expectReconciled(t, esr, "default", "test")
+ validateReadyService(t, fc, esr, svc, clock, zl, cm)
+ })
+ t.Run("service_retain_one_unnamed_port", func(t *testing.T) {
+ svc.Spec.Ports = []corev1.ServicePort{{Protocol: "TCP", Port: 80}}
+ mustUpdate(t, fc, "default", "test", func(s *corev1.Service) {
+ s.Spec.Ports = svc.Spec.Ports
})
expectReconciled(t, esr, "default", "test")
- // Verify that a ClusterIP Service has been created.
- name := findGenNameForEgressSvcResources(t, fc, svc)
- expectEqual(t, fc, clusterIPSvc(name, svc), removeTargetPortsFromSvc)
- clusterSvc := mustGetClusterIPSvc(t, fc, name)
- // Verify that an EndpointSlice has been created.
- expectEqual(t, fc, endpointSlice(name, svc, clusterSvc), nil)
- // Verify that ConfigMap contains configuration for the new egress service.
- mustHaveConfigForSvc(t, fc, svc, clusterSvc, cm)
- r := svcConfiguredReason(svc, true, zl.Sugar())
- // Verify that the user-created ExternalName Service has Configured set to true and ExternalName pointing to the
- // CluterIP Service.
- svc.Status.Conditions = []metav1.Condition{
- condition(tsapi.EgressSvcConfigured, metav1.ConditionTrue, r, r, clock),
- }
- svc.ObjectMeta.Finalizers = []string{"tailscale.com/finalizer"}
- svc.Spec.ExternalName = fmt.Sprintf("%s.operator-ns.svc.cluster.local", name)
- expectEqual(t, fc, svc, nil)
+ validateReadyService(t, fc, esr, svc, clock, zl, cm)
+ })
+ t.Run("service_add_two_named_ports", func(t *testing.T) {
+ svc.Spec.Ports = []corev1.ServicePort{{Protocol: "TCP", Port: 80, Name: "http"}, {Protocol: "TCP", Port: 443, Name: "https"}}
+ mustUpdate(t, fc, "default", "test", func(s *corev1.Service) {
+ s.Spec.Ports = svc.Spec.Ports
+ })
+ expectReconciled(t, esr, "default", "test")
+ validateReadyService(t, fc, esr, svc, clock, zl, cm)
+ })
+ t.Run("service_add_udp_port", func(t *testing.T) {
+ svc.Spec.Ports = append(svc.Spec.Ports, corev1.ServicePort{Port: 53, Protocol: "UDP", Name: "dns"})
+ mustUpdate(t, fc, "default", "test", func(s *corev1.Service) {
+ s.Spec.Ports = svc.Spec.Ports
+ })
+ expectReconciled(t, esr, "default", "test")
+ validateReadyService(t, fc, esr, svc, clock, zl, cm)
+ })
+ t.Run("service_change_protocol", func(t *testing.T) {
+ svc.Spec.Ports = []corev1.ServicePort{{Protocol: "TCP", Port: 80, Name: "http"}, {Protocol: "TCP", Port: 443, Name: "https"}, {Port: 53, Protocol: "TCP", Name: "tcp_dns"}}
+ mustUpdate(t, fc, "default", "test", func(s *corev1.Service) {
+ s.Spec.Ports = svc.Spec.Ports
+ })
+ expectReconciled(t, esr, "default", "test")
+ validateReadyService(t, fc, esr, svc, clock, zl, cm)
})
t.Run("delete_external_name_service", func(t *testing.T) {
@@ -143,6 +155,29 @@ func TestTailscaleEgressServices(t *testing.T) {
})
}
+func validateReadyService(t *testing.T, fc client.WithWatch, esr *egressSvcsReconciler, svc *corev1.Service, clock *tstest.Clock, zl *zap.Logger, cm *corev1.ConfigMap) {
+ expectReconciled(t, esr, "default", "test")
+ // Verify that a ClusterIP Service has been created.
+ name := findGenNameForEgressSvcResources(t, fc, svc)
+ expectEqual(t, fc, clusterIPSvc(name, svc), removeTargetPortsFromSvc)
+ clusterSvc := mustGetClusterIPSvc(t, fc, name)
+ // Verify that an EndpointSlice has been created.
+ expectEqual(t, fc, endpointSlice(name, svc, clusterSvc), nil)
+ // Verify that ConfigMap contains configuration for the new egress service.
+ mustHaveConfigForSvc(t, fc, svc, clusterSvc, cm)
+ r := svcConfiguredReason(svc, true, zl.Sugar())
+ // Verify that the user-created ExternalName Service has Configured set to true and ExternalName pointing to the
+ // ClusterIP Service.
+ svc.Status.Conditions = []metav1.Condition{
+ condition(tsapi.EgressSvcValid, metav1.ConditionTrue, "EgressSvcValid", "EgressSvcValid", clock),
+ condition(tsapi.EgressSvcConfigured, metav1.ConditionTrue, r, r, clock),
+ }
+ svc.ObjectMeta.Finalizers = []string{"tailscale.com/finalizer"}
+ svc.Spec.ExternalName = fmt.Sprintf("%s.operator-ns.svc.cluster.local", name)
+ expectEqual(t, fc, svc, nil)
+
+}
+
func condition(typ tsapi.ConditionType, st metav1.ConditionStatus, r, msg string, clock tstime.Clock) metav1.Condition {
return metav1.Condition{
Type: string(typ),
diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go
index acc90d465093a..3eb47dfb00ad3 100644
--- a/cmd/k8s-operator/ingress.go
+++ b/cmd/k8s-operator/ingress.go
@@ -76,7 +76,15 @@ func (a *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request
return reconcile.Result{}, a.maybeCleanup(ctx, logger, ing)
}
- return reconcile.Result{}, a.maybeProvision(ctx, logger, ing)
+ if err := a.maybeProvision(ctx, logger, ing); err != nil {
+ if strings.Contains(err.Error(), optimisticLockErrorMsg) {
+ logger.Infof("optimistic lock error, retrying: %s", err)
+ } else {
+ return reconcile.Result{}, err
+ }
+ }
+
+ return reconcile.Result{}, nil
}
func (a *IngressReconciler) maybeCleanup(ctx context.Context, logger *zap.SugaredLogger, ing *networkingv1.Ingress) error {
@@ -90,7 +98,7 @@ func (a *IngressReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare
return nil
}
- if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(ing.Name, ing.Namespace, "ingress")); err != nil {
+ if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(ing.Name, ing.Namespace, "ingress"), proxyTypeIngressResource); err != nil {
return fmt.Errorf("failed to cleanup: %w", err)
} else if !done {
logger.Debugf("cleanup not done yet, waiting for next reconcile")
@@ -268,6 +276,7 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
Tags: tags,
ChildResourceLabels: crl,
ProxyClassName: proxyClass,
+ proxyType: proxyTypeIngressResource,
}
if val := ing.GetAnnotations()[AnnotationExperimentalForwardClusterTrafficViaL7IngresProxy]; val == "true" {
@@ -278,12 +287,12 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
return fmt.Errorf("failed to provision: %w", err)
}
- _, tsHost, _, err := a.ssr.DeviceInfo(ctx, crl)
+ dev, err := a.ssr.DeviceInfo(ctx, crl, logger)
if err != nil {
- return fmt.Errorf("failed to get device ID: %w", err)
+ return fmt.Errorf("failed to retrieve Ingress HTTPS endpoint status: %w", err)
}
- if tsHost == "" {
- logger.Debugf("no Tailscale hostname known yet, waiting for proxy pod to finish auth")
+ if dev == nil || dev.ingressDNSName == "" {
+ logger.Debugf("no Ingress DNS name known yet, waiting for proxy Pod initialize and start serving Ingress")
// No hostname yet. Wait for the proxy pod to auth.
ing.Status.LoadBalancer.Ingress = nil
if err := a.Status().Update(ctx, ing); err != nil {
@@ -292,10 +301,10 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
return nil
}
- logger.Debugf("setting ingress hostname to %q", tsHost)
+ logger.Debugf("setting Ingress hostname to %q", dev.ingressDNSName)
ing.Status.LoadBalancer.Ingress = []networkingv1.IngressLoadBalancerIngress{
{
- Hostname: tsHost,
+ Hostname: dev.ingressDNSName,
Ports: []networkingv1.IngressPortStatus{
{
Protocol: "TCP",
diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go
index 38a041dde07f9..c4332908a08f9 100644
--- a/cmd/k8s-operator/ingress_test.go
+++ b/cmd/k8s-operator/ingress_test.go
@@ -12,6 +12,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
@@ -141,6 +142,154 @@ func TestTailscaleIngress(t *testing.T) {
expectMissing[corev1.Secret](t, fc, "operator-ns", fullName)
}
+func TestTailscaleIngressHostname(t *testing.T) {
+ tsIngressClass := &networkingv1.IngressClass{ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}}
+ fc := fake.NewFakeClient(tsIngressClass)
+ ft := &fakeTSClient{}
+ fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}}
+ zl, err := zap.NewDevelopment()
+ if err != nil {
+ t.Fatal(err)
+ }
+ ingR := &IngressReconciler{
+ Client: fc,
+ ssr: &tailscaleSTSReconciler{
+ Client: fc,
+ tsClient: ft,
+ tsnetServer: fakeTsnetServer,
+ defaultTags: []string{"tag:k8s"},
+ operatorNamespace: "operator-ns",
+ proxyImage: "tailscale/tailscale",
+ },
+ logger: zl.Sugar(),
+ }
+
+ // 1. Resources get created for regular Ingress
+ ing := &networkingv1.Ingress{
+ TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "default",
+ // The apiserver is supposed to set the UID, but the fake client
+ // doesn't. So, set it explicitly because other code later depends
+ // on it being set.
+ UID: types.UID("1234-UID"),
+ },
+ Spec: networkingv1.IngressSpec{
+ IngressClassName: ptr.To("tailscale"),
+ DefaultBackend: &networkingv1.IngressBackend{
+ Service: &networkingv1.IngressServiceBackend{
+ Name: "test",
+ Port: networkingv1.ServiceBackendPort{
+ Number: 8080,
+ },
+ },
+ },
+ TLS: []networkingv1.IngressTLS{
+ {Hosts: []string{"default-test"}},
+ },
+ },
+ }
+ mustCreate(t, fc, ing)
+ mustCreate(t, fc, &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "default",
+ },
+ Spec: corev1.ServiceSpec{
+ ClusterIP: "1.2.3.4",
+ Ports: []corev1.ServicePort{{
+ Port: 8080,
+ Name: "http"},
+ },
+ },
+ })
+
+ expectReconciled(t, ingR, "default", "test")
+
+ fullName, shortName := findGenName(t, fc, "default", "test", "ingress")
+ mustCreate(t, fc, &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: fullName,
+ Namespace: "operator-ns",
+ UID: "test-uid",
+ },
+ })
+ opts := configOpts{
+ stsName: shortName,
+ secretName: fullName,
+ namespace: "default",
+ parentType: "ingress",
+ hostname: "default-test",
+ app: kubetypes.AppIngressResource,
+ }
+ serveConfig := &ipn.ServeConfig{
+ TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
+ Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}},
+ }
+ opts.serveConfig = serveConfig
+
+ expectEqual(t, fc, expectedSecret(t, fc, opts), nil)
+ expectEqual(t, fc, expectedHeadlessService(shortName, "ingress"), nil)
+ expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation)
+
+ // 2. Ingress proxy with capability version >= 110 does not have an HTTPS endpoint set
+ mustUpdate(t, fc, "operator-ns", opts.secretName, func(secret *corev1.Secret) {
+ mak.Set(&secret.Data, "device_id", []byte("1234"))
+ mak.Set(&secret.Data, "tailscale_capver", []byte("110"))
+ mak.Set(&secret.Data, "pod_uid", []byte("test-uid"))
+ mak.Set(&secret.Data, "device_fqdn", []byte("foo.tailnetxyz.ts.net"))
+ })
+ expectReconciled(t, ingR, "default", "test")
+ ing.Finalizers = append(ing.Finalizers, "tailscale.com/finalizer")
+
+ expectEqual(t, fc, ing, nil)
+
+ // 3. Ingress proxy with capability version >= 110 advertises HTTPS endpoint
+ mustUpdate(t, fc, "operator-ns", opts.secretName, func(secret *corev1.Secret) {
+ mak.Set(&secret.Data, "device_id", []byte("1234"))
+ mak.Set(&secret.Data, "tailscale_capver", []byte("110"))
+ mak.Set(&secret.Data, "pod_uid", []byte("test-uid"))
+ mak.Set(&secret.Data, "device_fqdn", []byte("foo.tailnetxyz.ts.net"))
+ mak.Set(&secret.Data, "https_endpoint", []byte("foo.tailnetxyz.ts.net"))
+ })
+ expectReconciled(t, ingR, "default", "test")
+ ing.Status.LoadBalancer = networkingv1.IngressLoadBalancerStatus{
+ Ingress: []networkingv1.IngressLoadBalancerIngress{
+ {Hostname: "foo.tailnetxyz.ts.net", Ports: []networkingv1.IngressPortStatus{{Port: 443, Protocol: "TCP"}}},
+ },
+ }
+ expectEqual(t, fc, ing, nil)
+
+ // 4. Ingress proxy with capability version >= 110 does not have an HTTPS endpoint ready
+ mustUpdate(t, fc, "operator-ns", opts.secretName, func(secret *corev1.Secret) {
+ mak.Set(&secret.Data, "device_id", []byte("1234"))
+ mak.Set(&secret.Data, "tailscale_capver", []byte("110"))
+ mak.Set(&secret.Data, "pod_uid", []byte("test-uid"))
+ mak.Set(&secret.Data, "device_fqdn", []byte("foo.tailnetxyz.ts.net"))
+ mak.Set(&secret.Data, "https_endpoint", []byte("no-https"))
+ })
+ expectReconciled(t, ingR, "default", "test")
+ ing.Status.LoadBalancer.Ingress = nil
+ expectEqual(t, fc, ing, nil)
+
+ // 5. Ingress proxy's state has https_endpoints set, but its capver is not matching Pod UID (downgrade)
+ mustUpdate(t, fc, "operator-ns", opts.secretName, func(secret *corev1.Secret) {
+ mak.Set(&secret.Data, "device_id", []byte("1234"))
+ mak.Set(&secret.Data, "tailscale_capver", []byte("110"))
+ mak.Set(&secret.Data, "pod_uid", []byte("not-the-right-uid"))
+ mak.Set(&secret.Data, "device_fqdn", []byte("foo.tailnetxyz.ts.net"))
+ mak.Set(&secret.Data, "https_endpoint", []byte("bar.tailnetxyz.ts.net"))
+ })
+ ing.Status.LoadBalancer = networkingv1.IngressLoadBalancerStatus{
+ Ingress: []networkingv1.IngressLoadBalancerIngress{
+ {Hostname: "foo.tailnetxyz.ts.net", Ports: []networkingv1.IngressPortStatus{{Port: 443, Protocol: "TCP"}}},
+ },
+ }
+ expectReconciled(t, ingR, "default", "test")
+ expectEqual(t, fc, ing, nil)
+}
+
func TestTailscaleIngressWithProxyClass(t *testing.T) {
// Setup
pc := &tsapi.ProxyClass{
@@ -271,3 +420,124 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) {
opts.proxyClass = ""
expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation)
}
+
+func TestTailscaleIngressWithServiceMonitor(t *testing.T) {
+ pc := &tsapi.ProxyClass{
+ ObjectMeta: metav1.ObjectMeta{Name: "metrics", Generation: 1},
+ Spec: tsapi.ProxyClassSpec{
+ Metrics: &tsapi.Metrics{
+ Enable: true,
+ ServiceMonitor: &tsapi.ServiceMonitor{Enable: true},
+ },
+ },
+ Status: tsapi.ProxyClassStatus{
+ Conditions: []metav1.Condition{{
+ Status: metav1.ConditionTrue,
+ Type: string(tsapi.ProxyClassReady),
+ ObservedGeneration: 1,
+ }}},
+ }
+ crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}}
+ tsIngressClass := &networkingv1.IngressClass{ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, Spec: networkingv1.IngressClassSpec{Controller: "tailscale.com/ts-ingress"}}
+ fc := fake.NewClientBuilder().
+ WithScheme(tsapi.GlobalScheme).
+ WithObjects(pc, tsIngressClass).
+ WithStatusSubresource(pc).
+ Build()
+ ft := &fakeTSClient{}
+ fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}}
+ zl, err := zap.NewDevelopment()
+ if err != nil {
+ t.Fatal(err)
+ }
+ ingR := &IngressReconciler{
+ Client: fc,
+ ssr: &tailscaleSTSReconciler{
+ Client: fc,
+ tsClient: ft,
+ tsnetServer: fakeTsnetServer,
+ defaultTags: []string{"tag:k8s"},
+ operatorNamespace: "operator-ns",
+ proxyImage: "tailscale/tailscale",
+ },
+ logger: zl.Sugar(),
+ }
+ // 1. Enable metrics- expect metrics Service to be created
+ ing := &networkingv1.Ingress{
+ TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "default",
+ // The apiserver is supposed to set the UID, but the fake client
+ // doesn't. So, set it explicitly because other code later depends
+ // on it being set.
+ UID: types.UID("1234-UID"),
+ Labels: map[string]string{
+ "tailscale.com/proxy-class": "metrics",
+ },
+ },
+ Spec: networkingv1.IngressSpec{
+ IngressClassName: ptr.To("tailscale"),
+ DefaultBackend: &networkingv1.IngressBackend{
+ Service: &networkingv1.IngressServiceBackend{
+ Name: "test",
+ Port: networkingv1.ServiceBackendPort{
+ Number: 8080,
+ },
+ },
+ },
+ TLS: []networkingv1.IngressTLS{
+ {Hosts: []string{"default-test"}},
+ },
+ },
+ }
+ mustCreate(t, fc, ing)
+ mustCreate(t, fc, &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "default",
+ },
+ Spec: corev1.ServiceSpec{
+ ClusterIP: "1.2.3.4",
+ Ports: []corev1.ServicePort{{
+ Port: 8080,
+ Name: "http"},
+ },
+ },
+ })
+
+ expectReconciled(t, ingR, "default", "test")
+
+ fullName, shortName := findGenName(t, fc, "default", "test", "ingress")
+ opts := configOpts{
+ stsName: shortName,
+ secretName: fullName,
+ namespace: "default",
+ tailscaleNamespace: "operator-ns",
+ parentType: "ingress",
+ hostname: "default-test",
+ app: kubetypes.AppIngressResource,
+ enableMetrics: true,
+ namespaced: true,
+ proxyType: proxyTypeIngressResource,
+ }
+ serveConfig := &ipn.ServeConfig{
+ TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
+ Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}},
+ }
+ opts.serveConfig = serveConfig
+
+ expectEqual(t, fc, expectedSecret(t, fc, opts), nil)
+ expectEqual(t, fc, expectedHeadlessService(shortName, "ingress"), nil)
+ expectEqual(t, fc, expectedMetricsService(opts), nil)
+ expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation)
+ // 2. Enable ServiceMonitor - should not error when there is no ServiceMonitor CRD in cluster
+ mustUpdate(t, fc, "", "metrics", func(pc *tsapi.ProxyClass) {
+ pc.Spec.Metrics.ServiceMonitor = &tsapi.ServiceMonitor{Enable: true}
+ })
+ expectReconciled(t, ingR, "default", "test")
+ // 3. Create ServiceMonitor CRD and reconcile- ServiceMonitor should get created
+ mustCreate(t, fc, crd)
+ expectReconciled(t, ingR, "default", "test")
+ expectEqualUnstructured(t, fc, expectedServiceMonitor(t, opts))
+}
diff --git a/cmd/k8s-operator/metrics_resources.go b/cmd/k8s-operator/metrics_resources.go
new file mode 100644
index 0000000000000..4881436e8e184
--- /dev/null
+++ b/cmd/k8s-operator/metrics_resources.go
@@ -0,0 +1,272 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !plan9
+
+package main
+
+import (
+ "context"
+ "fmt"
+
+ "go.uber.org/zap"
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
+)
+
+const (
+ labelMetricsTarget = "tailscale.com/metrics-target"
+
+ // These labels get transferred from the metrics Service to the ingested Prometheus metrics.
+ labelPromProxyType = "ts_proxy_type"
+ labelPromProxyParentName = "ts_proxy_parent_name"
+ labelPromProxyParentNamespace = "ts_proxy_parent_namespace"
+ labelPromJob = "ts_prom_job"
+
+ serviceMonitorCRD = "servicemonitors.monitoring.coreos.com"
+)
+
+// ServiceMonitor contains a subset of fields of servicemonitors.monitoring.coreos.com Custom Resource Definition.
+// Duplicating it here allows us to avoid importing prometheus-operator library.
+// https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L40
+type ServiceMonitor struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+ Spec ServiceMonitorSpec `json:"spec"`
+}
+
+// https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L55
+type ServiceMonitorSpec struct {
+ // Endpoints defines the endpoints to be scraped on the selected Service(s).
+ // https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L82
+ Endpoints []ServiceMonitorEndpoint `json:"endpoints"`
+ // JobLabel is the label on the Service whose value will become the value of the Prometheus job label for the metrics ingested via this ServiceMonitor.
+ // https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L66
+ JobLabel string `json:"jobLabel"`
+ // NamespaceSelector selects the namespace of Service(s) that this ServiceMonitor allows to scrape.
+ // https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L88
+ NamespaceSelector ServiceMonitorNamespaceSelector `json:"namespaceSelector,omitempty"`
+ // Selector is the label selector for Service(s) that this ServiceMonitor allows to scrape.
+ // https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L85
+ Selector metav1.LabelSelector `json:"selector"`
+ // TargetLabels are labels on the selected Service that should be applied as Prometheus labels to the ingested metrics.
+ // https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L72
+ TargetLabels []string `json:"targetLabels"`
+}
+
+// ServiceMonitorNamespaceSelector selects namespaces in which Prometheus operator will attempt to find Services for
+// this ServiceMonitor.
+// https://github.com/prometheus-operator/prometheus-operator/blob/bb4514e0d5d69f20270e29cfd4ad39b87865ccdf/pkg/apis/monitoring/v1/servicemonitor_types.go#L88
+type ServiceMonitorNamespaceSelector struct {
+ MatchNames []string `json:"matchNames,omitempty"`
+}
+
+// ServiceMonitorEndpoint defines an endpoint of Service to scrape. We only define port here. Prometheus by default
+// scrapes /metrics path, which is what we want.
+type ServiceMonitorEndpoint struct {
+ // Port is the name of the Service port that Prometheus will scrape.
+ Port string `json:"port,omitempty"`
+}
+
+func reconcileMetricsResources(ctx context.Context, logger *zap.SugaredLogger, opts *metricsOpts, pc *tsapi.ProxyClass, cl client.Client) error {
+	if opts.proxyType == proxyTypeEgress {
+		// Metrics are currently not being enabled for standalone egress proxies.
+		return nil
+	}
+	if pc == nil || pc.Spec.Metrics == nil || !pc.Spec.Metrics.Enable {
+		return maybeCleanupMetricsResources(ctx, opts, cl)
+	}
+	metricsSvc := &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      metricsResourceName(opts.proxyStsName),
+			Namespace: opts.tsNamespace,
+			Labels:    metricsResourceLabels(opts),
+		},
+		Spec: corev1.ServiceSpec{
+			Selector: opts.proxyLabels,
+			Type:     corev1.ServiceTypeClusterIP,
+			Ports:    []corev1.ServicePort{{Protocol: "TCP", Port: 9002, Name: "metrics"}},
+		},
+	}
+	var err error
+	metricsSvc, err = createOrUpdate(ctx, cl, opts.tsNamespace, metricsSvc, func(svc *corev1.Service) {
+		svc.Spec.Ports = metricsSvc.Spec.Ports
+		svc.Spec.Selector = metricsSvc.Spec.Selector
+	})
+	if err != nil {
+		return fmt.Errorf("error ensuring metrics Service: %w", err)
+	}
+
+	crdExists, err := hasServiceMonitorCRD(ctx, cl)
+	if err != nil {
+		return fmt.Errorf("error verifying that %q CRD exists: %w", serviceMonitorCRD, err)
+	}
+	if !crdExists {
+		return nil
+	}
+
+	if pc.Spec.Metrics.ServiceMonitor == nil || !pc.Spec.Metrics.ServiceMonitor.Enable {
+		return maybeCleanupServiceMonitor(ctx, cl, opts.proxyStsName, opts.tsNamespace)
+	}
+
+	logger.Infof("ensuring ServiceMonitor for metrics Service %s/%s", metricsSvc.Namespace, metricsSvc.Name)
+	svcMonitor, err := newServiceMonitor(metricsSvc)
+	if err != nil {
+		return fmt.Errorf("error creating ServiceMonitor: %w", err)
+	}
+	// We don't use createOrUpdate here because that does not work with unstructured types. We also do not update
+	// the ServiceMonitor because it is not expected that any of its fields would change. Currently this is good
+	// enough, but in future we might want to add logic to create-or-update unstructured types.
+	err = cl.Get(ctx, client.ObjectKeyFromObject(metricsSvc), svcMonitor.DeepCopy())
+	if apierrors.IsNotFound(err) {
+		if err := cl.Create(ctx, svcMonitor); err != nil {
+			return fmt.Errorf("error creating ServiceMonitor: %w", err)
+		}
+		return nil
+	}
+	if err != nil {
+		return fmt.Errorf("error getting ServiceMonitor: %w", err)
+	}
+	return nil
+}
+
+// maybeCleanupMetricsResources ensures that any metrics resources created for a proxy are deleted. Only metrics Service
+// gets deleted explicitly because the ServiceMonitor has Service's owner reference, so gets garbage collected
+// automatically.
+func maybeCleanupMetricsResources(ctx context.Context, opts *metricsOpts, cl client.Client) error {
+ sel := metricsSvcSelector(opts.proxyLabels, opts.proxyType)
+ return cl.DeleteAllOf(ctx, &corev1.Service{}, client.InNamespace(opts.tsNamespace), client.MatchingLabels(sel))
+}
+
+// maybeCleanupServiceMonitor cleans up any ServiceMonitor created for the named proxy StatefulSet.
+func maybeCleanupServiceMonitor(ctx context.Context, cl client.Client, stsName, ns string) error {
+	smName := metricsResourceName(stsName)
+	sm := serviceMonitorTemplate(smName, ns)
+	u, err := serviceMonitorToUnstructured(sm)
+	if err != nil {
+		return fmt.Errorf("error building ServiceMonitor: %w", err)
+	}
+	err = cl.Get(ctx, types.NamespacedName{Name: smName, Namespace: ns}, u)
+	if apierrors.IsNotFound(err) {
+		return nil // nothing to do
+	}
+	if err != nil {
+		return fmt.Errorf("error verifying if ServiceMonitor %s/%s exists: %w", ns, smName, err)
+	}
+	return cl.Delete(ctx, u)
+}
+
+// newServiceMonitor takes a metrics Service created for a proxy and constructs and returns a ServiceMonitor for that
+// proxy that can be applied to the kube API server.
+// The ServiceMonitor is returned as Unstructured type - this allows us to avoid importing prometheus-operator API server client/schema.
+func newServiceMonitor(metricsSvc *corev1.Service) (*unstructured.Unstructured, error) {
+ sm := serviceMonitorTemplate(metricsSvc.Name, metricsSvc.Namespace)
+ sm.ObjectMeta.Labels = metricsSvc.Labels
+ sm.ObjectMeta.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(metricsSvc, corev1.SchemeGroupVersion.WithKind("Service"))}
+ sm.Spec = ServiceMonitorSpec{
+ Selector: metav1.LabelSelector{MatchLabels: metricsSvc.Labels},
+ Endpoints: []ServiceMonitorEndpoint{{
+ Port: "metrics",
+ }},
+ NamespaceSelector: ServiceMonitorNamespaceSelector{
+ MatchNames: []string{metricsSvc.Namespace},
+ },
+ JobLabel: labelPromJob,
+ TargetLabels: []string{
+ labelPromProxyParentName,
+ labelPromProxyParentNamespace,
+ labelPromProxyType,
+ },
+ }
+ return serviceMonitorToUnstructured(sm)
+}
+
+// serviceMonitorToUnstructured takes a ServiceMonitor and converts it to Unstructured type that can be used by the c/r
+// client in Kubernetes API server calls.
+func serviceMonitorToUnstructured(sm *ServiceMonitor) (*unstructured.Unstructured, error) {
+ contents, err := runtime.DefaultUnstructuredConverter.ToUnstructured(sm)
+ if err != nil {
+ return nil, fmt.Errorf("error converting ServiceMonitor to Unstructured: %w", err)
+ }
+ u := &unstructured.Unstructured{}
+ u.SetUnstructuredContent(contents)
+ u.SetGroupVersionKind(sm.GroupVersionKind())
+ return u, nil
+}
+
+// metricsResourceName returns name for metrics Service and ServiceMonitor for a proxy StatefulSet.
+func metricsResourceName(stsName string) string {
+	// Maximum length of a StatefulSet name is 52 chars, so appending "-metrics" stays within the 63-char name limit.
+	return fmt.Sprintf("%s-metrics", stsName)
+}
+
+// metricsResourceLabels constructs labels that will be applied to metrics Service and metrics ServiceMonitor for a
+// proxy.
+func metricsResourceLabels(opts *metricsOpts) map[string]string {
+ lbls := map[string]string{
+ LabelManaged: "true",
+ labelMetricsTarget: opts.proxyStsName,
+ labelPromProxyType: opts.proxyType,
+ labelPromProxyParentName: opts.proxyLabels[LabelParentName],
+ }
+ // Include namespace label for proxies created for a namespaced type.
+ if isNamespacedProxyType(opts.proxyType) {
+ lbls[labelPromProxyParentNamespace] = opts.proxyLabels[LabelParentNamespace]
+ }
+ lbls[labelPromJob] = promJobName(opts)
+ return lbls
+}
+
+// promJobName constructs the value of the Prometheus job label that will apply to all metrics for a ServiceMonitor.
+func promJobName(opts *metricsOpts) string {
+ // Include parent resource namespace for proxies created for namespaced types.
+ if opts.proxyType == proxyTypeIngressResource || opts.proxyType == proxyTypeIngressService {
+ return fmt.Sprintf("ts_%s_%s_%s", opts.proxyType, opts.proxyLabels[LabelParentNamespace], opts.proxyLabels[LabelParentName])
+ }
+ return fmt.Sprintf("ts_%s_%s", opts.proxyType, opts.proxyLabels[LabelParentName])
+}
+
+// metricsSvcSelector returns the minimum label set to uniquely identify a metrics Service for a proxy.
+func metricsSvcSelector(proxyLabels map[string]string, proxyType string) map[string]string {
+ sel := map[string]string{
+ labelPromProxyType: proxyType,
+ labelPromProxyParentName: proxyLabels[LabelParentName],
+ }
+ // Include namespace label for proxies created for a namespaced type.
+ if isNamespacedProxyType(proxyType) {
+ sel[labelPromProxyParentNamespace] = proxyLabels[LabelParentNamespace]
+ }
+ return sel
+}
+
+// serviceMonitorTemplate returns a base ServiceMonitor type that, when converted to Unstructured, is a valid type that
+// can be used in kube API server calls via the c/r client.
+func serviceMonitorTemplate(name, ns string) *ServiceMonitor {
+ return &ServiceMonitor{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ServiceMonitor",
+ APIVersion: "monitoring.coreos.com/v1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: ns,
+ },
+ }
+}
+
+type metricsOpts struct {
+ proxyStsName string // name of StatefulSet for proxy
+ tsNamespace string // namespace in which Tailscale is installed
+ proxyLabels map[string]string // labels of the proxy StatefulSet
+ proxyType string
+}
+
+func isNamespacedProxyType(typ string) bool {
+ return typ == proxyTypeIngressResource || typ == proxyTypeIngressService
+}
diff --git a/cmd/k8s-operator/nameserver.go b/cmd/k8s-operator/nameserver.go
index 52577c929acea..ef0762a1234e6 100644
--- a/cmd/k8s-operator/nameserver.go
+++ b/cmd/k8s-operator/nameserver.go
@@ -9,6 +9,7 @@ import (
"context"
"fmt"
"slices"
+ "strings"
"sync"
_ "embed"
@@ -86,7 +87,7 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ
return reconcile.Result{}, nil
}
logger.Info("Cleaning up DNSConfig resources")
- if err := a.maybeCleanup(ctx, &dnsCfg, logger); err != nil {
+ if err := a.maybeCleanup(&dnsCfg); err != nil {
logger.Errorf("error cleaning up reconciler resource: %v", err)
return res, err
}
@@ -100,9 +101,9 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ
}
oldCnStatus := dnsCfg.Status.DeepCopy()
- setStatus := func(dnsCfg *tsapi.DNSConfig, conditionType tsapi.ConditionType, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) {
+ setStatus := func(dnsCfg *tsapi.DNSConfig, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) {
tsoperator.SetDNSConfigCondition(dnsCfg, tsapi.NameserverReady, status, reason, message, dnsCfg.Generation, a.clock, logger)
- if !apiequality.Semantic.DeepEqual(oldCnStatus, dnsCfg.Status) {
+ if !apiequality.Semantic.DeepEqual(oldCnStatus, &dnsCfg.Status) {
// An error encountered here should get returned by the Reconcile function.
if updateErr := a.Client.Status().Update(ctx, dnsCfg); updateErr != nil {
err = errors.Wrap(err, updateErr.Error())
@@ -118,7 +119,7 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ
msg := "invalid cluster configuration: more than one tailscale.com/dnsconfigs found. Please ensure that no more than one is created."
logger.Error(msg)
a.recorder.Event(&dnsCfg, corev1.EventTypeWarning, reasonMultipleDNSConfigsPresent, messageMultipleDNSConfigsPresent)
- setStatus(&dnsCfg, tsapi.NameserverReady, metav1.ConditionFalse, reasonMultipleDNSConfigsPresent, messageMultipleDNSConfigsPresent)
+ setStatus(&dnsCfg, metav1.ConditionFalse, reasonMultipleDNSConfigsPresent, messageMultipleDNSConfigsPresent)
}
if !slices.Contains(dnsCfg.Finalizers, FinalizerName) {
@@ -127,11 +128,16 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ
if err := a.Update(ctx, &dnsCfg); err != nil {
msg := fmt.Sprintf(messageNameserverCreationFailed, err)
logger.Error(msg)
- return setStatus(&dnsCfg, tsapi.NameserverReady, metav1.ConditionFalse, reasonNameserverCreationFailed, msg)
+ return setStatus(&dnsCfg, metav1.ConditionFalse, reasonNameserverCreationFailed, msg)
}
}
if err := a.maybeProvision(ctx, &dnsCfg, logger); err != nil {
- return reconcile.Result{}, fmt.Errorf("error provisioning nameserver resources: %w", err)
+ if strings.Contains(err.Error(), optimisticLockErrorMsg) {
+ logger.Infof("optimistic lock error, retrying: %s", err)
+ return reconcile.Result{}, nil
+ } else {
+ return reconcile.Result{}, fmt.Errorf("error provisioning nameserver resources: %w", err)
+ }
}
a.mu.Lock()
@@ -149,7 +155,7 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ
dnsCfg.Status.Nameserver = &tsapi.NameserverStatus{
IP: ip,
}
- return setStatus(&dnsCfg, tsapi.NameserverReady, metav1.ConditionTrue, reasonNameserverCreated, reasonNameserverCreated)
+ return setStatus(&dnsCfg, metav1.ConditionTrue, reasonNameserverCreated, reasonNameserverCreated)
}
logger.Info("nameserver Service does not have an IP address allocated, waiting...")
return reconcile.Result{}, nil
@@ -188,7 +194,7 @@ func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsa
// maybeCleanup removes DNSConfig from being tracked. The cluster resources
// created, will be automatically garbage collected as they are owned by the
// DNSConfig.
-func (a *NameserverReconciler) maybeCleanup(ctx context.Context, dnsCfg *tsapi.DNSConfig, logger *zap.SugaredLogger) error {
+func (a *NameserverReconciler) maybeCleanup(dnsCfg *tsapi.DNSConfig) error {
a.mu.Lock()
a.managedNameservers.Remove(dnsCfg.UID)
a.mu.Unlock()
diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go
index bd9c0f7bcd5b0..ebb2c4578ab93 100644
--- a/cmd/k8s-operator/operator.go
+++ b/cmd/k8s-operator/operator.go
@@ -11,6 +11,7 @@ import (
"context"
"os"
"regexp"
+ "strconv"
"strings"
"time"
@@ -23,8 +24,11 @@ import (
discoveryv1 "k8s.io/api/discovery/v1"
networkingv1 "k8s.io/api/networking/v1"
rbacv1 "k8s.io/api/rbac/v1"
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ "k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/rest"
+ toolscache "k8s.io/client-go/tools/cache"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -143,12 +147,20 @@ func initTSNet(zlog *zap.SugaredLogger) (*tsnet.Server, *tailscale.Client) {
TokenURL: "https://login.tailscale.com/api/v2/oauth/token",
}
tsClient := tailscale.NewClient("-", nil)
+ tsClient.UserAgent = "tailscale-k8s-operator"
tsClient.HTTPClient = credentials.Client(context.Background())
s := &tsnet.Server{
Hostname: hostname,
Logf: zlog.Named("tailscaled").Debugf,
}
+ if p := os.Getenv("TS_PORT"); p != "" {
+ port, err := strconv.ParseUint(p, 10, 16)
+ if err != nil {
+ startlog.Fatalf("TS_PORT %q cannot be parsed as uint16: %v", p, err)
+ }
+ s.Port = uint16(port)
+ }
if kubeSecret != "" {
st, err := kubestore.New(logger.Discard, kubeSecret)
if err != nil {
@@ -230,21 +242,29 @@ func runReconcilers(opts reconcilerOpts) {
nsFilter := cache.ByObject{
Field: client.InNamespace(opts.tailscaleNamespace).AsSelector(),
}
+ // We watch the ServiceMonitor CRD to ensure that reconcilers are re-triggered if user's workflows result in the
+ // ServiceMonitor CRD applied after some of our resources that define ServiceMonitor creation. This selector
+ // ensures that we only watch the ServiceMonitor CRD and that we don't cache full contents of it.
+ serviceMonitorSelector := cache.ByObject{
+ Field: fields.SelectorFromSet(fields.Set{"metadata.name": serviceMonitorCRD}),
+ Transform: crdTransformer(startlog),
+ }
mgrOpts := manager.Options{
// TODO (irbekrm): stricter filtering what we watch/cache/call
// reconcilers on. c/r by default starts a watch on any
// resources that we GET via the controller manager's client.
Cache: cache.Options{
ByObject: map[client.Object]cache.ByObject{
- &corev1.Secret{}: nsFilter,
- &corev1.ServiceAccount{}: nsFilter,
- &corev1.Pod{}: nsFilter,
- &corev1.ConfigMap{}: nsFilter,
- &appsv1.StatefulSet{}: nsFilter,
- &appsv1.Deployment{}: nsFilter,
- &discoveryv1.EndpointSlice{}: nsFilter,
- &rbacv1.Role{}: nsFilter,
- &rbacv1.RoleBinding{}: nsFilter,
+ &corev1.Secret{}: nsFilter,
+ &corev1.ServiceAccount{}: nsFilter,
+ &corev1.Pod{}: nsFilter,
+ &corev1.ConfigMap{}: nsFilter,
+ &appsv1.StatefulSet{}: nsFilter,
+ &appsv1.Deployment{}: nsFilter,
+ &discoveryv1.EndpointSlice{}: nsFilter,
+ &rbacv1.Role{}: nsFilter,
+ &rbacv1.RoleBinding{}: nsFilter,
+ &apiextensionsv1.CustomResourceDefinition{}: serviceMonitorSelector,
},
},
Scheme: tsapi.GlobalScheme,
@@ -413,8 +433,13 @@ func runReconcilers(opts reconcilerOpts) {
startlog.Fatalf("could not create egress EndpointSlices reconciler: %v", err)
}
+	// The ProxyClass reconciler is also triggered on ServiceMonitor CRD changes so that any ProxyClasses that
+	// request a ServiceMonitor, but were marked invalid because the CRD did not exist at the time, get
+	// re-reconciled once the CRD is applied at a later point.
+ serviceMonitorFilter := handler.EnqueueRequestsFromMapFunc(proxyClassesWithServiceMonitor(mgr.GetClient(), opts.log))
err = builder.ControllerManagedBy(mgr).
For(&tsapi.ProxyClass{}).
+ Watches(&apiextensionsv1.CustomResourceDefinition{}, serviceMonitorFilter).
Complete(&ProxyClassReconciler{
Client: mgr.GetClient(),
recorder: eventRecorder,
@@ -1009,6 +1034,49 @@ func epsFromExternalNameService(cl client.Client, logger *zap.SugaredLogger, ns
}
}
+// proxyClassesWithServiceMonitor returns an event handler that, given that the event is for the Prometheus
+// ServiceMonitor CRD, returns all ProxyClasses that define that a ServiceMonitor should be created.
+func proxyClassesWithServiceMonitor(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc {
+ return func(ctx context.Context, o client.Object) []reconcile.Request {
+ crd, ok := o.(*apiextensionsv1.CustomResourceDefinition)
+ if !ok {
+ logger.Debugf("[unexpected] ServiceMonitor CRD handler received an object that is not a CustomResourceDefinition")
+ return nil
+ }
+ if crd.Name != serviceMonitorCRD {
+ logger.Debugf("[unexpected] ServiceMonitor CRD handler received an unexpected CRD %q", crd.Name)
+ return nil
+ }
+ pcl := &tsapi.ProxyClassList{}
+ if err := cl.List(ctx, pcl); err != nil {
+ logger.Debugf("[unexpected] error listing ProxyClasses: %v", err)
+ return nil
+ }
+ reqs := make([]reconcile.Request, 0)
+ for _, pc := range pcl.Items {
+ if pc.Spec.Metrics != nil && pc.Spec.Metrics.ServiceMonitor != nil && pc.Spec.Metrics.ServiceMonitor.Enable {
+ reqs = append(reqs, reconcile.Request{
+ NamespacedName: types.NamespacedName{Namespace: pc.Namespace, Name: pc.Name},
+ })
+ }
+ }
+ return reqs
+ }
+}
+
+// crdTransformer gets called before a CRD is stored to c/r cache, it removes the CRD spec to reduce memory consumption.
+func crdTransformer(log *zap.SugaredLogger) toolscache.TransformFunc {
+	return func(o any) (any, error) {
+		crd, ok := o.(*apiextensionsv1.CustomResourceDefinition)
+		if !ok {
+			log.Infof("[unexpected] CRD transformer called for a non-CRD type")
+			return o, nil
+		}
+		crd.Spec = apiextensionsv1.CustomResourceDefinitionSpec{}
+		return crd, nil
+	}
+}
+
// indexEgressServices adds a local index to a cached Tailscale egress Services meant to be exposed on a ProxyGroup. The
// index is used a list filter.
func indexEgressServices(o client.Object) []string {
diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go
index 21e1d4313749e..e46cdd7fe6e45 100644
--- a/cmd/k8s-operator/operator_test.go
+++ b/cmd/k8s-operator/operator_test.go
@@ -432,6 +432,148 @@ func TestTailnetTargetIPAnnotation(t *testing.T) {
expectMissing[corev1.Secret](t, fc, "operator-ns", fullName)
}
+func TestTailnetTargetIPAnnotation_IPCouldNotBeParsed(t *testing.T) {
+ fc := fake.NewFakeClient()
+ ft := &fakeTSClient{}
+ zl, err := zap.NewDevelopment()
+ if err != nil {
+ t.Fatal(err)
+ }
+ clock := tstest.NewClock(tstest.ClockOpts{})
+ sr := &ServiceReconciler{
+ Client: fc,
+ ssr: &tailscaleSTSReconciler{
+ Client: fc,
+ tsClient: ft,
+ defaultTags: []string{"tag:k8s"},
+ operatorNamespace: "operator-ns",
+ proxyImage: "tailscale/tailscale",
+ },
+ logger: zl.Sugar(),
+ clock: clock,
+ recorder: record.NewFakeRecorder(100),
+ }
+ tailnetTargetIP := "invalid-ip"
+ mustCreate(t, fc, &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "default",
+
+ UID: types.UID("1234-UID"),
+ Annotations: map[string]string{
+ AnnotationTailnetTargetIP: tailnetTargetIP,
+ },
+ },
+ Spec: corev1.ServiceSpec{
+ ClusterIP: "10.20.30.40",
+ Type: corev1.ServiceTypeLoadBalancer,
+ LoadBalancerClass: ptr.To("tailscale"),
+ },
+ })
+
+ expectReconciled(t, sr, "default", "test")
+
+ t0 := conditionTime(clock)
+
+ want := &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "default",
+ UID: types.UID("1234-UID"),
+ Annotations: map[string]string{
+ AnnotationTailnetTargetIP: tailnetTargetIP,
+ },
+ },
+ Spec: corev1.ServiceSpec{
+ ClusterIP: "10.20.30.40",
+ Type: corev1.ServiceTypeLoadBalancer,
+ LoadBalancerClass: ptr.To("tailscale"),
+ },
+ Status: corev1.ServiceStatus{
+ Conditions: []metav1.Condition{{
+ Type: string(tsapi.ProxyReady),
+ Status: metav1.ConditionFalse,
+ LastTransitionTime: t0,
+ Reason: reasonProxyInvalid,
+ Message: `unable to provision proxy resources: invalid Service: invalid value of annotation tailscale.com/tailnet-ip: "invalid-ip" could not be parsed as a valid IP Address, error: ParseAddr("invalid-ip"): unable to parse IP`,
+ }},
+ },
+ }
+
+ expectEqual(t, fc, want, nil)
+}
+
+func TestTailnetTargetIPAnnotation_InvalidIP(t *testing.T) {
+ fc := fake.NewFakeClient()
+ ft := &fakeTSClient{}
+ zl, err := zap.NewDevelopment()
+ if err != nil {
+ t.Fatal(err)
+ }
+ clock := tstest.NewClock(tstest.ClockOpts{})
+ sr := &ServiceReconciler{
+ Client: fc,
+ ssr: &tailscaleSTSReconciler{
+ Client: fc,
+ tsClient: ft,
+ defaultTags: []string{"tag:k8s"},
+ operatorNamespace: "operator-ns",
+ proxyImage: "tailscale/tailscale",
+ },
+ logger: zl.Sugar(),
+ clock: clock,
+ recorder: record.NewFakeRecorder(100),
+ }
+ tailnetTargetIP := "999.999.999.999"
+ mustCreate(t, fc, &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "default",
+
+ UID: types.UID("1234-UID"),
+ Annotations: map[string]string{
+ AnnotationTailnetTargetIP: tailnetTargetIP,
+ },
+ },
+ Spec: corev1.ServiceSpec{
+ ClusterIP: "10.20.30.40",
+ Type: corev1.ServiceTypeLoadBalancer,
+ LoadBalancerClass: ptr.To("tailscale"),
+ },
+ })
+
+ expectReconciled(t, sr, "default", "test")
+
+ t0 := conditionTime(clock)
+
+ want := &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "default",
+ UID: types.UID("1234-UID"),
+ Annotations: map[string]string{
+ AnnotationTailnetTargetIP: tailnetTargetIP,
+ },
+ },
+ Spec: corev1.ServiceSpec{
+ ClusterIP: "10.20.30.40",
+ Type: corev1.ServiceTypeLoadBalancer,
+ LoadBalancerClass: ptr.To("tailscale"),
+ },
+ Status: corev1.ServiceStatus{
+ Conditions: []metav1.Condition{{
+ Type: string(tsapi.ProxyReady),
+ Status: metav1.ConditionFalse,
+ LastTransitionTime: t0,
+ Reason: reasonProxyInvalid,
+ Message: `unable to provision proxy resources: invalid Service: invalid value of annotation tailscale.com/tailnet-ip: "999.999.999.999" could not be parsed as a valid IP Address, error: ParseAddr("999.999.999.999"): IPv4 field has value >255`,
+ }},
+ },
+ }
+
+ expectEqual(t, fc, want, nil)
+}
+
func TestAnnotations(t *testing.T) {
fc := fake.NewFakeClient()
ft := &fakeTSClient{}
@@ -1246,7 +1388,7 @@ func TestTailscaledConfigfileHash(t *testing.T) {
parentType: "svc",
hostname: "default-test",
clusterTargetIP: "10.20.30.40",
- confFileHash: "e09bededa0379920141cbd0b0dbdf9b8b66545877f9e8397423f5ce3e1ba439e",
+ confFileHash: "acf3467364b0a3ba9b8ee0dd772cb7c2f0bf585e288fa99b7fe4566009ed6041",
app: kubetypes.AppIngressProxy,
}
expectEqual(t, fc, expectedSTS(t, fc, o), nil)
@@ -1257,7 +1399,7 @@ func TestTailscaledConfigfileHash(t *testing.T) {
mak.Set(&svc.Annotations, AnnotationHostname, "another-test")
})
o.hostname = "another-test"
- o.confFileHash = "5d754cf55463135ee34aa9821f2fd8483b53eb0570c3740c84a086304f427684"
+ o.confFileHash = "d4cc13f09f55f4f6775689004f9a466723325b84d2b590692796bfe22aeaa389"
expectReconciled(t, sr, "default", "test")
expectEqual(t, fc, expectedSTS(t, fc, o), nil)
}
diff --git a/cmd/k8s-operator/proxyclass.go b/cmd/k8s-operator/proxyclass.go
index 882a9030fa75d..b781af05adaaa 100644
--- a/cmd/k8s-operator/proxyclass.go
+++ b/cmd/k8s-operator/proxyclass.go
@@ -15,6 +15,7 @@ import (
dockerref "github.com/distribution/reference"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
apivalidation "k8s.io/apimachinery/pkg/api/validation"
@@ -95,14 +96,14 @@ func (pcr *ProxyClassReconciler) Reconcile(ctx context.Context, req reconcile.Re
pcr.mu.Unlock()
oldPCStatus := pc.Status.DeepCopy()
- if errs := pcr.validate(pc); errs != nil {
+ if errs := pcr.validate(ctx, pc); errs != nil {
msg := fmt.Sprintf(messageProxyClassInvalid, errs.ToAggregate().Error())
pcr.recorder.Event(pc, corev1.EventTypeWarning, reasonProxyClassInvalid, msg)
tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, pc.Generation, pcr.clock, logger)
} else {
tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionTrue, reasonProxyClassValid, reasonProxyClassValid, pc.Generation, pcr.clock, logger)
}
- if !apiequality.Semantic.DeepEqual(oldPCStatus, pc.Status) {
+ if !apiequality.Semantic.DeepEqual(oldPCStatus, &pc.Status) {
if err := pcr.Client.Status().Update(ctx, pc); err != nil {
logger.Errorf("error updating ProxyClass status: %v", err)
return reconcile.Result{}, err
@@ -111,7 +112,7 @@ func (pcr *ProxyClassReconciler) Reconcile(ctx context.Context, req reconcile.Re
return reconcile.Result{}, nil
}
-func (pcr *ProxyClassReconciler) validate(pc *tsapi.ProxyClass) (violations field.ErrorList) {
+func (pcr *ProxyClassReconciler) validate(ctx context.Context, pc *tsapi.ProxyClass) (violations field.ErrorList) {
if sts := pc.Spec.StatefulSet; sts != nil {
if len(sts.Labels) > 0 {
if errs := metavalidation.ValidateLabels(sts.Labels, field.NewPath(".spec.statefulSet.labels")); errs != nil {
@@ -160,9 +161,23 @@ func (pcr *ProxyClassReconciler) validate(pc *tsapi.ProxyClass) (violations fiel
violations = append(violations, field.TypeInvalid(field.NewPath("spec", "statefulSet", "pod", "tailscaleInitContainer", "image"), tc.Image, err.Error()))
}
}
+
+ if tc.Debug != nil {
+ violations = append(violations, field.TypeInvalid(field.NewPath("spec", "statefulSet", "pod", "tailscaleInitContainer", "debug"), tc.Debug, "debug settings cannot be configured on the init container"))
+ }
}
}
}
+ if pc.Spec.Metrics != nil && pc.Spec.Metrics.ServiceMonitor != nil && pc.Spec.Metrics.ServiceMonitor.Enable {
+ found, err := hasServiceMonitorCRD(ctx, pcr.Client)
+ if err != nil {
+ pcr.logger.Infof("[unexpected]: error retrieving %q CRD: %v", serviceMonitorCRD, err)
+ // best effort validation - don't error out here
+ } else if !found {
+ msg := fmt.Sprintf("ProxyClass defines that a ServiceMonitor custom resource should be created, but %q CRD was not found", serviceMonitorCRD)
+ violations = append(violations, field.TypeInvalid(field.NewPath("spec", "metrics", "serviceMonitor"), "enable", msg))
+ }
+ }
// We do not validate embedded fields (security context, resource
// requirements etc) as we inherit upstream validation for those fields.
// Invalid values would get rejected by upstream validations at apply
@@ -170,6 +185,16 @@ func (pcr *ProxyClassReconciler) validate(pc *tsapi.ProxyClass) (violations fiel
return violations
}
+func hasServiceMonitorCRD(ctx context.Context, cl client.Client) (bool, error) {
+ sm := &apiextensionsv1.CustomResourceDefinition{}
+ if err := cl.Get(ctx, types.NamespacedName{Name: serviceMonitorCRD}, sm); apierrors.IsNotFound(err) {
+ return false, nil
+ } else if err != nil {
+ return false, err
+ }
+ return true, nil
+}
+
// maybeCleanup removes tailscale.com finalizer and ensures that the ProxyClass
// is no longer counted towards k8s_proxyclass_resources.
func (pcr *ProxyClassReconciler) maybeCleanup(ctx context.Context, logger *zap.SugaredLogger, pc *tsapi.ProxyClass) error {
diff --git a/cmd/k8s-operator/proxyclass_test.go b/cmd/k8s-operator/proxyclass_test.go
index eb68811fc6b94..e6e16e9f9d59f 100644
--- a/cmd/k8s-operator/proxyclass_test.go
+++ b/cmd/k8s-operator/proxyclass_test.go
@@ -8,10 +8,12 @@
package main
import (
+ "context"
"testing"
"time"
"go.uber.org/zap"
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
@@ -134,4 +136,76 @@ func TestProxyClass(t *testing.T) {
"Warning CustomTSEnvVar ProxyClass overrides the default value for EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS env var for tailscale container. Running with custom values for Tailscale env vars is not recommended and might break in the future."}
expectReconciled(t, pcr, "", "test")
expectEvents(t, fr, expectedEvents)
+
+	// 6. A ProxyClass with ServiceMonitor enabled and in a cluster that does not have the ServiceMonitor CRD is invalid
+ pc.Spec.Metrics = &tsapi.Metrics{Enable: true, ServiceMonitor: &tsapi.ServiceMonitor{Enable: true}}
+ mustUpdate(t, fc, "", "test", func(proxyClass *tsapi.ProxyClass) {
+ proxyClass.Spec = pc.Spec
+ })
+ expectReconciled(t, pcr, "", "test")
+ msg = `ProxyClass is not valid: spec.metrics.serviceMonitor: Invalid value: "enable": ProxyClass defines that a ServiceMonitor custom resource should be created, but "servicemonitors.monitoring.coreos.com" CRD was not found`
+ tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar())
+ expectEqual(t, fc, pc, nil)
+ expectedEvent = "Warning ProxyClassInvalid " + msg
+ expectEvents(t, fr, []string{expectedEvent})
+
+ // 7. A ProxyClass with ServiceMonitor enabled and in a cluster that does have the ServiceMonitor CRD is valid
+ crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}}
+ mustCreate(t, fc, crd)
+ expectReconciled(t, pcr, "", "test")
+ tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionTrue, reasonProxyClassValid, reasonProxyClassValid, 0, cl, zl.Sugar())
+ expectEqual(t, fc, pc, nil)
+}
+
+func TestValidateProxyClass(t *testing.T) {
+ for name, tc := range map[string]struct {
+ pc *tsapi.ProxyClass
+ valid bool
+ }{
+ "empty": {
+ valid: true,
+ pc: &tsapi.ProxyClass{},
+ },
+ "debug_enabled_for_main_container": {
+ valid: true,
+ pc: &tsapi.ProxyClass{
+ Spec: tsapi.ProxyClassSpec{
+ StatefulSet: &tsapi.StatefulSet{
+ Pod: &tsapi.Pod{
+ TailscaleContainer: &tsapi.Container{
+ Debug: &tsapi.Debug{
+ Enable: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "debug_enabled_for_init_container": {
+ valid: false,
+ pc: &tsapi.ProxyClass{
+ Spec: tsapi.ProxyClassSpec{
+ StatefulSet: &tsapi.StatefulSet{
+ Pod: &tsapi.Pod{
+ TailscaleInitContainer: &tsapi.Container{
+ Debug: &tsapi.Debug{
+ Enable: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ } {
+ t.Run(name, func(t *testing.T) {
+ pcr := &ProxyClassReconciler{}
+ err := pcr.validate(context.Background(), tc.pc)
+ valid := err == nil
+ if valid != tc.valid {
+ t.Errorf("expected valid=%v, got valid=%v, err=%v", tc.valid, valid, err)
+ }
+ })
+ }
}
diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go
index 1f9983aa98962..39b7ccc01f6fb 100644
--- a/cmd/k8s-operator/proxygroup.go
+++ b/cmd/k8s-operator/proxygroup.go
@@ -12,6 +12,7 @@ import (
"fmt"
"net/http"
"slices"
+ "strings"
"sync"
"github.com/pkg/errors"
@@ -45,9 +46,12 @@ const (
reasonProxyGroupReady = "ProxyGroupReady"
reasonProxyGroupCreating = "ProxyGroupCreating"
reasonProxyGroupInvalid = "ProxyGroupInvalid"
+
+ // Copied from k8s.io/apiserver/pkg/registry/generic/registry/store.go@cccad306d649184bf2a0e319ba830c53f65c445c
+ optimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again"
)
-var gaugeProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupCount)
+var gaugeProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupEgressCount)
// ProxyGroupReconciler ensures cluster resources for a ProxyGroup definition.
type ProxyGroupReconciler struct {
@@ -110,7 +114,7 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ
oldPGStatus := pg.Status.DeepCopy()
setStatusReady := func(pg *tsapi.ProxyGroup, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) {
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, status, reason, message, pg.Generation, r.clock, logger)
- if !apiequality.Semantic.DeepEqual(oldPGStatus, pg.Status) {
+ if !apiequality.Semantic.DeepEqual(oldPGStatus, &pg.Status) {
// An error encountered here should get returned by the Reconcile function.
if updateErr := r.Client.Status().Update(ctx, pg); updateErr != nil {
err = errors.Wrap(err, updateErr.Error())
@@ -166,9 +170,17 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ
}
if err = r.maybeProvision(ctx, pg, proxyClass); err != nil {
- err = fmt.Errorf("error provisioning ProxyGroup resources: %w", err)
- r.recorder.Eventf(pg, corev1.EventTypeWarning, reasonProxyGroupCreationFailed, err.Error())
- return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreationFailed, err.Error())
+ reason := reasonProxyGroupCreationFailed
+ msg := fmt.Sprintf("error provisioning ProxyGroup resources: %s", err)
+ if strings.Contains(err.Error(), optimisticLockErrorMsg) {
+ reason = reasonProxyGroupCreating
+ msg = fmt.Sprintf("optimistic lock error, retrying: %s", err)
+ err = nil
+ logger.Info(msg)
+ } else {
+ r.recorder.Eventf(pg, corev1.EventTypeWarning, reason, msg)
+ }
+ return setStatusReady(pg, metav1.ConditionFalse, reason, msg)
}
desiredReplicas := int(pgReplicas(pg))
@@ -259,6 +271,15 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro
}); err != nil {
return fmt.Errorf("error provisioning StatefulSet: %w", err)
}
+ mo := &metricsOpts{
+ tsNamespace: r.tsNamespace,
+ proxyStsName: pg.Name,
+ proxyLabels: pgLabels(pg.Name, nil),
+ proxyType: "proxygroup",
+ }
+ if err := reconcileMetricsResources(ctx, logger, mo, proxyClass, r.Client); err != nil {
+ return fmt.Errorf("error reconciling metrics resources: %w", err)
+ }
if err := r.cleanupDanglingResources(ctx, pg); err != nil {
return fmt.Errorf("error cleaning up dangling resources: %w", err)
@@ -327,6 +348,14 @@ func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, pg *tsapi.Proxy
}
}
+ mo := &metricsOpts{
+ proxyLabels: pgLabels(pg.Name, nil),
+ tsNamespace: r.tsNamespace,
+ proxyType: "proxygroup"}
+ if err := maybeCleanupMetricsResources(ctx, mo, r.Client); err != nil {
+ return false, fmt.Errorf("error cleaning up metrics resources: %w", err)
+ }
+
logger.Infof("cleaned up ProxyGroup resources")
r.mu.Lock()
r.proxyGroups.Remove(pg.UID)
@@ -353,7 +382,7 @@ func (r *ProxyGroupReconciler) deleteTailnetDevice(ctx context.Context, id tailc
func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (hash string, err error) {
logger := r.logger(pg.Name)
- var allConfigs []tailscaledConfigs
+ var configSHA256Sum string
for i := range pgReplicas(pg) {
cfgSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
@@ -389,7 +418,6 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p
if err != nil {
return "", fmt.Errorf("error creating tailscaled config: %w", err)
}
- allConfigs = append(allConfigs, configs)
for cap, cfg := range configs {
cfgJSON, err := json.Marshal(cfg)
@@ -399,6 +427,32 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p
mak.Set(&cfgSecret.StringData, tsoperator.TailscaledConfigFileName(cap), string(cfgJSON))
}
+ // The config sha256 sum is a value for a hash annotation used to trigger
+ // pod restarts when tailscaled config changes. Any config changes apply
+ // to all replicas, so it is sufficient to only hash the config for the
+ // first replica.
+ //
+ // In future, we're aiming to eliminate restarts altogether and have
+ // pods dynamically reload their config when it changes.
+ if i == 0 {
+ sum := sha256.New()
+ for _, cfg := range configs {
+ // Zero out the auth key so it doesn't affect the sha256 hash when we
+ // remove it from the config after the pods have all authed. Otherwise
+ // all the pods will need to restart immediately after authing.
+ cfg.AuthKey = nil
+ b, err := json.Marshal(cfg)
+ if err != nil {
+ return "", err
+ }
+ if _, err := sum.Write(b); err != nil {
+ return "", err
+ }
+ }
+
+ configSHA256Sum = fmt.Sprintf("%x", sum.Sum(nil))
+ }
+
if existingCfgSecret != nil {
logger.Debugf("patching the existing ProxyGroup config Secret %s", cfgSecret.Name)
if err := r.Patch(ctx, cfgSecret, client.MergeFrom(existingCfgSecret)); err != nil {
@@ -412,16 +466,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p
}
}
- sum := sha256.New()
- b, err := json.Marshal(allConfigs)
- if err != nil {
- return "", err
- }
- if _, err := sum.Write(b); err != nil {
- return "", err
- }
-
- return fmt.Sprintf("%x", sum.Sum(nil)), nil
+ return configSHA256Sum, nil
}
func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32, authKey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) {
diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go
index 9aa7ac3b008a3..b47cb39b1e9c6 100644
--- a/cmd/k8s-operator/proxygroup_specs.go
+++ b/cmd/k8s-operator/proxygroup_specs.go
@@ -15,6 +15,7 @@ import (
"sigs.k8s.io/yaml"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/kube/egressservices"
+ "tailscale.com/kube/kubetypes"
"tailscale.com/types/ptr"
)
@@ -92,6 +93,10 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHa
c.Image = image
c.VolumeMounts = func() []corev1.VolumeMount {
var mounts []corev1.VolumeMount
+
+ // TODO(tomhjp): Read config directly from the secret instead. The
+ // mounts change on scaling up/down which causes unnecessary restarts
+ // for pods that haven't meaningfully changed.
for i := range pgReplicas(pg) {
mounts = append(mounts, corev1.VolumeMount{
Name: fmt.Sprintf("tailscaledconfig-%d", i),
@@ -121,15 +126,6 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHa
},
},
},
- {
- Name: "POD_NAME",
- ValueFrom: &corev1.EnvVarSource{
- FieldRef: &corev1.ObjectFieldSelector{
- // Secret is named after the pod.
- FieldPath: "metadata.name",
- },
- },
- },
{
Name: "TS_KUBE_SECRET",
Value: "$(POD_NAME)",
@@ -143,8 +139,8 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHa
Value: "/etc/tsconfig/$(POD_NAME)",
},
{
- Name: "TS_USERSPACE",
- Value: "false",
+ Name: "TS_INTERNAL_APP",
+ Value: kubetypes.AppProxyGroupEgress,
},
}
@@ -162,7 +158,7 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHa
})
}
- return envs
+ return append(c.Env, envs...)
}()
return ss, nil
@@ -206,6 +202,15 @@ func pgRole(pg *tsapi.ProxyGroup, namespace string) *rbacv1.Role {
return secrets
}(),
},
+ {
+ APIGroups: []string{""},
+ Resources: []string{"events"},
+ Verbs: []string{
+ "create",
+ "patch",
+ "get",
+ },
+ },
},
}
}
diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go
index 445db7537ddb6..9c4df9e4f9302 100644
--- a/cmd/k8s-operator/proxygroup_test.go
+++ b/cmd/k8s-operator/proxygroup_test.go
@@ -17,6 +17,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -35,6 +36,8 @@ var defaultProxyClassAnnotations = map[string]string{
}
func TestProxyGroup(t *testing.T) {
+ const initialCfgHash = "6632726be70cf224049580deb4d317bba065915b5fd415461d60ed621c91b196"
+
pc := &tsapi.ProxyClass{
ObjectMeta: metav1.ObjectMeta{
Name: "default-pc",
@@ -74,12 +77,20 @@ func TestProxyGroup(t *testing.T) {
l: zl.Sugar(),
clock: cl,
}
+ crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}}
+ opts := configOpts{
+ proxyType: "proxygroup",
+ stsName: pg.Name,
+ parentType: "proxygroup",
+ tailscaleNamespace: "tailscale",
+ }
t.Run("proxyclass_not_ready", func(t *testing.T) {
expectReconciled(t, reconciler, "", pg.Name)
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "the ProxyGroup's ProxyClass default-pc is not yet in a ready state, waiting...", 0, cl, zl.Sugar())
expectEqual(t, fc, pg, nil)
+ expectProxyGroupResources(t, fc, pg, false, "")
})
t.Run("observe_ProxyGroupCreating_status_reason", func(t *testing.T) {
@@ -100,10 +111,11 @@ func TestProxyGroup(t *testing.T) {
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar())
expectEqual(t, fc, pg, nil)
+ expectProxyGroupResources(t, fc, pg, true, initialCfgHash)
if expected := 1; reconciler.proxyGroups.Len() != expected {
t.Fatalf("expected %d recorders, got %d", expected, reconciler.proxyGroups.Len())
}
- expectProxyGroupResources(t, fc, pg, true)
+ expectProxyGroupResources(t, fc, pg, true, initialCfgHash)
keyReq := tailscale.KeyCapabilities{
Devices: tailscale.KeyDeviceCapabilities{
Create: tailscale.KeyDeviceCreateCapabilities{
@@ -135,7 +147,7 @@ func TestProxyGroup(t *testing.T) {
}
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar())
expectEqual(t, fc, pg, nil)
- expectProxyGroupResources(t, fc, pg, true)
+ expectProxyGroupResources(t, fc, pg, true, initialCfgHash)
})
t.Run("scale_up_to_3", func(t *testing.T) {
@@ -146,6 +158,7 @@ func TestProxyGroup(t *testing.T) {
expectReconciled(t, reconciler, "", pg.Name)
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar())
expectEqual(t, fc, pg, nil)
+ expectProxyGroupResources(t, fc, pg, true, initialCfgHash)
addNodeIDToStateSecrets(t, fc, pg)
expectReconciled(t, reconciler, "", pg.Name)
@@ -155,7 +168,7 @@ func TestProxyGroup(t *testing.T) {
TailnetIPs: []string{"1.2.3.4", "::1"},
})
expectEqual(t, fc, pg, nil)
- expectProxyGroupResources(t, fc, pg, true)
+ expectProxyGroupResources(t, fc, pg, true, initialCfgHash)
})
t.Run("scale_down_to_1", func(t *testing.T) {
@@ -163,11 +176,47 @@ func TestProxyGroup(t *testing.T) {
mustUpdate(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) {
p.Spec = pg.Spec
})
+
expectReconciled(t, reconciler, "", pg.Name)
+
pg.Status.Devices = pg.Status.Devices[:1] // truncate to only the first device.
expectEqual(t, fc, pg, nil)
+ expectProxyGroupResources(t, fc, pg, true, initialCfgHash)
+ })
+
+ t.Run("trigger_config_change_and_observe_new_config_hash", func(t *testing.T) {
+ pc.Spec.TailscaleConfig = &tsapi.TailscaleConfig{
+ AcceptRoutes: true,
+ }
+ mustUpdate(t, fc, "", pc.Name, func(p *tsapi.ProxyClass) {
+ p.Spec = pc.Spec
+ })
+
+ expectReconciled(t, reconciler, "", pg.Name)
+
+ expectEqual(t, fc, pg, nil)
+ expectProxyGroupResources(t, fc, pg, true, "518a86e9fae64f270f8e0ec2a2ea6ca06c10f725035d3d6caca132cd61e42a74")
+ })
- expectProxyGroupResources(t, fc, pg, true)
+ t.Run("enable_metrics", func(t *testing.T) {
+ pc.Spec.Metrics = &tsapi.Metrics{Enable: true}
+ mustUpdate(t, fc, "", pc.Name, func(p *tsapi.ProxyClass) {
+ p.Spec = pc.Spec
+ })
+ expectReconciled(t, reconciler, "", pg.Name)
+ expectEqual(t, fc, expectedMetricsService(opts), nil)
+ })
+ t.Run("enable_service_monitor_no_crd", func(t *testing.T) {
+ pc.Spec.Metrics.ServiceMonitor = &tsapi.ServiceMonitor{Enable: true}
+ mustUpdate(t, fc, "", pc.Name, func(p *tsapi.ProxyClass) {
+ p.Spec.Metrics = pc.Spec.Metrics
+ })
+ expectReconciled(t, reconciler, "", pg.Name)
+ })
+ t.Run("create_crd_expect_service_monitor", func(t *testing.T) {
+ mustCreate(t, fc, crd)
+ expectReconciled(t, reconciler, "", pg.Name)
+ expectEqualUnstructured(t, fc, expectedServiceMonitor(t, opts))
})
t.Run("delete_and_cleanup", func(t *testing.T) {
@@ -177,7 +226,7 @@ func TestProxyGroup(t *testing.T) {
expectReconciled(t, reconciler, "", pg.Name)
- expectMissing[tsapi.Recorder](t, fc, "", pg.Name)
+ expectMissing[tsapi.ProxyGroup](t, fc, "", pg.Name)
if expected := 0; reconciler.proxyGroups.Len() != expected {
t.Fatalf("expected %d ProxyGroups, got %d", expected, reconciler.proxyGroups.Len())
}
@@ -186,18 +235,19 @@ func TestProxyGroup(t *testing.T) {
if diff := cmp.Diff(tsClient.deleted, []string{"nodeid-1", "nodeid-2", "nodeid-0"}); diff != "" {
t.Fatalf("unexpected deleted devices (-got +want):\n%s", diff)
}
+ expectMissing[corev1.Service](t, reconciler, "tailscale", metricsResourceName(pg.Name))
// The fake client does not clean up objects whose owner has been
// deleted, so we can't test for the owned resources getting deleted.
})
}
-func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup, shouldExist bool) {
+func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup, shouldExist bool, cfgHash string) {
t.Helper()
role := pgRole(pg, tsNamespace)
roleBinding := pgRoleBinding(pg, tsNamespace)
serviceAccount := pgServiceAccount(pg, tsNamespace)
- statefulSet, err := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto", "")
+ statefulSet, err := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto", cfgHash)
if err != nil {
t.Fatal(err)
}
@@ -207,9 +257,7 @@ func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.Prox
expectEqual(t, fc, role, nil)
expectEqual(t, fc, roleBinding, nil)
expectEqual(t, fc, serviceAccount, nil)
- expectEqual(t, fc, statefulSet, func(ss *appsv1.StatefulSet) {
- ss.Spec.Template.Annotations[podAnnotationLastSetConfigFileHash] = ""
- })
+ expectEqual(t, fc, statefulSet, nil)
} else {
expectMissing[rbacv1.Role](t, fc, role.Namespace, role.Name)
expectMissing[rbacv1.RoleBinding](t, fc, roleBinding.Namespace, roleBinding.Name)
@@ -218,11 +266,13 @@ func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.Prox
}
var expectedSecrets []string
- for i := range pgReplicas(pg) {
- expectedSecrets = append(expectedSecrets,
- fmt.Sprintf("%s-%d", pg.Name, i),
- fmt.Sprintf("%s-%d-config", pg.Name, i),
- )
+ if shouldExist {
+ for i := range pgReplicas(pg) {
+ expectedSecrets = append(expectedSecrets,
+ fmt.Sprintf("%s-%d", pg.Name, i),
+ fmt.Sprintf("%s-%d-config", pg.Name, i),
+ )
+ }
}
expectSecrets(t, fc, expectedSecrets)
}
diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go
index 6378a82636939..ff7c074a8b425 100644
--- a/cmd/k8s-operator/sts.go
+++ b/cmd/k8s-operator/sts.go
@@ -15,6 +15,7 @@ import (
"net/http"
"os"
"slices"
+ "strconv"
"strings"
"go.uber.org/zap"
@@ -94,6 +95,12 @@ const (
podAnnotationLastSetTailnetTargetFQDN = "tailscale.com/operator-last-set-ts-tailnet-target-fqdn"
// podAnnotationLastSetConfigFileHash is sha256 hash of the current tailscaled configuration contents.
podAnnotationLastSetConfigFileHash = "tailscale.com/operator-last-set-config-file-hash"
+
+ proxyTypeEgress = "egress_service"
+ proxyTypeIngressService = "ingress_service"
+ proxyTypeIngressResource = "ingress_resource"
+ proxyTypeConnector = "connector"
+ proxyTypeProxyGroup = "proxygroup"
)
var (
@@ -122,6 +129,8 @@ type tailscaleSTSConfig struct {
Hostname string
Tags []string // if empty, use defaultTags
+ proxyType string
+
// Connector specifies a configuration of a Connector instance if that's
// what this StatefulSet should be created for.
Connector *connector
@@ -132,10 +141,13 @@ type tailscaleSTSConfig struct {
}
type connector struct {
- // routes is a list of subnet routes that this Connector should expose.
+ // routes is a list of routes that this Connector should advertise either as a subnet router or as an app
+ // connector.
routes string
// isExitNode defines whether this Connector should act as an exit node.
isExitNode bool
+ // isAppConnector defines whether this Connector should act as an app connector.
+ isAppConnector bool
}
type tsnetServer interface {
CertDomains() []string
@@ -186,22 +198,30 @@ func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.Suga
}
sts.ProxyClass = proxyClass
- secretName, tsConfigHash, configs, err := a.createOrGetSecret(ctx, logger, sts, hsvc)
+ secretName, tsConfigHash, _, err := a.createOrGetSecret(ctx, logger, sts, hsvc)
if err != nil {
return nil, fmt.Errorf("failed to create or get API key secret: %w", err)
}
- _, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretName, tsConfigHash, configs)
+ _, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretName, tsConfigHash)
if err != nil {
return nil, fmt.Errorf("failed to reconcile statefulset: %w", err)
}
-
+ mo := &metricsOpts{
+ proxyStsName: hsvc.Name,
+ tsNamespace: hsvc.Namespace,
+ proxyLabels: hsvc.Labels,
+ proxyType: sts.proxyType,
+ }
+ if err = reconcileMetricsResources(ctx, logger, mo, sts.ProxyClass, a.Client); err != nil {
+ return nil, fmt.Errorf("failed to ensure metrics resources: %w", err)
+ }
return hsvc, nil
}
// Cleanup removes all resources associated that were created by Provision with
// the given labels. It returns true when all resources have been removed,
// otherwise it returns false and the caller should retry later.
-func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.SugaredLogger, labels map[string]string) (done bool, _ error) {
+func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.SugaredLogger, labels map[string]string, typ string) (done bool, _ error) {
// Need to delete the StatefulSet first, and delete it with foreground
// cascading deletion. That way, the pod that's writing to the Secret will
// stop running before we start looking at the Secret's contents, and
@@ -227,21 +247,21 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.Sugare
return false, nil
}
- id, _, _, err := a.DeviceInfo(ctx, labels)
+ dev, err := a.DeviceInfo(ctx, labels, logger)
if err != nil {
return false, fmt.Errorf("getting device info: %w", err)
}
- if id != "" {
- logger.Debugf("deleting device %s from control", string(id))
- if err := a.tsClient.DeleteDevice(ctx, string(id)); err != nil {
+ if dev != nil && dev.id != "" {
+ logger.Debugf("deleting device %s from control", string(dev.id))
+ if err := a.tsClient.DeleteDevice(ctx, string(dev.id)); err != nil {
errResp := &tailscale.ErrResponse{}
if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound {
- logger.Debugf("device %s not found, likely because it has already been deleted from control", string(id))
+ logger.Debugf("device %s not found, likely because it has already been deleted from control", string(dev.id))
} else {
return false, fmt.Errorf("deleting device: %w", err)
}
} else {
- logger.Debugf("device %s deleted from control", string(id))
+ logger.Debugf("device %s deleted from control", string(dev.id))
}
}
@@ -254,6 +274,14 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.Sugare
return false, err
}
}
+ mo := &metricsOpts{
+ proxyLabels: labels,
+ tsNamespace: a.operatorNamespace,
+ proxyType: typ,
+ }
+ if err := maybeCleanupMetricsResources(ctx, mo, a.Client); err != nil {
+ return false, fmt.Errorf("error cleaning up metrics resources: %w", err)
+ }
return true, nil
}
@@ -413,40 +441,66 @@ func sanitizeConfigBytes(c ipn.ConfigVAlpha) string {
// that acts as an operator proxy. It retrieves info from a Kubernetes Secret
// labeled with the provided labels.
// Either of device ID, hostname and IPs can be empty string if not found in the Secret.
-func (a *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map[string]string) (id tailcfg.StableNodeID, hostname string, ips []string, err error) {
+func (a *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map[string]string, logger *zap.SugaredLogger) (dev *device, err error) {
sec, err := getSingleObject[corev1.Secret](ctx, a.Client, a.operatorNamespace, childLabels)
if err != nil {
- return "", "", nil, err
+ return dev, err
}
if sec == nil {
- return "", "", nil, nil
+ return dev, nil
}
+ pod := new(corev1.Pod)
+ if err := a.Get(ctx, types.NamespacedName{Namespace: sec.Namespace, Name: sec.Name}, pod); err != nil && !apierrors.IsNotFound(err) {
+ return dev, nil
+ }
+
+ return deviceInfo(sec, pod, logger)
+}
- return deviceInfo(sec)
+// device contains tailscale state of a proxy device as gathered from its tailscale state Secret.
+type device struct {
+ id tailcfg.StableNodeID // device's stable ID
+ hostname string // MagicDNS name of the device
+ ips []string // Tailscale IPs of the device
+ // ingressDNSName is the L7 Ingress DNS name. In practice this will be the same value as hostname, but only set
+ // when the device has been configured to serve traffic on it via 'tailscale serve'.
+ ingressDNSName string
}
-func deviceInfo(sec *corev1.Secret) (id tailcfg.StableNodeID, hostname string, ips []string, err error) {
- id = tailcfg.StableNodeID(sec.Data["device_id"])
+func deviceInfo(sec *corev1.Secret, pod *corev1.Pod, log *zap.SugaredLogger) (dev *device, err error) {
+ id := tailcfg.StableNodeID(sec.Data[kubetypes.KeyDeviceID])
if id == "" {
- return "", "", nil, nil
+ return dev, nil
}
+ dev = &device{id: id}
// Kubernetes chokes on well-formed FQDNs with the trailing dot, so we have
// to remove it.
- hostname = strings.TrimSuffix(string(sec.Data["device_fqdn"]), ".")
- if hostname == "" {
+ dev.hostname = strings.TrimSuffix(string(sec.Data[kubetypes.KeyDeviceFQDN]), ".")
+ if dev.hostname == "" {
// Device ID gets stored and retrieved in a different flow than
// FQDN and IPs. A device that acts as Kubernetes operator
- // proxy, but whose route setup has failed might have an device
+ // proxy, but whose route setup has failed might have a device
// ID, but no FQDN/IPs. If so, return the ID, to allow the
// operator to clean up such devices.
- return id, "", nil, nil
+ return dev, nil
+ }
+ // TODO(irbekrm): we fall back to using the hostname field to determine Ingress's hostname to ensure backwards
+ // compatibility. In 1.82 we can remove this fallback mechanism.
+ dev.ingressDNSName = dev.hostname
+ if proxyCapVer(sec, pod, log) >= 109 {
+ dev.ingressDNSName = strings.TrimSuffix(string(sec.Data[kubetypes.KeyHTTPSEndpoint]), ".")
+ if strings.EqualFold(dev.ingressDNSName, kubetypes.ValueNoHTTPS) {
+ dev.ingressDNSName = ""
+ }
}
- if rawDeviceIPs, ok := sec.Data["device_ips"]; ok {
+ if rawDeviceIPs, ok := sec.Data[kubetypes.KeyDeviceIPs]; ok {
+ ips := make([]string, 0)
if err := json.Unmarshal(rawDeviceIPs, &ips); err != nil {
- return "", "", nil, err
+ return nil, err
}
+ dev.ips = ips
}
- return id, hostname, ips, nil
+ return dev, nil
}
func newAuthKey(ctx context.Context, tsClient tsClient, tags []string) (string, error) {
@@ -473,7 +527,7 @@ var proxyYaml []byte
//go:embed deploy/manifests/userspace-proxy.yaml
var userspaceProxyYaml []byte
-func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecret, tsConfigHash string, configs map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha) (*appsv1.StatefulSet, error) {
+func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecret, tsConfigHash string) (*appsv1.StatefulSet, error) {
ss := new(appsv1.StatefulSet)
if sts.ServeConfig != nil && sts.ForwardClusterTrafficViaL7IngressProxy != true { // If forwarding cluster traffic via is required we need non-userspace + NET_ADMIN + forwarding
if err := yaml.Unmarshal(userspaceProxyYaml, &ss); err != nil {
@@ -518,11 +572,6 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S
Name: "TS_KUBE_SECRET",
Value: proxySecret,
},
- corev1.EnvVar{
- // Old tailscaled config key is still used for backwards compatibility.
- Name: "EXPERIMENTAL_TS_CONFIGFILE_PATH",
- Value: "/etc/tsconfig/tailscaled",
- },
corev1.EnvVar{
// New style is in the form of cap-.hujson.
Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR",
@@ -668,24 +717,42 @@ func mergeStatefulSetLabelsOrAnnots(current, custom map[string]string, managed [
return custom
}
+func debugSetting(pc *tsapi.ProxyClass) bool {
+ if pc == nil ||
+ pc.Spec.StatefulSet == nil ||
+ pc.Spec.StatefulSet.Pod == nil ||
+ pc.Spec.StatefulSet.Pod.TailscaleContainer == nil ||
+ pc.Spec.StatefulSet.Pod.TailscaleContainer.Debug == nil {
+ // This default will change to false in 1.82.0.
+ return pc.Spec.Metrics != nil && pc.Spec.Metrics.Enable
+ }
+
+ return pc.Spec.StatefulSet.Pod.TailscaleContainer.Debug.Enable
+}
+
func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, stsCfg *tailscaleSTSConfig, logger *zap.SugaredLogger) *appsv1.StatefulSet {
if pc == nil || ss == nil {
return ss
}
- if stsCfg != nil && pc.Spec.Metrics != nil && pc.Spec.Metrics.Enable {
- if stsCfg.TailnetTargetFQDN == "" && stsCfg.TailnetTargetIP == "" && !stsCfg.ForwardClusterTrafficViaL7IngressProxy {
- enableMetrics(ss, pc)
- } else if stsCfg.ForwardClusterTrafficViaL7IngressProxy {
+
+ metricsEnabled := pc.Spec.Metrics != nil && pc.Spec.Metrics.Enable
+ debugEnabled := debugSetting(pc)
+ if metricsEnabled || debugEnabled {
+ isEgress := stsCfg != nil && (stsCfg.TailnetTargetFQDN != "" || stsCfg.TailnetTargetIP != "")
+ isForwardingL7Ingress := stsCfg != nil && stsCfg.ForwardClusterTrafficViaL7IngressProxy
+ if isEgress {
// TODO (irbekrm): fix this
// For Ingress proxies that have been configured with
// tailscale.com/experimental-forward-cluster-traffic-via-ingress
// annotation, all cluster traffic is forwarded to the
// Ingress backend(s).
- logger.Info("ProxyClass specifies that metrics should be enabled, but this is currently not supported for Ingress proxies that accept cluster traffic.")
- } else {
+ logger.Info("ProxyClass specifies that metrics should be enabled, but this is currently not supported for egress proxies.")
+ } else if isForwardingL7Ingress {
// TODO (irbekrm): fix this
// For egress proxies, currently all cluster traffic is forwarded to the tailnet target.
logger.Info("ProxyClass specifies that metrics should be enabled, but this is currently not supported for Ingress proxies that accept cluster traffic.")
+ } else {
+ enableEndpoints(ss, metricsEnabled, debugEnabled)
}
}
@@ -718,6 +785,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet,
ss.Spec.Template.Spec.NodeSelector = wantsPod.NodeSelector
ss.Spec.Template.Spec.Affinity = wantsPod.Affinity
ss.Spec.Template.Spec.Tolerations = wantsPod.Tolerations
+ ss.Spec.Template.Spec.TopologySpreadConstraints = wantsPod.TopologySpreadConstraints
// Update containers.
updateContainer := func(overlay *tsapi.Container, base corev1.Container) corev1.Container {
@@ -762,16 +830,58 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet,
return ss
}
-func enableMetrics(ss *appsv1.StatefulSet, pc *tsapi.ProxyClass) {
+func enableEndpoints(ss *appsv1.StatefulSet, metrics, debug bool) {
for i, c := range ss.Spec.Template.Spec.Containers {
if c.Name == "tailscale" {
- // Serve metrics on on :9001/debug/metrics. If
- // we didn't specify Pod IP here, the proxy would, in
- // some cases, also listen to its Tailscale IP- we don't
- // want folks to start relying on this side-effect as a
- // feature.
- ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env, corev1.EnvVar{Name: "TS_TAILSCALED_EXTRA_ARGS", Value: "--debug=$(POD_IP):9001"})
- ss.Spec.Template.Spec.Containers[i].Ports = append(ss.Spec.Template.Spec.Containers[i].Ports, corev1.ContainerPort{Name: "metrics", Protocol: "TCP", HostPort: 9001, ContainerPort: 9001})
+ if debug {
+ ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env,
+ // Serve tailscaled's debug metrics on
+ // :9001/debug/metrics. If we didn't specify Pod IP
+ // here, the proxy would, in some cases, also listen to its
+ // Tailscale IP- we don't want folks to start relying on this
+ // side-effect as a feature.
+ corev1.EnvVar{
+ Name: "TS_DEBUG_ADDR_PORT",
+ Value: "$(POD_IP):9001",
+ },
+ // TODO(tomhjp): Can remove this env var once 1.76.x is no
+ // longer supported.
+ corev1.EnvVar{
+ Name: "TS_TAILSCALED_EXTRA_ARGS",
+ Value: "--debug=$(TS_DEBUG_ADDR_PORT)",
+ },
+ )
+
+ ss.Spec.Template.Spec.Containers[i].Ports = append(ss.Spec.Template.Spec.Containers[i].Ports,
+ corev1.ContainerPort{
+ Name: "debug",
+ Protocol: "TCP",
+ ContainerPort: 9001,
+ },
+ )
+ }
+
+ if metrics {
+ ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env,
+ // Serve client metrics on :9002/metrics.
+ corev1.EnvVar{
+ Name: "TS_LOCAL_ADDR_PORT",
+ Value: "$(POD_IP):9002",
+ },
+ corev1.EnvVar{
+ Name: "TS_ENABLE_METRICS",
+ Value: "true",
+ },
+ )
+ ss.Spec.Template.Spec.Containers[i].Ports = append(ss.Spec.Template.Spec.Containers[i].Ports,
+ corev1.ContainerPort{
+ Name: "metrics",
+ Protocol: "TCP",
+ ContainerPort: 9002,
+ },
+ )
+ }
+
break
}
}
@@ -785,15 +895,9 @@ func readAuthKey(secret *corev1.Secret, key string) (*string, error) {
return origConf.AuthKey, nil
}
-// tailscaledConfig takes a proxy config, a newly generated auth key if
-// generated and a Secret with the previous proxy state and auth key and
-// returns tailscaled configuration and a hash of that configuration.
-//
-// As of 2024-05-09 it also returns legacy tailscaled config without the
-// later added NoStatefulFilter field to support proxies older than cap95.
-// TODO (irbekrm): remove the legacy config once we no longer need to support
-// versions older than cap94,
-// https://tailscale.com/kb/1236/kubernetes-operator#operator-and-proxies
+// tailscaledConfig takes a proxy config, a newly generated auth key if generated and a Secret with the previous proxy
+// state and auth key and returns tailscaled config files for currently supported proxy versions and a hash of that
+// configuration.
func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) {
conf := &ipn.ConfigVAlpha{
Version: "alpha0",
@@ -801,21 +905,19 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co
AcceptRoutes: "false", // AcceptRoutes defaults to true
Locked: "false",
Hostname: &stsC.Hostname,
- NoStatefulFiltering: "false",
+ NoStatefulFiltering: "true", // Explicitly enforce default value, see #14216
+ AppConnector: &ipn.AppConnectorPrefs{Advertise: false},
}
- // For egress proxies only, we need to ensure that stateful filtering is
- // not in place so that traffic from cluster can be forwarded via
- // Tailscale IPs.
- if stsC.TailnetTargetFQDN != "" || stsC.TailnetTargetIP != "" {
- conf.NoStatefulFiltering = "true"
- }
if stsC.Connector != nil {
routes, err := netutil.CalcAdvertiseRoutes(stsC.Connector.routes, stsC.Connector.isExitNode)
if err != nil {
return nil, fmt.Errorf("error calculating routes: %w", err)
}
conf.AdvertiseRoutes = routes
+ if stsC.Connector.isAppConnector {
+ conf.AppConnector.Advertise = true
+ }
}
if shouldAcceptRoutes(stsC.ProxyClass) {
conf.AcceptRoutes = "true"
@@ -830,11 +932,13 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co
}
conf.AuthKey = key
}
+
capVerConfigs := make(map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha)
+ capVerConfigs[107] = *conf
+
+ // AppConnector config option is only understood by clients of capver 107 and newer.
+ conf.AppConnector = nil
capVerConfigs[95] = *conf
- // legacy config should not contain NoStatefulFiltering field.
- conf.NoStatefulFiltering.Clear()
- capVerConfigs[94] = *conf
return capVerConfigs, nil
}
@@ -1007,3 +1111,23 @@ func nameForService(svc *corev1.Service) string {
func isValidFirewallMode(m string) bool {
return m == "auto" || m == "nftables" || m == "iptables"
}
+
+// proxyCapVer accepts a proxy state Secret and a proxy Pod and returns the capability version of the proxy Pod.
+// This is best effort - if the capability version cannot (currently) be determined, it returns -1.
+func proxyCapVer(sec *corev1.Secret, pod *corev1.Pod, log *zap.SugaredLogger) tailcfg.CapabilityVersion {
+ if sec == nil || pod == nil {
+ return tailcfg.CapabilityVersion(-1)
+ }
+ if len(sec.Data[kubetypes.KeyCapVer]) == 0 || len(sec.Data[kubetypes.KeyPodUID]) == 0 {
+ return tailcfg.CapabilityVersion(-1)
+ }
+ capVer, err := strconv.Atoi(string(sec.Data[kubetypes.KeyCapVer]))
+ if err != nil {
+ log.Infof("[unexpected]: unexpected capability version in proxy's state Secret, expected an integer, got %q", string(sec.Data[kubetypes.KeyCapVer]))
+ return tailcfg.CapabilityVersion(-1)
+ }
+ if !strings.EqualFold(string(pod.ObjectMeta.UID), string(sec.Data[kubetypes.KeyPodUID])) {
+ return tailcfg.CapabilityVersion(-1)
+ }
+ return tailcfg.CapabilityVersion(capVer)
+}
diff --git a/cmd/k8s-operator/sts_test.go b/cmd/k8s-operator/sts_test.go
index b2b2c8b93a2d7..05aafaee6a5d4 100644
--- a/cmd/k8s-operator/sts_test.go
+++ b/cmd/k8s-operator/sts_test.go
@@ -18,6 +18,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
"tailscale.com/types/ptr"
@@ -73,6 +74,16 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) {
NodeSelector: map[string]string{"beta.kubernetes.io/os": "linux"},
Affinity: &corev1.Affinity{NodeAffinity: &corev1.NodeAffinity{RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{}}},
Tolerations: []corev1.Toleration{{Key: "", Operator: "Exists"}},
+ TopologySpreadConstraints: []corev1.TopologySpreadConstraint{
+ {
+ WhenUnsatisfiable: "DoNotSchedule",
+ TopologyKey: "kubernetes.io/hostname",
+ MaxSkew: 3,
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{"foo": "bar"},
+ },
+ },
+ },
TailscaleContainer: &tsapi.Container{
SecurityContext: &corev1.SecurityContext{
Privileged: ptr.To(true),
@@ -114,10 +125,26 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) {
},
},
}
- proxyClassMetrics := &tsapi.ProxyClass{
- Spec: tsapi.ProxyClassSpec{
- Metrics: &tsapi.Metrics{Enable: true},
- },
+
+ proxyClassWithMetricsDebug := func(metrics bool, debug *bool) *tsapi.ProxyClass {
+ return &tsapi.ProxyClass{
+ Spec: tsapi.ProxyClassSpec{
+ Metrics: &tsapi.Metrics{Enable: metrics},
+ StatefulSet: func() *tsapi.StatefulSet {
+ if debug == nil {
+ return nil
+ }
+
+ return &tsapi.StatefulSet{
+ Pod: &tsapi.Pod{
+ TailscaleContainer: &tsapi.Container{
+ Debug: &tsapi.Debug{Enable: *debug},
+ },
+ },
+ }
+ }(),
+ },
+ }
}
var userspaceProxySS, nonUserspaceProxySS appsv1.StatefulSet
@@ -159,6 +186,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) {
wantSS.Spec.Template.Spec.NodeSelector = proxyClassAllOpts.Spec.StatefulSet.Pod.NodeSelector
wantSS.Spec.Template.Spec.Affinity = proxyClassAllOpts.Spec.StatefulSet.Pod.Affinity
wantSS.Spec.Template.Spec.Tolerations = proxyClassAllOpts.Spec.StatefulSet.Pod.Tolerations
+ wantSS.Spec.Template.Spec.TopologySpreadConstraints = proxyClassAllOpts.Spec.StatefulSet.Pod.TopologySpreadConstraints
wantSS.Spec.Template.Spec.Containers[0].SecurityContext = proxyClassAllOpts.Spec.StatefulSet.Pod.TailscaleContainer.SecurityContext
wantSS.Spec.Template.Spec.InitContainers[0].SecurityContext = proxyClassAllOpts.Spec.StatefulSet.Pod.TailscaleInitContainer.SecurityContext
wantSS.Spec.Template.Spec.Containers[0].Resources = proxyClassAllOpts.Spec.StatefulSet.Pod.TailscaleContainer.Resources
@@ -172,7 +200,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) {
gotSS := applyProxyClassToStatefulSet(proxyClassAllOpts, nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar())
if diff := cmp.Diff(gotSS, wantSS); diff != "" {
- t.Fatalf("Unexpected result applying ProxyClass with all fields set to a StatefulSet for non-userspace proxy (-got +want):\n%s", diff)
+ t.Errorf("Unexpected result applying ProxyClass with all fields set to a StatefulSet for non-userspace proxy (-got +want):\n%s", diff)
}
// 2. Test that a ProxyClass with custom labels and annotations for
@@ -185,7 +213,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) {
wantSS.Spec.Template.Annotations = proxyClassJustLabels.Spec.StatefulSet.Pod.Annotations
gotSS = applyProxyClassToStatefulSet(proxyClassJustLabels, nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar())
if diff := cmp.Diff(gotSS, wantSS); diff != "" {
- t.Fatalf("Unexpected result applying ProxyClass with custom labels and annotations to a StatefulSet for non-userspace proxy (-got +want):\n%s", diff)
+ t.Errorf("Unexpected result applying ProxyClass with custom labels and annotations to a StatefulSet for non-userspace proxy (-got +want):\n%s", diff)
}
// 3. Test that a ProxyClass with all fields set gets correctly applied
@@ -201,6 +229,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) {
wantSS.Spec.Template.Spec.NodeSelector = proxyClassAllOpts.Spec.StatefulSet.Pod.NodeSelector
wantSS.Spec.Template.Spec.Affinity = proxyClassAllOpts.Spec.StatefulSet.Pod.Affinity
wantSS.Spec.Template.Spec.Tolerations = proxyClassAllOpts.Spec.StatefulSet.Pod.Tolerations
+ wantSS.Spec.Template.Spec.TopologySpreadConstraints = proxyClassAllOpts.Spec.StatefulSet.Pod.TopologySpreadConstraints
wantSS.Spec.Template.Spec.Containers[0].SecurityContext = proxyClassAllOpts.Spec.StatefulSet.Pod.TailscaleContainer.SecurityContext
wantSS.Spec.Template.Spec.Containers[0].Resources = proxyClassAllOpts.Spec.StatefulSet.Pod.TailscaleContainer.Resources
wantSS.Spec.Template.Spec.Containers[0].Env = append(wantSS.Spec.Template.Spec.Containers[0].Env, []corev1.EnvVar{{Name: "foo", Value: "bar"}, {Name: "TS_USERSPACE", Value: "true"}, {Name: "bar"}}...)
@@ -208,7 +237,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) {
wantSS.Spec.Template.Spec.Containers[0].Image = "ghcr.io/my-repo/tailscale:v0.01testsomething"
gotSS = applyProxyClassToStatefulSet(proxyClassAllOpts, userspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar())
if diff := cmp.Diff(gotSS, wantSS); diff != "" {
- t.Fatalf("Unexpected result applying ProxyClass with all options to a StatefulSet for a userspace proxy (-got +want):\n%s", diff)
+ t.Errorf("Unexpected result applying ProxyClass with all options to a StatefulSet for a userspace proxy (-got +want):\n%s", diff)
}
// 4. Test that a ProxyClass with custom labels and annotations gets correctly applied
@@ -220,16 +249,48 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) {
wantSS.Spec.Template.Annotations = proxyClassJustLabels.Spec.StatefulSet.Pod.Annotations
gotSS = applyProxyClassToStatefulSet(proxyClassJustLabels, userspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar())
if diff := cmp.Diff(gotSS, wantSS); diff != "" {
- t.Fatalf("Unexpected result applying ProxyClass with custom labels and annotations to a StatefulSet for a userspace proxy (-got +want):\n%s", diff)
+ t.Errorf("Unexpected result applying ProxyClass with custom labels and annotations to a StatefulSet for a userspace proxy (-got +want):\n%s", diff)
+ }
+
+ // 5. Metrics enabled defaults to enabling both metrics and debug.
+ wantSS = nonUserspaceProxySS.DeepCopy()
+ wantSS.Spec.Template.Spec.Containers[0].Env = append(wantSS.Spec.Template.Spec.Containers[0].Env,
+ corev1.EnvVar{Name: "TS_DEBUG_ADDR_PORT", Value: "$(POD_IP):9001"},
+ corev1.EnvVar{Name: "TS_TAILSCALED_EXTRA_ARGS", Value: "--debug=$(TS_DEBUG_ADDR_PORT)"},
+ corev1.EnvVar{Name: "TS_LOCAL_ADDR_PORT", Value: "$(POD_IP):9002"},
+ corev1.EnvVar{Name: "TS_ENABLE_METRICS", Value: "true"},
+ )
+ wantSS.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{
+ {Name: "debug", Protocol: "TCP", ContainerPort: 9001},
+ {Name: "metrics", Protocol: "TCP", ContainerPort: 9002},
+ }
+ gotSS = applyProxyClassToStatefulSet(proxyClassWithMetricsDebug(true, nil), nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar())
+ if diff := cmp.Diff(gotSS, wantSS); diff != "" {
+ t.Errorf("Unexpected result applying ProxyClass with metrics enabled to a StatefulSet (-got +want):\n%s", diff)
+ }
+
+ // 6. Enable _just_ metrics by explicitly disabling debug.
+ wantSS = nonUserspaceProxySS.DeepCopy()
+ wantSS.Spec.Template.Spec.Containers[0].Env = append(wantSS.Spec.Template.Spec.Containers[0].Env,
+ corev1.EnvVar{Name: "TS_LOCAL_ADDR_PORT", Value: "$(POD_IP):9002"},
+ corev1.EnvVar{Name: "TS_ENABLE_METRICS", Value: "true"},
+ )
+ wantSS.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{{Name: "metrics", Protocol: "TCP", ContainerPort: 9002}}
+ gotSS = applyProxyClassToStatefulSet(proxyClassWithMetricsDebug(true, ptr.To(false)), nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar())
+ if diff := cmp.Diff(gotSS, wantSS); diff != "" {
+ t.Errorf("Unexpected result applying ProxyClass with metrics enabled to a StatefulSet (-got +want):\n%s", diff)
}
- // 5. Test that a ProxyClass with metrics enabled gets correctly applied to a StatefulSet.
+ // 7. Enable _just_ debug without metrics.
wantSS = nonUserspaceProxySS.DeepCopy()
- wantSS.Spec.Template.Spec.Containers[0].Env = append(wantSS.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{Name: "TS_TAILSCALED_EXTRA_ARGS", Value: "--debug=$(POD_IP):9001"})
- wantSS.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{{Name: "metrics", Protocol: "TCP", ContainerPort: 9001, HostPort: 9001}}
- gotSS = applyProxyClassToStatefulSet(proxyClassMetrics, nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar())
+ wantSS.Spec.Template.Spec.Containers[0].Env = append(wantSS.Spec.Template.Spec.Containers[0].Env,
+ corev1.EnvVar{Name: "TS_DEBUG_ADDR_PORT", Value: "$(POD_IP):9001"},
+ corev1.EnvVar{Name: "TS_TAILSCALED_EXTRA_ARGS", Value: "--debug=$(TS_DEBUG_ADDR_PORT)"},
+ )
+ wantSS.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{{Name: "debug", Protocol: "TCP", ContainerPort: 9001}}
+ gotSS = applyProxyClassToStatefulSet(proxyClassWithMetricsDebug(false, ptr.To(true)), nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar())
if diff := cmp.Diff(gotSS, wantSS); diff != "" {
- t.Fatalf("Unexpected result applying ProxyClass with metrics enabled to a StatefulSet (-got +want):\n%s", diff)
+ t.Errorf("Unexpected result applying ProxyClass with metrics enabled to a StatefulSet (-got +want):\n%s", diff)
}
}
diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go
index f45f922463113..70c810b256c99 100644
--- a/cmd/k8s-operator/svc.go
+++ b/cmd/k8s-operator/svc.go
@@ -121,7 +121,15 @@ func (a *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
return reconcile.Result{}, a.maybeCleanup(ctx, logger, svc)
}
- return reconcile.Result{}, a.maybeProvision(ctx, logger, svc)
+ if err := a.maybeProvision(ctx, logger, svc); err != nil {
+ if strings.Contains(err.Error(), optimisticLockErrorMsg) {
+ logger.Infof("optimistic lock error, retrying: %s", err)
+ } else {
+ return reconcile.Result{}, err
+ }
+ }
+
+ return reconcile.Result{}, nil
}
// maybeCleanup removes any existing resources related to serving svc over tailscale.
@@ -131,7 +139,7 @@ func (a *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
func (a *ServiceReconciler) maybeCleanup(ctx context.Context, logger *zap.SugaredLogger, svc *corev1.Service) (err error) {
oldSvcStatus := svc.Status.DeepCopy()
defer func() {
- if !apiequality.Semantic.DeepEqual(oldSvcStatus, svc.Status) {
+ if !apiequality.Semantic.DeepEqual(oldSvcStatus, &svc.Status) {
// An error encountered here should get returned by the Reconcile function.
err = errors.Join(err, a.Client.Status().Update(ctx, svc))
}
@@ -152,7 +160,12 @@ func (a *ServiceReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare
return nil
}
- if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(svc.Name, svc.Namespace, "svc")); err != nil {
+ proxyTyp := proxyTypeEgress
+ if a.shouldExpose(svc) {
+ proxyTyp = proxyTypeIngressService
+ }
+
+ if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(svc.Name, svc.Namespace, "svc"), proxyTyp); err != nil {
return fmt.Errorf("failed to cleanup: %w", err)
} else if !done {
logger.Debugf("cleanup not done yet, waiting for next reconcile")
@@ -191,7 +204,7 @@ func (a *ServiceReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare
func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.SugaredLogger, svc *corev1.Service) (err error) {
oldSvcStatus := svc.Status.DeepCopy()
defer func() {
- if !apiequality.Semantic.DeepEqual(oldSvcStatus, svc.Status) {
+ if !apiequality.Semantic.DeepEqual(oldSvcStatus, &svc.Status) {
// An error encountered here should get returned by the Reconcile function.
err = errors.Join(err, a.Client.Status().Update(ctx, svc))
}
@@ -256,6 +269,10 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
ChildResourceLabels: crl,
ProxyClassName: proxyClass,
}
+ sts.proxyType = proxyTypeEgress
+ if a.shouldExpose(svc) {
+ sts.proxyType = proxyTypeIngressService
+ }
a.mu.Lock()
if a.shouldExposeClusterIP(svc) {
@@ -311,11 +328,11 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
return nil
}
- _, tsHost, tsIPs, err := a.ssr.DeviceInfo(ctx, crl)
+ dev, err := a.ssr.DeviceInfo(ctx, crl, logger)
if err != nil {
return fmt.Errorf("failed to get device ID: %w", err)
}
- if tsHost == "" {
+ if dev == nil || dev.hostname == "" {
msg := "no Tailscale hostname known yet, waiting for proxy pod to finish auth"
logger.Debug(msg)
// No hostname yet. Wait for the proxy pod to auth.
@@ -324,9 +341,9 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
return nil
}
- logger.Debugf("setting Service LoadBalancer status to %q, %s", tsHost, strings.Join(tsIPs, ", "))
+ logger.Debugf("setting Service LoadBalancer status to %q, %s", dev.hostname, strings.Join(dev.ips, ", "))
ingress := []corev1.LoadBalancerIngress{
- {Hostname: tsHost},
+ {Hostname: dev.hostname},
}
clusterIPAddr, err := netip.ParseAddr(svc.Spec.ClusterIP)
if err != nil {
@@ -334,7 +351,7 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
tsoperator.SetServiceCondition(svc, tsapi.ProxyReady, metav1.ConditionFalse, reasonProxyFailed, msg, a.clock, logger)
return errors.New(msg)
}
- for _, ip := range tsIPs {
+ for _, ip := range dev.ips {
addr, err := netip.ParseAddr(ip)
if err != nil {
continue
@@ -358,9 +375,14 @@ func validateService(svc *corev1.Service) []string {
violations = append(violations, fmt.Sprintf("invalid value of annotation %s: %q does not appear to be a valid MagicDNS name", AnnotationTailnetTargetFQDN, fqdn))
}
}
-
- // TODO(irbekrm): validate that tailscale.com/tailnet-ip annotation is a
- // valid IP address (tailscale/tailscale#13671).
+ if ipStr := svc.Annotations[AnnotationTailnetTargetIP]; ipStr != "" {
+ ip, err := netip.ParseAddr(ipStr)
+ if err != nil {
+ violations = append(violations, fmt.Sprintf("invalid value of annotation %s: %q could not be parsed as a valid IP Address, error: %s", AnnotationTailnetTargetIP, ipStr, err))
+ } else if !ip.IsValid() {
+ violations = append(violations, fmt.Sprintf("parsed IP address in annotation %s: %q is not valid", AnnotationTailnetTargetIP, ipStr))
+ }
+ }
svcName := nameForService(svc)
if err := dnsname.ValidLabel(svcName); err != nil {
diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go
index 6b6297cbdd4fe..f6ae29b62fefc 100644
--- a/cmd/k8s-operator/testutils_test.go
+++ b/cmd/k8s-operator/testutils_test.go
@@ -8,6 +8,7 @@ package main
import (
"context"
"encoding/json"
+ "fmt"
"net/netip"
"reflect"
"strings"
@@ -21,6 +22,7 @@ import (
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -39,7 +41,10 @@ type configOpts struct {
secretName string
hostname string
namespace string
+ tailscaleNamespace string
+ namespaced bool
parentType string
+ proxyType string
priorityClassName string
firewallMode string
tailnetTargetIP string
@@ -48,6 +53,7 @@ type configOpts struct {
clusterTargetDNS string
subnetRoutes string
isExitNode bool
+ isAppConnector bool
confFileHash string
serveConfig *ipn.ServeConfig
shouldEnableForwardingClusterTrafficViaIngress bool
@@ -55,6 +61,7 @@ type configOpts struct {
app string
shouldRemoveAuthKey bool
secretExtraData map[string][]byte
+ enableMetrics bool
}
func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.StatefulSet {
@@ -69,14 +76,13 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef
Env: []corev1.EnvVar{
{Name: "TS_USERSPACE", Value: "false"},
{Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "status.podIP"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}},
+ {Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.name"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}},
+ {Name: "POD_UID", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.uid"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}},
{Name: "TS_KUBE_SECRET", Value: opts.secretName},
- {Name: "EXPERIMENTAL_TS_CONFIGFILE_PATH", Value: "/etc/tsconfig/tailscaled"},
{Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig"},
},
SecurityContext: &corev1.SecurityContext{
- Capabilities: &corev1.Capabilities{
- Add: []corev1.Capability{"NET_ADMIN"},
- },
+ Privileged: ptr.To(true),
},
ImagePullPolicy: "Always",
}
@@ -150,6 +156,29 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef
Name: "TS_INTERNAL_APP",
Value: opts.app,
})
+ if opts.enableMetrics {
+ tsContainer.Env = append(tsContainer.Env,
+ corev1.EnvVar{
+ Name: "TS_DEBUG_ADDR_PORT",
+ Value: "$(POD_IP):9001"},
+ corev1.EnvVar{
+ Name: "TS_TAILSCALED_EXTRA_ARGS",
+ Value: "--debug=$(TS_DEBUG_ADDR_PORT)",
+ },
+ corev1.EnvVar{
+ Name: "TS_LOCAL_ADDR_PORT",
+ Value: "$(POD_IP):9002",
+ },
+ corev1.EnvVar{
+ Name: "TS_ENABLE_METRICS",
+ Value: "true",
+ },
+ )
+ tsContainer.Ports = append(tsContainer.Ports,
+ corev1.ContainerPort{Name: "debug", ContainerPort: 9001, Protocol: "TCP"},
+ corev1.ContainerPort{Name: "metrics", ContainerPort: 9002, Protocol: "TCP"},
+ )
+ }
ss := &appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "StatefulSet",
@@ -228,8 +257,9 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps
Env: []corev1.EnvVar{
{Name: "TS_USERSPACE", Value: "true"},
{Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "status.podIP"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}},
+ {Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.name"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}},
+ {Name: "POD_UID", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.uid"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}},
{Name: "TS_KUBE_SECRET", Value: opts.secretName},
- {Name: "EXPERIMENTAL_TS_CONFIGFILE_PATH", Value: "/etc/tsconfig/tailscaled"},
{Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig"},
{Name: "TS_SERVE_CONFIG", Value: "/etc/tailscaled/serve-config"},
{Name: "TS_INTERNAL_APP", Value: opts.app},
@@ -240,6 +270,29 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps
{Name: "serve-config", ReadOnly: true, MountPath: "/etc/tailscaled"},
},
}
+ if opts.enableMetrics {
+ tsContainer.Env = append(tsContainer.Env,
+ corev1.EnvVar{
+ Name: "TS_DEBUG_ADDR_PORT",
+ Value: "$(POD_IP):9001"},
+ corev1.EnvVar{
+ Name: "TS_TAILSCALED_EXTRA_ARGS",
+ Value: "--debug=$(TS_DEBUG_ADDR_PORT)",
+ },
+ corev1.EnvVar{
+ Name: "TS_LOCAL_ADDR_PORT",
+ Value: "$(POD_IP):9002",
+ },
+ corev1.EnvVar{
+ Name: "TS_ENABLE_METRICS",
+ Value: "true",
+ },
+ )
+ tsContainer.Ports = append(tsContainer.Ports, corev1.ContainerPort{
+ Name: "debug", ContainerPort: 9001, Protocol: "TCP"},
+ corev1.ContainerPort{Name: "metrics", ContainerPort: 9002, Protocol: "TCP"},
+ )
+ }
volumes := []corev1.Volume{
{
Name: "tailscaledconfig",
@@ -334,6 +387,87 @@ func expectedHeadlessService(name string, parentType string) *corev1.Service {
}
}
+func expectedMetricsService(opts configOpts) *corev1.Service {
+ labels := metricsLabels(opts)
+ selector := map[string]string{
+ "tailscale.com/managed": "true",
+ "tailscale.com/parent-resource": "test",
+ "tailscale.com/parent-resource-type": opts.parentType,
+ }
+ if opts.namespaced {
+ selector["tailscale.com/parent-resource-ns"] = opts.namespace
+ }
+ return &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: metricsResourceName(opts.stsName),
+ Namespace: opts.tailscaleNamespace,
+ Labels: labels,
+ },
+ Spec: corev1.ServiceSpec{
+ Selector: selector,
+ Type: corev1.ServiceTypeClusterIP,
+ Ports: []corev1.ServicePort{{Protocol: "TCP", Port: 9002, Name: "metrics"}},
+ },
+ }
+}
+
+func metricsLabels(opts configOpts) map[string]string {
+ promJob := fmt.Sprintf("ts_%s_default_test", opts.proxyType)
+ if !opts.namespaced {
+ promJob = fmt.Sprintf("ts_%s_test", opts.proxyType)
+ }
+ labels := map[string]string{
+ "tailscale.com/managed": "true",
+ "tailscale.com/metrics-target": opts.stsName,
+ "ts_prom_job": promJob,
+ "ts_proxy_type": opts.proxyType,
+ "ts_proxy_parent_name": "test",
+ }
+ if opts.namespaced {
+ labels["ts_proxy_parent_namespace"] = "default"
+ }
+ return labels
+}
+
+func expectedServiceMonitor(t *testing.T, opts configOpts) *unstructured.Unstructured {
+ t.Helper()
+ labels := metricsLabels(opts)
+ name := metricsResourceName(opts.stsName)
+ sm := &ServiceMonitor{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: opts.tailscaleNamespace,
+ Labels: labels,
+ ResourceVersion: "1",
+ OwnerReferences: []metav1.OwnerReference{{APIVersion: "v1", Kind: "Service", Name: name, BlockOwnerDeletion: ptr.To(true), Controller: ptr.To(true)}},
+ },
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ServiceMonitor",
+ APIVersion: "monitoring.coreos.com/v1",
+ },
+ Spec: ServiceMonitorSpec{
+ Selector: metav1.LabelSelector{MatchLabels: labels},
+ Endpoints: []ServiceMonitorEndpoint{{
+ Port: "metrics",
+ }},
+ NamespaceSelector: ServiceMonitorNamespaceSelector{
+ MatchNames: []string{opts.tailscaleNamespace},
+ },
+ JobLabel: "ts_prom_job",
+ TargetLabels: []string{
+ "ts_proxy_parent_name",
+ "ts_proxy_parent_namespace",
+ "ts_proxy_type",
+ },
+ },
+ }
+ u, err := serviceMonitorToUnstructured(sm)
+ if err != nil {
+ t.Fatalf("error converting ServiceMonitor to unstructured: %v", err)
+ }
+ return u
+}
+
func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Secret {
t.Helper()
s := &corev1.Secret{
@@ -350,12 +484,14 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec
mak.Set(&s.StringData, "serve-config", string(serveConfigBs))
}
conf := &ipn.ConfigVAlpha{
- Version: "alpha0",
- AcceptDNS: "false",
- Hostname: &opts.hostname,
- Locked: "false",
- AuthKey: ptr.To("secret-authkey"),
- AcceptRoutes: "false",
+ Version: "alpha0",
+ AcceptDNS: "false",
+ Hostname: &opts.hostname,
+ Locked: "false",
+ AuthKey: ptr.To("secret-authkey"),
+ AcceptRoutes: "false",
+ AppConnector: &ipn.AppConnectorPrefs{Advertise: false},
+ NoStatefulFiltering: "true",
}
if opts.proxyClass != "" {
t.Logf("applying configuration from ProxyClass %s", opts.proxyClass)
@@ -370,6 +506,9 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec
if opts.shouldRemoveAuthKey {
conf.AuthKey = nil
}
+ if opts.isAppConnector {
+ conf.AppConnector = &ipn.AppConnectorPrefs{Advertise: true}
+ }
var routes []netip.Prefix
if opts.subnetRoutes != "" || opts.isExitNode {
r := opts.subnetRoutes
@@ -385,21 +524,17 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec
}
}
conf.AdvertiseRoutes = routes
- b, err := json.Marshal(conf)
+ bnn, err := json.Marshal(conf)
if err != nil {
t.Fatalf("error marshalling tailscaled config")
}
- if opts.tailnetTargetFQDN != "" || opts.tailnetTargetIP != "" {
- conf.NoStatefulFiltering = "true"
- } else {
- conf.NoStatefulFiltering = "false"
- }
+ conf.AppConnector = nil
bn, err := json.Marshal(conf)
if err != nil {
t.Fatalf("error marshalling tailscaled config")
}
- mak.Set(&s.StringData, "tailscaled", string(b))
mak.Set(&s.StringData, "cap-95.hujson", string(bn))
+ mak.Set(&s.StringData, "cap-107.hujson", string(bnn))
labels := map[string]string{
"tailscale.com/managed": "true",
"tailscale.com/parent-resource": "test",
@@ -500,6 +635,21 @@ func expectEqual[T any, O ptrObject[T]](t *testing.T, client client.Client, want
}
}
+func expectEqualUnstructured(t *testing.T, client client.Client, want *unstructured.Unstructured) {
+ t.Helper()
+ got := &unstructured.Unstructured{}
+ got.SetGroupVersionKind(want.GroupVersionKind())
+ if err := client.Get(context.Background(), types.NamespacedName{
+ Name: want.GetName(),
+ Namespace: want.GetNamespace(),
+ }, got); err != nil {
+ t.Fatalf("getting %q: %v", want.GetName(), err)
+ }
+ if diff := cmp.Diff(got, want); diff != "" {
+ t.Fatalf("unexpected contents of Unstructured (-got +want):\n%s", diff)
+ }
+}
+
func expectMissing[T any, O ptrObject[T]](t *testing.T, client client.Client, ns, name string) {
t.Helper()
obj := O(new(T))
@@ -642,7 +792,7 @@ func removeHashAnnotation(sts *appsv1.StatefulSet) {
func removeTargetPortsFromSvc(svc *corev1.Service) {
newPorts := make([]corev1.ServicePort, 0)
for _, p := range svc.Spec.Ports {
- newPorts = append(newPorts, corev1.ServicePort{Protocol: p.Protocol, Port: p.Port})
+ newPorts = append(newPorts, corev1.ServicePort{Protocol: p.Protocol, Port: p.Port, Name: p.Name})
}
svc.Spec.Ports = newPorts
}
@@ -650,29 +800,29 @@ func removeTargetPortsFromSvc(svc *corev1.Service) {
func removeAuthKeyIfExistsModifier(t *testing.T) func(s *corev1.Secret) {
return func(secret *corev1.Secret) {
t.Helper()
- if len(secret.StringData["tailscaled"]) != 0 {
+ if len(secret.StringData["cap-95.hujson"]) != 0 {
conf := &ipn.ConfigVAlpha{}
- if err := json.Unmarshal([]byte(secret.StringData["tailscaled"]), conf); err != nil {
- t.Fatalf("error unmarshalling 'tailscaled' contents: %v", err)
+ if err := json.Unmarshal([]byte(secret.StringData["cap-95.hujson"]), conf); err != nil {
+ t.Fatalf("error unmarshalling 'cap-95.hujson' contents: %v", err)
}
conf.AuthKey = nil
b, err := json.Marshal(conf)
if err != nil {
- t.Fatalf("error marshalling updated 'tailscaled' config: %v", err)
+ t.Fatalf("error marshalling 'cap-95.hujson' contents: %v", err)
}
- mak.Set(&secret.StringData, "tailscaled", string(b))
+ mak.Set(&secret.StringData, "cap-95.hujson", string(b))
}
- if len(secret.StringData["cap-95.hujson"]) != 0 {
+ if len(secret.StringData["cap-107.hujson"]) != 0 {
conf := &ipn.ConfigVAlpha{}
- if err := json.Unmarshal([]byte(secret.StringData["cap-95.hujson"]), conf); err != nil {
- t.Fatalf("error umarshalling 'cap-95.hujson' contents: %v", err)
+ if err := json.Unmarshal([]byte(secret.StringData["cap-107.hujson"]), conf); err != nil {
+ t.Fatalf("error unmarshalling 'cap-107.hujson' contents: %v", err)
}
conf.AuthKey = nil
b, err := json.Marshal(conf)
if err != nil {
- t.Fatalf("error marshalling 'cap-95.huson' contents: %v", err)
+ t.Fatalf("error marshalling 'cap-107.hujson' contents: %v", err)
}
- mak.Set(&secret.StringData, "cap-95.hujson", string(b))
+ mak.Set(&secret.StringData, "cap-107.hujson", string(b))
}
}
}
diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go
index cfe38c50af311..44ce731fe82d6 100644
--- a/cmd/k8s-operator/tsrecorder.go
+++ b/cmd/k8s-operator/tsrecorder.go
@@ -11,6 +11,7 @@ import (
"fmt"
"net/http"
"slices"
+ "strings"
"sync"
"github.com/pkg/errors"
@@ -38,6 +39,7 @@ import (
const (
reasonRecorderCreationFailed = "RecorderCreationFailed"
+ reasonRecorderCreating = "RecorderCreating"
reasonRecorderCreated = "RecorderCreated"
reasonRecorderInvalid = "RecorderInvalid"
@@ -102,7 +104,7 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques
oldTSRStatus := tsr.Status.DeepCopy()
setStatusReady := func(tsr *tsapi.Recorder, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) {
tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, status, reason, message, tsr.Generation, r.clock, logger)
- if !apiequality.Semantic.DeepEqual(oldTSRStatus, tsr.Status) {
+ if !apiequality.Semantic.DeepEqual(oldTSRStatus, &tsr.Status) {
// An error encountered here should get returned by the Reconcile function.
if updateErr := r.Client.Status().Update(ctx, tsr); updateErr != nil {
err = errors.Wrap(err, updateErr.Error())
@@ -119,23 +121,28 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques
logger.Infof("ensuring Recorder is set up")
tsr.Finalizers = append(tsr.Finalizers, FinalizerName)
if err := r.Update(ctx, tsr); err != nil {
- logger.Errorf("error adding finalizer: %w", err)
return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderCreationFailed, reasonRecorderCreationFailed)
}
}
if err := r.validate(tsr); err != nil {
- logger.Errorf("error validating Recorder spec: %w", err)
message := fmt.Sprintf("Recorder is invalid: %s", err)
r.recorder.Eventf(tsr, corev1.EventTypeWarning, reasonRecorderInvalid, message)
return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderInvalid, message)
}
if err = r.maybeProvision(ctx, tsr); err != nil {
- logger.Errorf("error creating Recorder resources: %w", err)
+ reason := reasonRecorderCreationFailed
message := fmt.Sprintf("failed creating Recorder: %s", err)
- r.recorder.Eventf(tsr, corev1.EventTypeWarning, reasonRecorderCreationFailed, message)
- return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderCreationFailed, message)
+ if strings.Contains(err.Error(), optimisticLockErrorMsg) {
+ reason = reasonRecorderCreating
+ message = fmt.Sprintf("optimistic lock error, retrying: %s", err)
+ err = nil
+ logger.Info(message)
+ } else {
+ r.recorder.Eventf(tsr, corev1.EventTypeWarning, reasonRecorderCreationFailed, message)
+ }
+ return setStatusReady(tsr, metav1.ConditionFalse, reason, message)
}
logger.Info("Recorder resources synced")
diff --git a/cmd/k8s-operator/tsrecorder_specs.go b/cmd/k8s-operator/tsrecorder_specs.go
index 4a74fb7e03442..4a7bf988773a6 100644
--- a/cmd/k8s-operator/tsrecorder_specs.go
+++ b/cmd/k8s-operator/tsrecorder_specs.go
@@ -130,6 +130,15 @@ func tsrRole(tsr *tsapi.Recorder, namespace string) *rbacv1.Role {
fmt.Sprintf("%s-0", tsr.Name), // Contains the node state.
},
},
+ {
+ APIGroups: []string{""},
+ Resources: []string{"events"},
+ Verbs: []string{
+ "get",
+ "create",
+ "patch",
+ },
+ },
},
}
}
@@ -203,6 +212,14 @@ func env(tsr *tsapi.Recorder) []corev1.EnvVar {
},
},
},
+ {
+ Name: "POD_UID",
+ ValueFrom: &corev1.EnvVarSource{
+ FieldRef: &corev1.ObjectFieldSelector{
+ FieldPath: "metadata.uid",
+ },
+ },
+ },
{
Name: "TS_STATE",
Value: "kube:$(POD_NAME)",
diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt
index a35f59516ee32..34a71c43e0010 100644
--- a/cmd/stund/depaware.txt
+++ b/cmd/stund/depaware.txt
@@ -8,7 +8,6 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar
github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+
github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+
github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+
- github.com/google/uuid from tailscale.com/util/fastuuid
💣 github.com/prometheus/client_golang/prometheus from tailscale.com/tsweb/promvarz
github.com/prometheus/client_golang/prometheus/internal from github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+
@@ -67,15 +66,16 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar
tailscale.com/types/logger from tailscale.com/tsweb
tailscale.com/types/opt from tailscale.com/envknob+
tailscale.com/types/ptr from tailscale.com/tailcfg+
+ tailscale.com/types/result from tailscale.com/util/lineiter
tailscale.com/types/structs from tailscale.com/tailcfg+
tailscale.com/types/tkatype from tailscale.com/tailcfg+
tailscale.com/types/views from tailscale.com/net/tsaddr+
tailscale.com/util/ctxkey from tailscale.com/tsweb+
L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics
tailscale.com/util/dnsname from tailscale.com/tailcfg
- tailscale.com/util/fastuuid from tailscale.com/tsweb
- tailscale.com/util/lineread from tailscale.com/version/distro
+ tailscale.com/util/lineiter from tailscale.com/version/distro
tailscale.com/util/nocasemaps from tailscale.com/types/ipproto
+ tailscale.com/util/rands from tailscale.com/tsweb
tailscale.com/util/slicesx from tailscale.com/tailcfg
tailscale.com/util/vizerror from tailscale.com/tailcfg+
tailscale.com/version from tailscale.com/envknob+
@@ -132,7 +132,6 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar
crypto/tls from net/http+
crypto/x509 from crypto/tls
crypto/x509/pkix from crypto/x509
- database/sql/driver from github.com/google/uuid
embed from crypto/internal/nistec+
encoding from encoding/json+
encoding/asn1 from crypto/x509+
@@ -163,7 +162,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar
math/big from crypto/dsa+
math/bits from compress/flate+
math/rand from math/big+
- math/rand/v2 from tailscale.com/util/fastuuid+
+ math/rand/v2 from internal/concurrent+
mime from github.com/prometheus/common/expfmt+
mime/multipart from net/http
mime/quotedprintable from mime/multipart
diff --git a/cmd/tailscale/cli/advertise.go b/cmd/tailscale/cli/advertise.go
new file mode 100644
index 0000000000000..c9474c4274dd2
--- /dev/null
+++ b/cmd/tailscale/cli/advertise.go
@@ -0,0 +1,78 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package cli
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "strings"
+
+ "github.com/peterbourgon/ff/v3/ffcli"
+ "tailscale.com/envknob"
+ "tailscale.com/ipn"
+ "tailscale.com/tailcfg"
+)
+
+var advertiseArgs struct {
+ services string // comma-separated list of services to advertise
+}
+
+// TODO(naman): This flag may move to set.go or serve_v2.go after the WIPCode
+// envknob is not needed.
+var advertiseCmd = &ffcli.Command{
+ Name: "advertise",
+ ShortUsage: "tailscale advertise --services=<services>",
+ ShortHelp: "Advertise this node as a destination for a service",
+ Exec: runAdvertise,
+ FlagSet: (func() *flag.FlagSet {
+ fs := newFlagSet("advertise")
+ fs.StringVar(&advertiseArgs.services, "services", "", "comma-separated services to advertise; each must start with \"svc:\" (e.g. \"svc:idp,svc:nas,svc:database\")")
+ return fs
+ })(),
+}
+
+func maybeAdvertiseCmd() []*ffcli.Command {
+ if !envknob.UseWIPCode() {
+ return nil
+ }
+ return []*ffcli.Command{advertiseCmd}
+}
+
+func runAdvertise(ctx context.Context, args []string) error {
+ if len(args) > 0 {
+ return flag.ErrHelp
+ }
+
+ services, err := parseServiceNames(advertiseArgs.services)
+ if err != nil {
+ return err
+ }
+
+ _, err = localClient.EditPrefs(ctx, &ipn.MaskedPrefs{
+ AdvertiseServicesSet: true,
+ Prefs: ipn.Prefs{
+ AdvertiseServices: services,
+ },
+ })
+ return err
+}
+
+// parseServiceNames takes a comma-separated list of service names
+// (eg. "svc:hello,svc:webserver,svc:catphotos"), splits them into
+// a list and validates each service name. If valid, it returns
+// the service names in a slice of strings.
+func parseServiceNames(servicesArg string) ([]string, error) {
+ var services []string
+ if servicesArg != "" {
+ services = strings.Split(servicesArg, ",")
+ for _, svc := range services {
+ err := tailcfg.CheckServiceName(svc)
+ if err != nil {
+ return nil, fmt.Errorf("service %q: %s", svc, err)
+ }
+ }
+ }
+ return services, nil
+}
diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go
index 864cf6903a6d0..66961b2e0086d 100644
--- a/cmd/tailscale/cli/cli.go
+++ b/cmd/tailscale/cli/cli.go
@@ -93,8 +93,13 @@ func Run(args []string) (err error) {
args = CleanUpArgs(args)
- if len(args) == 1 && (args[0] == "-V" || args[0] == "--version") {
- args = []string{"version"}
+ if len(args) == 1 {
+ switch args[0] {
+ case "-V", "--version":
+ args = []string{"version"}
+ case "help":
+ args = []string{"--help"}
+ }
}
var warnOnce sync.Once
@@ -177,7 +182,7 @@ For help on subcommands, add --help after: "tailscale status --help".
This CLI is still under active development. Commands and flags will
change in the future.
`),
- Subcommands: []*ffcli.Command{
+ Subcommands: append([]*ffcli.Command{
upCmd,
downCmd,
setCmd,
@@ -185,10 +190,12 @@ change in the future.
logoutCmd,
switchCmd,
configureCmd,
+ syspolicyCmd,
netcheckCmd,
ipCmd,
dnsCmd,
statusCmd,
+ metricsCmd,
pingCmd,
ncCmd,
sshCmd,
@@ -207,7 +214,7 @@ change in the future.
debugCmd,
driveCmd,
idTokenCmd,
- },
+ }, maybeAdvertiseCmd()...),
FlagSet: rootfs,
Exec: func(ctx context.Context, args []string) error {
if len(args) > 0 {
diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go
index d103c8f7e9f5c..0444e914c7260 100644
--- a/cmd/tailscale/cli/cli_test.go
+++ b/cmd/tailscale/cli/cli_test.go
@@ -9,6 +9,7 @@ import (
"encoding/json"
"flag"
"fmt"
+ "io"
"net/netip"
"reflect"
"strings"
@@ -946,6 +947,10 @@ func TestPrefFlagMapping(t *testing.T) {
// Handled by the tailscale share subcommand, we don't want a CLI
// flag for this.
continue
+ case "AdvertiseServices":
+ // Handled by the tailscale advertise subcommand, we don't want a
+ // CLI flag for this.
+ continue
case "InternalExitNodePrior":
// Used internally by LocalBackend as part of exit node usage toggling.
// No CLI flag for this.
@@ -1476,3 +1481,33 @@ func TestParseNLArgs(t *testing.T) {
})
}
}
+
+func TestHelpAlias(t *testing.T) {
+ var stdout, stderr bytes.Buffer
+ tstest.Replace[io.Writer](t, &Stdout, &stdout)
+ tstest.Replace[io.Writer](t, &Stderr, &stderr)
+
+ gotExit0 := false
+ defer func() {
+ if !gotExit0 {
+ t.Error("expected os.Exit(0) to be called")
+ return
+ }
+ if !strings.Contains(stderr.String(), "SUBCOMMANDS") {
+ t.Errorf("expected help output to contain SUBCOMMANDS; got stderr=%q; stdout=%q", stderr.String(), stdout.String())
+ }
+ }()
+ defer func() {
+ if e := recover(); e != nil {
+ if strings.Contains(fmt.Sprint(e), "unexpected call to os.Exit(0)") {
+ gotExit0 = true
+ } else {
+ t.Errorf("unexpected panic: %v", e)
+ }
+ }
+ }()
+ err := Run([]string{"help"})
+ if err != nil {
+ t.Fatalf("Run: %v", err)
+ }
+}
diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go
index fdde9ef096ae3..04b343e760e3c 100644
--- a/cmd/tailscale/cli/debug.go
+++ b/cmd/tailscale/cli/debug.go
@@ -36,6 +36,7 @@ import (
"tailscale.com/hostinfo"
"tailscale.com/internal/noiseconn"
"tailscale.com/ipn"
+ "tailscale.com/net/netmon"
"tailscale.com/net/tsaddr"
"tailscale.com/net/tshttpproxy"
"tailscale.com/paths"
@@ -174,6 +175,12 @@ var debugCmd = &ffcli.Command{
Exec: localAPIAction("pick-new-derp"),
ShortHelp: "Switch to some other random DERP home region for a short time",
},
+ {
+ Name: "force-prefer-derp",
+ ShortUsage: "tailscale debug force-prefer-derp",
+ Exec: forcePreferDERP,
+ ShortHelp: "Prefer the given region ID if reachable (until restart, or 0 to clear)",
+ },
{
Name: "force-netmap-update",
ShortUsage: "tailscale debug force-netmap-update",
@@ -213,6 +220,7 @@ var debugCmd = &ffcli.Command{
fs := newFlagSet("watch-ipn")
fs.BoolVar(&watchIPNArgs.netmap, "netmap", true, "include netmap in messages")
fs.BoolVar(&watchIPNArgs.initial, "initial", false, "include initial status")
+ fs.BoolVar(&watchIPNArgs.rateLimit, "rate-limit", true, "rate limit messages")
fs.BoolVar(&watchIPNArgs.showPrivateKey, "show-private-key", false, "include node private key in printed netmap")
fs.IntVar(&watchIPNArgs.count, "count", 0, "exit after printing this many statuses, or 0 to keep going forever")
return fs
@@ -500,6 +508,7 @@ var watchIPNArgs struct {
netmap bool
initial bool
showPrivateKey bool
+ rateLimit bool
count int
}
@@ -511,6 +520,9 @@ func runWatchIPN(ctx context.Context, args []string) error {
if !watchIPNArgs.showPrivateKey {
mask |= ipn.NotifyNoPrivateKeys
}
+ if watchIPNArgs.rateLimit {
+ mask |= ipn.NotifyRateLimit
+ }
watcher, err := localClient.WatchIPNBus(ctx, mask)
if err != nil {
return err
@@ -571,6 +583,25 @@ func runDERPMap(ctx context.Context, args []string) error {
return nil
}
+func forcePreferDERP(ctx context.Context, args []string) error {
+ var n int
+ if len(args) != 1 {
+ return errors.New("expected exactly one integer argument")
+ }
+ n, err := strconv.Atoi(args[0])
+ if err != nil {
+ return fmt.Errorf("expected exactly one integer argument: %w", err)
+ }
+ b, err := json.Marshal(n)
+ if err != nil {
+ return fmt.Errorf("failed to marshal DERP region: %w", err)
+ }
+ if err := localClient.DebugActionBody(ctx, "force-prefer-derp", bytes.NewReader(b)); err != nil {
+ return fmt.Errorf("failed to force preferred DERP: %w", err)
+ }
+ return nil
+}
+
func localAPIAction(action string) func(context.Context, []string) error {
return func(ctx context.Context, args []string) error {
if len(args) > 0 {
@@ -845,6 +876,11 @@ func runTS2021(ctx context.Context, args []string) error {
logf = log.Printf
}
+ netMon, err := netmon.New(logger.WithPrefix(logf, "netmon: "))
+ if err != nil {
+ return fmt.Errorf("creating netmon: %w", err)
+ }
+
noiseDialer := &controlhttp.Dialer{
Hostname: ts2021Args.host,
HTTPPort: "80",
@@ -854,6 +890,7 @@ func runTS2021(ctx context.Context, args []string) error {
ProtocolVersion: uint16(ts2021Args.version),
Dialer: dialFunc,
Logf: logf,
+ NetMon: netMon,
}
const tries = 2
for i := range tries {
diff --git a/cmd/tailscale/cli/metrics.go b/cmd/tailscale/cli/metrics.go
new file mode 100644
index 0000000000000..d5fe9ad81cb70
--- /dev/null
+++ b/cmd/tailscale/cli/metrics.go
@@ -0,0 +1,88 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package cli
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/peterbourgon/ff/v3/ffcli"
+ "tailscale.com/atomicfile"
+)
+
+var metricsCmd = &ffcli.Command{
+ Name: "metrics",
+ ShortHelp: "Show Tailscale metrics",
+ LongHelp: strings.TrimSpace(`
+
+The 'tailscale metrics' command shows Tailscale user-facing metrics (as opposed
+to internal metrics printed by 'tailscale debug metrics').
+
+For more information about Tailscale metrics, refer to
+https://tailscale.com/s/client-metrics
+
+`),
+ ShortUsage: "tailscale metrics [flags]",
+ UsageFunc: usageFuncNoDefaultValues,
+ Exec: runMetricsNoSubcommand,
+ Subcommands: []*ffcli.Command{
+ {
+ Name: "print",
+ ShortUsage: "tailscale metrics print",
+ Exec: runMetricsPrint,
+ ShortHelp: "Prints current metric values in the Prometheus text exposition format",
+ },
+ {
+ Name: "write",
+ ShortUsage: "tailscale metrics write <path>",
+ Exec: runMetricsWrite,
+ ShortHelp: "Writes metric values to a file",
+ LongHelp: strings.TrimSpace(`
+
+The 'tailscale metrics write' command writes metric values to a text file provided as its
+only argument. It's meant to be used alongside Prometheus node exporter, allowing Tailscale
+metrics to be consumed and exported by the textfile collector.
+
+As an example, to export Tailscale metrics on an Ubuntu system running node exporter, you
+can regularly run 'tailscale metrics write /var/lib/prometheus/node-exporter/tailscaled.prom'
+using cron or a systemd timer.
+
+ `),
+ },
+ },
+}
+
+// runMetricsNoSubcommand prints metric values if no subcommand is specified.
+func runMetricsNoSubcommand(ctx context.Context, args []string) error {
+ if len(args) > 0 {
+ return fmt.Errorf("tailscale metrics: unknown subcommand: %s", args[0])
+ }
+
+ return runMetricsPrint(ctx, args)
+}
+
+// runMetricsPrint prints metric values to stdout.
+func runMetricsPrint(ctx context.Context, args []string) error {
+ out, err := localClient.UserMetrics(ctx)
+ if err != nil {
+ return err
+ }
+ Stdout.Write(out)
+ return nil
+}
+
+// runMetricsWrite writes metric values to a file.
+func runMetricsWrite(ctx context.Context, args []string) error {
+ if len(args) != 1 {
+ return errors.New("usage: tailscale metrics write <path>")
+ }
+ path := args[0]
+ out, err := localClient.UserMetrics(ctx)
+ if err != nil {
+ return err
+ }
+ return atomicfile.WriteFile(path, out, 0644)
+}
diff --git a/cmd/tailscale/cli/netcheck.go b/cmd/tailscale/cli/netcheck.go
index 682cd99a3c6e4..312475eced978 100644
--- a/cmd/tailscale/cli/netcheck.go
+++ b/cmd/tailscale/cli/netcheck.go
@@ -136,6 +136,7 @@ func printReport(dm *tailcfg.DERPMap, report *netcheck.Report) error {
}
printf("\nReport:\n")
+ printf("\t* Time: %v\n", report.Now.Format(time.RFC3339Nano))
printf("\t* UDP: %v\n", report.UDP)
if report.GlobalV4.IsValid() {
printf("\t* IPv4: yes, %s\n", report.GlobalV4)
diff --git a/cmd/tailscale/cli/risks.go b/cmd/tailscale/cli/risks.go
index 4cfa50d581ed4..acb50e723c585 100644
--- a/cmd/tailscale/cli/risks.go
+++ b/cmd/tailscale/cli/risks.go
@@ -17,11 +17,18 @@ import (
)
var (
- riskTypes []string
- riskLoseSSH = registerRiskType("lose-ssh")
- riskAll = registerRiskType("all")
+ riskTypes []string
+ riskLoseSSH = registerRiskType("lose-ssh")
+ riskMacAppConnector = registerRiskType("mac-app-connector")
+ riskAll = registerRiskType("all")
)
+const riskMacAppConnectorMessage = `
+You are trying to configure an app connector on macOS, which is not officially supported due to system limitations. This may result in performance and reliability issues.
+
+Do not use a macOS app connector for any mission-critical purposes. For the best experience, Linux is the only recommended platform for app connectors.
+`
+
func registerRiskType(riskType string) string {
riskTypes = append(riskTypes, riskType)
return riskType
diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go
index 2e1251f04a4b9..e8e5f0c51e15b 100644
--- a/cmd/tailscale/cli/set.go
+++ b/cmd/tailscale/cli/set.go
@@ -10,6 +10,7 @@ import (
"fmt"
"net/netip"
"os/exec"
+ "runtime"
"strings"
"github.com/peterbourgon/ff/v3/ffcli"
@@ -203,6 +204,12 @@ func runSet(ctx context.Context, args []string) (retErr error) {
}
}
+ if runtime.GOOS == "darwin" && maskedPrefs.AppConnector.Advertise {
+ if err := presentRiskToUser(riskMacAppConnector, riskMacAppConnectorMessage, setArgs.acceptedRisks); err != nil {
+ return err
+ }
+ }
+
if maskedPrefs.RunSSHSet {
wantSSH, haveSSH := maskedPrefs.RunSSH, curPrefs.RunSSH
if err := presentSSHToggleRisk(wantSSH, haveSSH, setArgs.acceptedRisks); err != nil {
diff --git a/cmd/tailscale/cli/syspolicy.go b/cmd/tailscale/cli/syspolicy.go
new file mode 100644
index 0000000000000..0e903db397c7d
--- /dev/null
+++ b/cmd/tailscale/cli/syspolicy.go
@@ -0,0 +1,110 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package cli
+
+import (
+ "context"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "os"
+ "slices"
+ "text/tabwriter"
+
+ "github.com/peterbourgon/ff/v3/ffcli"
+ "tailscale.com/util/syspolicy/setting"
+)
+
+var syspolicyArgs struct {
+ json bool // JSON output mode
+}
+
+var syspolicyCmd = &ffcli.Command{
+ Name: "syspolicy",
+ ShortHelp: "Diagnose the MDM and system policy configuration",
+ LongHelp: "The 'tailscale syspolicy' command provides tools for diagnosing the MDM and system policy configuration.",
+ ShortUsage: "tailscale syspolicy <subcommand>",
+ UsageFunc: usageFuncNoDefaultValues,
+ Subcommands: []*ffcli.Command{
+ {
+ Name: "list",
+ ShortUsage: "tailscale syspolicy list",
+ Exec: runSysPolicyList,
+ ShortHelp: "Prints effective policy settings",
+ LongHelp: "The 'tailscale syspolicy list' subcommand displays the effective policy settings and their sources (e.g., MDM or environment variables).",
+ FlagSet: (func() *flag.FlagSet {
+ fs := newFlagSet("syspolicy list")
+ fs.BoolVar(&syspolicyArgs.json, "json", false, "output in JSON format")
+ return fs
+ })(),
+ },
+ {
+ Name: "reload",
+ ShortUsage: "tailscale syspolicy reload",
+ Exec: runSysPolicyReload,
+ ShortHelp: "Forces a reload of policy settings, even if no changes are detected, and prints the result",
+ LongHelp: "The 'tailscale syspolicy reload' subcommand forces a reload of policy settings, even if no changes are detected, and prints the result.",
+ FlagSet: (func() *flag.FlagSet {
+ fs := newFlagSet("syspolicy reload")
+ fs.BoolVar(&syspolicyArgs.json, "json", false, "output in JSON format")
+ return fs
+ })(),
+ },
+ },
+}
+
+func runSysPolicyList(ctx context.Context, args []string) error {
+ policy, err := localClient.GetEffectivePolicy(ctx, setting.DefaultScope())
+ if err != nil {
+ return err
+ }
+ printPolicySettings(policy)
+ return nil
+
+}
+
+func runSysPolicyReload(ctx context.Context, args []string) error {
+ policy, err := localClient.ReloadEffectivePolicy(ctx, setting.DefaultScope())
+ if err != nil {
+ return err
+ }
+ printPolicySettings(policy)
+ return nil
+}
+
+func printPolicySettings(policy *setting.Snapshot) {
+ if syspolicyArgs.json {
+ json, err := json.MarshalIndent(policy, "", "\t")
+ if err != nil {
+ errf("syspolicy marshalling error: %v", err)
+ } else {
+ outln(string(json))
+ }
+ return
+ }
+ if policy.Len() == 0 {
+ outln("No policy settings")
+ return
+ }
+
+ w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
+ fmt.Fprintln(w, "Name\tOrigin\tValue\tError")
+ fmt.Fprintln(w, "----\t------\t-----\t-----")
+ for _, k := range slices.Sorted(policy.Keys()) {
+ setting, _ := policy.GetSetting(k)
+ var origin string
+ if o := setting.Origin(); o != nil {
+ origin = o.String()
+ }
+ if err := setting.Error(); err != nil {
+ fmt.Fprintf(w, "%s\t%s\t\t{%v}\n", k, origin, err)
+ } else {
+ fmt.Fprintf(w, "%s\t%s\t%v\t\n", k, origin, setting.Value())
+ }
+ }
+ w.Flush()
+
+ fmt.Println()
+ return
+}
diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go
index e1b828105b8dd..6c5c6f337f909 100644
--- a/cmd/tailscale/cli/up.go
+++ b/cmd/tailscale/cli/up.go
@@ -164,6 +164,9 @@ func defaultNetfilterMode() string {
return "on"
}
+// upArgsT is the type of upArgs, the argument struct for `tailscale up`.
+// As of 2024-10-08, upArgsT is frozen and no new arguments should be
+// added to it. Add new arguments to setArgsT instead.
type upArgsT struct {
qr bool
reset bool
@@ -376,6 +379,12 @@ func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, jus
return false, nil, err
}
+ if runtime.GOOS == "darwin" && env.upArgs.advertiseConnector {
+ if err := presentRiskToUser(riskMacAppConnector, riskMacAppConnectorMessage, env.upArgs.acceptedRisks); err != nil {
+ return false, nil, err
+ }
+ }
+
if env.upArgs.forceReauth && isSSHOverTailscale() {
if err := presentRiskToUser(riskLoseSSH, `You are connected over Tailscale; this action will result in your SSH session disconnecting.`, env.upArgs.acceptedRisks); err != nil {
return false, nil, err
@@ -1152,6 +1161,7 @@ func resolveAuthKey(ctx context.Context, v, tags string) (string, error) {
}
tsClient := tailscale.NewClient("-", nil)
+ tsClient.UserAgent = "tailscale-cli"
tsClient.HTTPClient = credentials.Client(ctx)
tsClient.BaseURL = baseURL
diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt
index 2c644d1be7d79..d18d8887327fa 100644
--- a/cmd/tailscale/depaware.txt
+++ b/cmd/tailscale/depaware.txt
@@ -5,10 +5,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+
W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate
W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy
- github.com/coder/websocket from tailscale.com/control/controlhttp+
- github.com/coder/websocket/internal/errd from github.com/coder/websocket
- github.com/coder/websocket/internal/util from github.com/coder/websocket
- github.com/coder/websocket/internal/xsync from github.com/coder/websocket
L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw
W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/pe+
W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/winutil/authenticode
@@ -86,12 +82,14 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
tailscale.com/cmd/tailscale/cli/ffcomplete/internal from tailscale.com/cmd/tailscale/cli/ffcomplete
tailscale.com/control/controlbase from tailscale.com/control/controlhttp+
tailscale.com/control/controlhttp from tailscale.com/cmd/tailscale/cli
+ tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp
tailscale.com/control/controlknobs from tailscale.com/net/portmapper
tailscale.com/derp from tailscale.com/derp/derphttp
tailscale.com/derp/derphttp from tailscale.com/net/netcheck
tailscale.com/disco from tailscale.com/derp
tailscale.com/drive from tailscale.com/client/tailscale+
tailscale.com/envknob from tailscale.com/client/tailscale+
+ tailscale.com/envknob/featureknob from tailscale.com/client/web
tailscale.com/health from tailscale.com/net/tlsdial+
tailscale.com/health/healthmsg from tailscale.com/cmd/tailscale/cli
tailscale.com/hostinfo from tailscale.com/client/web+
@@ -120,9 +118,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
tailscale.com/net/stun from tailscale.com/net/netcheck
L tailscale.com/net/tcpinfo from tailscale.com/derp
tailscale.com/net/tlsdial from tailscale.com/cmd/tailscale/cli+
+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial
tailscale.com/net/tsaddr from tailscale.com/client/web+
💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+
- tailscale.com/net/wsconn from tailscale.com/control/controlhttp+
tailscale.com/paths from tailscale.com/client/tailscale+
💣 tailscale.com/safesocket from tailscale.com/client/tailscale+
tailscale.com/syncs from tailscale.com/cmd/tailscale/cli+
@@ -146,6 +144,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
tailscale.com/types/persist from tailscale.com/ipn
tailscale.com/types/preftype from tailscale.com/cmd/tailscale/cli+
tailscale.com/types/ptr from tailscale.com/hostinfo+
+ tailscale.com/types/result from tailscale.com/util/lineiter
tailscale.com/types/structs from tailscale.com/ipn+
tailscale.com/types/tkatype from tailscale.com/types/key+
tailscale.com/types/views from tailscale.com/tailcfg+
@@ -153,14 +152,14 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
tailscale.com/util/clientmetric from tailscale.com/net/netcheck+
tailscale.com/util/cloudenv from tailscale.com/net/dnscache+
tailscale.com/util/cmpver from tailscale.com/net/tshttpproxy+
- tailscale.com/util/ctxkey from tailscale.com/types/logger
+ tailscale.com/util/ctxkey from tailscale.com/types/logger+
💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting
L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics
tailscale.com/util/dnsname from tailscale.com/cmd/tailscale/cli+
tailscale.com/util/groupmember from tailscale.com/client/web
💣 tailscale.com/util/hashx from tailscale.com/util/deephash
tailscale.com/util/httpm from tailscale.com/client/tailscale+
- tailscale.com/util/lineread from tailscale.com/hostinfo+
+ tailscale.com/util/lineiter from tailscale.com/hostinfo+
L tailscale.com/util/linuxfw from tailscale.com/net/netns
tailscale.com/util/mak from tailscale.com/cmd/tailscale/cli+
tailscale.com/util/multierr from tailscale.com/control/controlhttp+
@@ -172,14 +171,18 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+
tailscale.com/util/syspolicy from tailscale.com/ipn
tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+
- tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy
- tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy
- tailscale.com/util/testenv from tailscale.com/cmd/tailscale/cli
+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+
+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source
+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy
+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+
+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+
+ tailscale.com/util/testenv from tailscale.com/cmd/tailscale/cli+
tailscale.com/util/truncate from tailscale.com/cmd/tailscale/cli
tailscale.com/util/usermetric from tailscale.com/health
tailscale.com/util/vizerror from tailscale.com/tailcfg+
W 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+
W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate
+ W 💣 tailscale.com/util/winutil/gp from tailscale.com/util/syspolicy/source
W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+
tailscale.com/version from tailscale.com/client/web+
tailscale.com/version/distro from tailscale.com/client/web+
@@ -318,7 +321,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep
reflect from archive/tar+
regexp from github.com/coreos/go-iptables/iptables+
regexp/syntax from regexp
- runtime/debug from github.com/coder/websocket/internal/xsync+
+ runtime/debug from tailscale.com+
slices from tailscale.com/client/web+
sort from compress/flate+
strconv from archive/tar+
diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt
index 6f71a88a93217..81cd53271cf9e 100644
--- a/cmd/tailscaled/depaware.txt
+++ b/cmd/tailscaled/depaware.txt
@@ -79,10 +79,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http
L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm
github.com/bits-and-blooms/bitset from github.com/gaissmai/bart
- github.com/coder/websocket from tailscale.com/control/controlhttp+
- github.com/coder/websocket/internal/errd from github.com/coder/websocket
- github.com/coder/websocket/internal/util from github.com/coder/websocket
- github.com/coder/websocket/internal/xsync from github.com/coder/websocket
L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw
LD 💣 github.com/creack/pty from tailscale.com/ssh/tailssh
W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+
@@ -221,7 +217,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
gvisor.dev/gvisor/pkg/tcpip/network/internal/ip from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+
gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+
gvisor.dev/gvisor/pkg/tcpip/network/ipv4 from tailscale.com/net/tstun+
- gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from tailscale.com/wgengine/netstack
+ gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from tailscale.com/wgengine/netstack+
gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+
gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+
💣 gvisor.dev/gvisor/pkg/tcpip/stack from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+
@@ -249,6 +245,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/control/controlbase from tailscale.com/control/controlhttp+
tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+
tailscale.com/control/controlhttp from tailscale.com/control/controlclient
+ tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp
tailscale.com/control/controlknobs from tailscale.com/control/controlclient+
tailscale.com/derp from tailscale.com/derp/derphttp+
tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+
@@ -263,6 +260,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/drive/driveimpl/dirfs from tailscale.com/drive/driveimpl+
tailscale.com/drive/driveimpl/shared from tailscale.com/drive/driveimpl+
tailscale.com/envknob from tailscale.com/client/tailscale+
+ tailscale.com/envknob/featureknob from tailscale.com/client/web+
tailscale.com/health from tailscale.com/control/controlclient+
tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal
tailscale.com/hostinfo from tailscale.com/client/web+
@@ -321,11 +319,11 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/net/stun from tailscale.com/ipn/localapi+
L tailscale.com/net/tcpinfo from tailscale.com/derp
tailscale.com/net/tlsdial from tailscale.com/control/controlclient+
+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial
tailscale.com/net/tsaddr from tailscale.com/client/web+
tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+
💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+
tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+
- tailscale.com/net/wsconn from tailscale.com/control/controlhttp+
tailscale.com/omit from tailscale.com/ipn/conffile
tailscale.com/paths from tailscale.com/client/tailscale+
💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal
@@ -362,6 +360,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/types/persist from tailscale.com/control/controlclient+
tailscale.com/types/preftype from tailscale.com/ipn+
tailscale.com/types/ptr from tailscale.com/control/controlclient+
+ tailscale.com/types/result from tailscale.com/util/lineiter
tailscale.com/types/structs from tailscale.com/control/controlclient+
tailscale.com/types/tkatype from tailscale.com/tka+
tailscale.com/types/views from tailscale.com/ipn/ipnlocal+
@@ -379,7 +378,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
💣 tailscale.com/util/hashx from tailscale.com/util/deephash
tailscale.com/util/httphdr from tailscale.com/ipn/ipnlocal+
tailscale.com/util/httpm from tailscale.com/client/tailscale+
- tailscale.com/util/lineread from tailscale.com/hostinfo+
+ tailscale.com/util/lineiter from tailscale.com/hostinfo+
L tailscale.com/util/linuxfw from tailscale.com/net/netns+
tailscale.com/util/mak from tailscale.com/control/controlclient+
tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+
@@ -399,8 +398,11 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+
tailscale.com/util/syspolicy from tailscale.com/cmd/tailscaled+
tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+
- tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy
- tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy
+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+
+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source
+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+
+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+
+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+
tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock
tailscale.com/util/systemd from tailscale.com/control/controlclient+
tailscale.com/util/testenv from tailscale.com/ipn/ipnlocal+
@@ -410,7 +412,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/util/vizerror from tailscale.com/tailcfg+
💣 tailscale.com/util/winutil from tailscale.com/clientupdate+
W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+
- W 💣 tailscale.com/util/winutil/gp from tailscale.com/net/dns
+ W 💣 tailscale.com/util/winutil/gp from tailscale.com/net/dns+
W tailscale.com/util/winutil/policy from tailscale.com/ipn/ipnlocal
W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+
tailscale.com/util/zstdframe from tailscale.com/control/controlclient+
diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go
new file mode 100644
index 0000000000000..2b4bc280d26cf
--- /dev/null
+++ b/cmd/tailscaled/deps_test.go
@@ -0,0 +1,30 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package main
+
+import (
+ "testing"
+
+ "tailscale.com/tstest/deptest"
+)
+
+func TestOmitSSH(t *testing.T) {
+ const msg = "unexpected with ts_omit_ssh"
+ deptest.DepChecker{
+ GOOS: "linux",
+ GOARCH: "amd64",
+ Tags: "ts_omit_ssh",
+ BadDeps: map[string]string{
+ "tailscale.com/ssh/tailssh": msg,
+ "golang.org/x/crypto/ssh": msg,
+ "tailscale.com/sessionrecording": msg,
+ "github.com/anmitsu/go-shlex": msg,
+ "github.com/creack/pty": msg,
+ "github.com/kr/fs": msg,
+ "github.com/pkg/sftp": msg,
+ "github.com/u-root/u-root/pkg/termios": msg,
+ "tempfork/gliderlabs/ssh": msg,
+ },
+ }.Check(t)
+}
diff --git a/cmd/tailscaled/ssh.go b/cmd/tailscaled/ssh.go
index f7b0b367ead57..b10a3b7748719 100644
--- a/cmd/tailscaled/ssh.go
+++ b/cmd/tailscaled/ssh.go
@@ -1,7 +1,7 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
-//go:build linux || darwin || freebsd || openbsd
+//go:build (linux || darwin || freebsd || openbsd) && !ts_omit_ssh
package main
diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go
index 2831b4061973d..7a5ee03983f44 100644
--- a/cmd/tailscaled/tailscaled.go
+++ b/cmd/tailscaled/tailscaled.go
@@ -788,7 +788,6 @@ func runDebugServer(mux *http.ServeMux, addr string) {
}
func newNetstack(logf logger.Logf, sys *tsd.System) (*netstack.Impl, error) {
- tfs, _ := sys.DriveForLocal.GetOK()
ret, err := netstack.Create(logf,
sys.Tun.Get(),
sys.Engine.Get(),
@@ -796,7 +795,6 @@ func newNetstack(logf logger.Logf, sys *tsd.System) (*netstack.Impl, error) {
sys.Dialer.Get(),
sys.DNSManager.Get(),
sys.ProxyMapper(),
- tfs,
)
if err != nil {
return nil, err
diff --git a/cmd/tailscaled/tailscaled_windows.go b/cmd/tailscaled/tailscaled_windows.go
index 35c878f38ece3..786c5d8330939 100644
--- a/cmd/tailscaled/tailscaled_windows.go
+++ b/cmd/tailscaled/tailscaled_windows.go
@@ -134,14 +134,13 @@ func runWindowsService(pol *logpolicy.Policy) error {
logger.Logf(log.Printf).JSON(1, "SupportInfo", osdiag.SupportInfo(osdiag.LogSupportInfoReasonStartup))
}()
- if logSCMInteractions, _ := syspolicy.GetBoolean(syspolicy.LogSCMInteractions, false); logSCMInteractions {
- syslog, err := eventlog.Open(serviceName)
- if err == nil {
- syslogf = func(format string, args ...any) {
+ if syslog, err := eventlog.Open(serviceName); err == nil {
+ syslogf = func(format string, args ...any) {
+ if logSCMInteractions, _ := syspolicy.GetBoolean(syspolicy.LogSCMInteractions, false); logSCMInteractions {
syslog.Info(0, fmt.Sprintf(format, args...))
}
- defer syslog.Close()
}
+ defer syslog.Close()
}
syslogf("Service entering svc.Run")
@@ -160,10 +159,7 @@ func (service *ipnService) Execute(args []string, r <-chan svc.ChangeRequest, ch
changes <- svc.Status{State: svc.StartPending}
syslogf("Service start pending")
- svcAccepts := svc.AcceptStop
- if flushDNSOnSessionUnlock, _ := syspolicy.GetBoolean(syspolicy.FlushDNSOnSessionUnlock, false); flushDNSOnSessionUnlock {
- svcAccepts |= svc.AcceptSessionChange
- }
+ svcAccepts := svc.AcceptStop | svc.AcceptSessionChange
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -371,13 +367,15 @@ func handleSessionChange(chgRequest svc.ChangeRequest) {
return
}
- log.Printf("Received WTS_SESSION_UNLOCK event, initiating DNS flush.")
- go func() {
- err := dns.Flush()
- if err != nil {
- log.Printf("Error flushing DNS on session unlock: %v", err)
- }
- }()
+ if flushDNSOnSessionUnlock, _ := syspolicy.GetBoolean(syspolicy.FlushDNSOnSessionUnlock, false); flushDNSOnSessionUnlock {
+ log.Printf("Received WTS_SESSION_UNLOCK event, initiating DNS flush.")
+ go func() {
+ err := dns.Flush()
+ if err != nil {
+ log.Printf("Error flushing DNS on session unlock: %v", err)
+ }
+ }()
+ }
}
var (
diff --git a/cmd/testwrapper/testwrapper.go b/cmd/testwrapper/testwrapper.go
index 9b8d7a7c17ba5..f6ff8f00a93ab 100644
--- a/cmd/testwrapper/testwrapper.go
+++ b/cmd/testwrapper/testwrapper.go
@@ -42,6 +42,7 @@ type testAttempt struct {
testName string // "TestFoo"
outcome string // "pass", "fail", "skip"
logs bytes.Buffer
+ start, end time.Time
isMarkedFlaky bool // set if the test is marked as flaky
issueURL string // set if the test is marked as flaky
@@ -132,11 +133,17 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, goTestArgs, te
}
pkg := goOutput.Package
pkgTests := resultMap[pkg]
+ if pkgTests == nil {
+ pkgTests = make(map[string]*testAttempt)
+ resultMap[pkg] = pkgTests
+ }
if goOutput.Test == "" {
switch goOutput.Action {
+ case "start":
+ pkgTests[""] = &testAttempt{start: goOutput.Time}
case "fail", "pass", "skip":
for _, test := range pkgTests {
- if test.outcome == "" {
+ if test.testName != "" && test.outcome == "" {
test.outcome = "fail"
ch <- test
}
@@ -144,15 +151,13 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, goTestArgs, te
ch <- &testAttempt{
pkg: goOutput.Package,
outcome: goOutput.Action,
+ start: pkgTests[""].start,
+ end: goOutput.Time,
pkgFinished: true,
}
}
continue
}
- if pkgTests == nil {
- pkgTests = make(map[string]*testAttempt)
- resultMap[pkg] = pkgTests
- }
testName := goOutput.Test
if test, _, isSubtest := strings.Cut(goOutput.Test, "/"); isSubtest {
testName = test
@@ -168,8 +173,10 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, goTestArgs, te
pkgTests[testName] = &testAttempt{
pkg: pkg,
testName: testName,
+ start: goOutput.Time,
}
case "skip", "pass", "fail":
+ pkgTests[testName].end = goOutput.Time
pkgTests[testName].outcome = goOutput.Action
ch <- pkgTests[testName]
case "output":
@@ -213,7 +220,7 @@ func main() {
firstRun.tests = append(firstRun.tests, &packageTests{Pattern: pkg})
}
toRun := []*nextRun{firstRun}
- printPkgOutcome := func(pkg, outcome string, attempt int) {
+ printPkgOutcome := func(pkg, outcome string, attempt int, runtime time.Duration) {
if outcome == "skip" {
fmt.Printf("?\t%s [skipped/no tests] \n", pkg)
return
@@ -225,10 +232,10 @@ func main() {
outcome = "FAIL"
}
if attempt > 1 {
- fmt.Printf("%s\t%s [attempt=%d]\n", outcome, pkg, attempt)
+ fmt.Printf("%s\t%s\t%.3fs\t[attempt=%d]\n", outcome, pkg, runtime.Seconds(), attempt)
return
}
- fmt.Printf("%s\t%s\n", outcome, pkg)
+ fmt.Printf("%s\t%s\t%.3fs\n", outcome, pkg, runtime.Seconds())
}
// Check for -coverprofile argument and filter it out
@@ -307,7 +314,7 @@ func main() {
// when a package times out.
failed = true
}
- printPkgOutcome(tr.pkg, tr.outcome, thisRun.attempt)
+ printPkgOutcome(tr.pkg, tr.outcome, thisRun.attempt, tr.end.Sub(tr.start))
continue
}
if testingVerbose || tr.outcome == "fail" {
diff --git a/cmd/testwrapper/testwrapper_test.go b/cmd/testwrapper/testwrapper_test.go
index d7dbccd093ef8..fb2ed2c52cb2e 100644
--- a/cmd/testwrapper/testwrapper_test.go
+++ b/cmd/testwrapper/testwrapper_test.go
@@ -10,6 +10,7 @@ import (
"os"
"os/exec"
"path/filepath"
+ "regexp"
"sync"
"testing"
)
@@ -76,7 +77,10 @@ func TestFlakeRun(t *testing.T) {
t.Fatalf("go run . %s: %s with output:\n%s", testfile, err, out)
}
- want := []byte("ok\t" + testfile + " [attempt=2]")
+ // Replace the unpredictable timestamp with "0.00s".
+ out = regexp.MustCompile(`\t\d+\.\d\d\ds\t`).ReplaceAll(out, []byte("\t0.00s\t"))
+
+ want := []byte("ok\t" + testfile + "\t0.00s\t[attempt=2]")
if !bytes.Contains(out, want) {
t.Fatalf("wanted output containing %q but got:\n%s", want, out)
}
diff --git a/cmd/tsconnect/common.go b/cmd/tsconnect/common.go
index a387c00c9758e..0b0813226383a 100644
--- a/cmd/tsconnect/common.go
+++ b/cmd/tsconnect/common.go
@@ -150,6 +150,7 @@ func runEsbuildServe(buildOptions esbuild.BuildOptions) {
log.Fatalf("Cannot start esbuild server: %v", err)
}
log.Printf("Listening on http://%s:%d\n", result.Host, result.Port)
+ select {}
}
func runEsbuild(buildOptions esbuild.BuildOptions) esbuild.BuildResult {
diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go
index 8291ac9b4735f..4ea1cd89713cd 100644
--- a/cmd/tsconnect/wasm/wasm_js.go
+++ b/cmd/tsconnect/wasm/wasm_js.go
@@ -108,13 +108,14 @@ func newIPN(jsConfig js.Value) map[string]any {
SetSubsystem: sys.Set,
ControlKnobs: sys.ControlKnobs(),
HealthTracker: sys.HealthTracker(),
+ Metrics: sys.UserMetricsRegistry(),
})
if err != nil {
log.Fatal(err)
}
sys.Set(eng)
- ns, err := netstack.Create(logf, sys.Tun.Get(), eng, sys.MagicSock.Get(), dialer, sys.DNSManager.Get(), sys.ProxyMapper(), nil)
+ ns, err := netstack.Create(logf, sys.Tun.Get(), eng, sys.MagicSock.Get(), dialer, sys.DNSManager.Get(), sys.ProxyMapper())
if err != nil {
log.Fatalf("netstack.Create: %v", err)
}
@@ -128,6 +129,9 @@ func newIPN(jsConfig js.Value) map[string]any {
dialer.NetstackDialTCP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) {
return ns.DialContextTCP(ctx, dst)
}
+ dialer.NetstackDialUDP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) {
+ return ns.DialContextUDP(ctx, dst)
+ }
sys.NetstackRouter.Set(true)
sys.Tun.Get().Start()
@@ -268,8 +272,8 @@ func (i *jsIPN) run(jsCallbacks js.Value) {
name = p.Hostinfo().Hostname()
}
addrs := make([]string, p.Addresses().Len())
- for i := range p.Addresses().Len() {
- addrs[i] = p.Addresses().At(i).Addr().String()
+ for i, ap := range p.Addresses().All() {
+ addrs[i] = ap.Addr().String()
}
return jsNetMapPeerNode{
jsNetMapNode: jsNetMapNode{
@@ -585,8 +589,8 @@ func mapSlice[T any, M any](a []T, f func(T) M) []M {
func mapSliceView[T any, M any](a views.Slice[T], f func(T) M) []M {
n := make([]M, a.Len())
- for i := range a.Len() {
- n[i] = f(a.At(i))
+ for i, v := range a.All() {
+ n[i] = f(v)
}
return n
}
diff --git a/cmd/viewer/viewer.go b/cmd/viewer/viewer.go
index 96223297b46e2..0c5868f3a86e6 100644
--- a/cmd/viewer/viewer.go
+++ b/cmd/viewer/viewer.go
@@ -258,6 +258,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, thi
writeTemplate("unsupportedField")
continue
}
+ it.Import("tailscale.com/types/views")
args.MapKeyType = it.QualifiedName(key)
mElem := m.Elem()
var template string
diff --git a/cmd/viewer/viewer_test.go b/cmd/viewer/viewer_test.go
new file mode 100644
index 0000000000000..cd5f3d95f9c93
--- /dev/null
+++ b/cmd/viewer/viewer_test.go
@@ -0,0 +1,78 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "testing"
+
+ "tailscale.com/util/codegen"
+)
+
+func TestViewerImports(t *testing.T) {
+ tests := []struct {
+ name string
+ content string
+ typeNames []string
+ wantImports []string
+ }{
+ {
+ name: "Map",
+ content: `type Test struct { Map map[string]int }`,
+ typeNames: []string{"Test"},
+ wantImports: []string{"tailscale.com/types/views"},
+ },
+ {
+ name: "Slice",
+ content: `type Test struct { Slice []int }`,
+ typeNames: []string{"Test"},
+ wantImports: []string{"tailscale.com/types/views"},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "test.go", "package test\n\n"+tt.content, 0)
+ if err != nil {
+ fmt.Println("Error parsing:", err)
+ return
+ }
+
+ info := &types.Info{
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ }
+
+ conf := types.Config{}
+ pkg, err := conf.Check("", fset, []*ast.File{f}, info)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var output bytes.Buffer
+ tracker := codegen.NewImportTracker(pkg)
+ for i := range tt.typeNames {
+ typeName, ok := pkg.Scope().Lookup(tt.typeNames[i]).(*types.TypeName)
+ if !ok {
+ t.Fatalf("type %q does not exist", tt.typeNames[i])
+ }
+ namedType, ok := typeName.Type().(*types.Named)
+ if !ok {
+ t.Fatalf("%q is not a named type", tt.typeNames[i])
+ }
+ genView(&output, tracker, namedType, pkg)
+ }
+
+ for _, pkgName := range tt.wantImports {
+ if !tracker.Has(pkgName) {
+ t.Errorf("missing import %q", pkgName)
+ }
+ }
+ })
+ }
+}
diff --git a/control/controlclient/noise.go b/control/controlclient/noise.go
index 3994af056fc3b..2e7c70fd1b162 100644
--- a/control/controlclient/noise.go
+++ b/control/controlclient/noise.go
@@ -17,7 +17,6 @@ import (
"golang.org/x/net/http2"
"tailscale.com/control/controlhttp"
- "tailscale.com/envknob"
"tailscale.com/health"
"tailscale.com/internal/noiseconn"
"tailscale.com/net/dnscache"
@@ -30,7 +29,6 @@ import (
"tailscale.com/util/mak"
"tailscale.com/util/multierr"
"tailscale.com/util/singleflight"
- "tailscale.com/util/testenv"
)
// NoiseClient provides a http.Client to connect to tailcontrol over
@@ -107,11 +105,6 @@ type NoiseOpts struct {
DialPlan func() *tailcfg.ControlDialPlan
}
-// controlIsPlaintext is whether we should assume that the controlplane is only accessible
-// over plaintext HTTP (as the first hop, before the ts2021 encryption begins).
-// This is used by some tests which don't have a real TLS certificate.
-var controlIsPlaintext = envknob.RegisterBool("TS_CONTROL_IS_PLAINTEXT_HTTP")
-
// NewNoiseClient returns a new noiseClient for the provided server and machine key.
// serverURL is of the form https://: (no trailing slash).
//
@@ -129,7 +122,7 @@ func NewNoiseClient(opts NoiseOpts) (*NoiseClient, error) {
if u.Scheme == "http" {
httpPort = port
httpsPort = "443"
- if (testenv.InTest() || controlIsPlaintext()) && (u.Hostname() == "127.0.0.1" || u.Hostname() == "localhost") {
+ if u.Hostname() == "127.0.0.1" || u.Hostname() == "localhost" {
httpsPort = ""
}
} else {
diff --git a/control/controlclient/noise_test.go b/control/controlclient/noise_test.go
index f2627bd0a50fa..69a3a6a36551d 100644
--- a/control/controlclient/noise_test.go
+++ b/control/controlclient/noise_test.go
@@ -15,7 +15,7 @@ import (
"time"
"golang.org/x/net/http2"
- "tailscale.com/control/controlhttp"
+ "tailscale.com/control/controlhttp/controlhttpserver"
"tailscale.com/internal/noiseconn"
"tailscale.com/net/netmon"
"tailscale.com/net/tsdial"
@@ -201,7 +201,7 @@ func (up *Upgrader) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return nil
}
- cbConn, err := controlhttp.AcceptHTTP(r.Context(), w, r, up.noiseKeyPriv, earlyWriteFn)
+ cbConn, err := controlhttpserver.AcceptHTTP(r.Context(), w, r, up.noiseKeyPriv, earlyWriteFn)
if err != nil {
up.logf("controlhttp: Accept: %v", err)
return
diff --git a/control/controlclient/sign_supported.go b/control/controlclient/sign_supported.go
index 0e3dd038e4ed7..a5d42ad7df4a2 100644
--- a/control/controlclient/sign_supported.go
+++ b/control/controlclient/sign_supported.go
@@ -13,7 +13,6 @@ import (
"crypto/x509"
"errors"
"fmt"
- "sync"
"time"
"github.com/tailscale/certstore"
@@ -22,11 +21,6 @@ import (
"tailscale.com/util/syspolicy"
)
-var getMachineCertificateSubjectOnce struct {
- sync.Once
- v string // Subject of machine certificate to search for
-}
-
// getMachineCertificateSubject returns the exact name of a Subject that needs
// to be present in an identity's certificate chain to sign a RegisterRequest,
// formatted as per pkix.Name.String(). The Subject may be that of the identity
@@ -37,11 +31,8 @@ var getMachineCertificateSubjectOnce struct {
//
// Example: "CN=Tailscale Inc Test Root CA,OU=Tailscale Inc Test Certificate Authority,O=Tailscale Inc,ST=ON,C=CA"
func getMachineCertificateSubject() string {
- getMachineCertificateSubjectOnce.Do(func() {
- getMachineCertificateSubjectOnce.v, _ = syspolicy.GetString(syspolicy.MachineCertificateSubject, "")
- })
-
- return getMachineCertificateSubjectOnce.v
+ machineCertSubject, _ := syspolicy.GetString(syspolicy.MachineCertificateSubject, "")
+ return machineCertSubject
}
var (
diff --git a/control/controlhttp/client.go b/control/controlhttp/client.go
index 7e5263e3317fe..9b1d5a1a598e7 100644
--- a/control/controlhttp/client.go
+++ b/control/controlhttp/client.go
@@ -38,6 +38,7 @@ import (
"time"
"tailscale.com/control/controlbase"
+ "tailscale.com/control/controlhttp/controlhttpcommon"
"tailscale.com/envknob"
"tailscale.com/health"
"tailscale.com/net/dnscache"
@@ -571,9 +572,9 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Ad
Method: "POST",
URL: u,
Header: http.Header{
- "Upgrade": []string{upgradeHeaderValue},
- "Connection": []string{"upgrade"},
- handshakeHeaderName: []string{base64.StdEncoding.EncodeToString(init)},
+ "Upgrade": []string{controlhttpcommon.UpgradeHeaderValue},
+ "Connection": []string{"upgrade"},
+ controlhttpcommon.HandshakeHeaderName: []string{base64.StdEncoding.EncodeToString(init)},
},
}
req = req.WithContext(ctx)
@@ -597,7 +598,7 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Ad
return nil, fmt.Errorf("httptrace didn't provide a connection")
}
- if next := resp.Header.Get("Upgrade"); next != upgradeHeaderValue {
+ if next := resp.Header.Get("Upgrade"); next != controlhttpcommon.UpgradeHeaderValue {
resp.Body.Close()
return nil, fmt.Errorf("server switched to unexpected protocol %q", next)
}
diff --git a/control/controlhttp/client_js.go b/control/controlhttp/client_js.go
index 4b7126b52cf38..cc05b5b192766 100644
--- a/control/controlhttp/client_js.go
+++ b/control/controlhttp/client_js.go
@@ -12,6 +12,7 @@ import (
"github.com/coder/websocket"
"tailscale.com/control/controlbase"
+ "tailscale.com/control/controlhttp/controlhttpcommon"
"tailscale.com/net/wsconn"
)
@@ -42,11 +43,11 @@ func (d *Dialer) Dial(ctx context.Context) (*ClientConn, error) {
// Can't set HTTP headers on the websocket request, so we have to to send
// the handshake via an HTTP header.
RawQuery: url.Values{
- handshakeHeaderName: []string{base64.StdEncoding.EncodeToString(init)},
+ controlhttpcommon.HandshakeHeaderName: []string{base64.StdEncoding.EncodeToString(init)},
}.Encode(),
}
wsConn, _, err := websocket.Dial(ctx, wsURL.String(), &websocket.DialOptions{
- Subprotocols: []string{upgradeHeaderValue},
+ Subprotocols: []string{controlhttpcommon.UpgradeHeaderValue},
})
if err != nil {
return nil, err
diff --git a/control/controlhttp/constants.go b/control/controlhttp/constants.go
index ea1725e76d438..971212d63b994 100644
--- a/control/controlhttp/constants.go
+++ b/control/controlhttp/constants.go
@@ -18,15 +18,6 @@ import (
)
const (
- // upgradeHeader is the value of the Upgrade HTTP header used to
- // indicate the Tailscale control protocol.
- upgradeHeaderValue = "tailscale-control-protocol"
-
- // handshakeHeaderName is the HTTP request header that can
- // optionally contain base64-encoded initial handshake
- // payload, to save an RTT.
- handshakeHeaderName = "X-Tailscale-Handshake"
-
// serverUpgradePath is where the server-side HTTP handler to
// to do the protocol switch is located.
serverUpgradePath = "/ts2021"
@@ -85,6 +76,8 @@ type Dialer struct {
// dropped.
Logf logger.Logf
+ // NetMon is the [netmon.Monitor] to use for this Dialer. It must be
+ // non-nil.
NetMon *netmon.Monitor
// HealthTracker, if non-nil, is the health tracker to use.
diff --git a/control/controlhttp/controlhttpcommon/controlhttpcommon.go b/control/controlhttp/controlhttpcommon/controlhttpcommon.go
new file mode 100644
index 0000000000000..a86b7ca04a7f4
--- /dev/null
+++ b/control/controlhttp/controlhttpcommon/controlhttpcommon.go
@@ -0,0 +1,15 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Package controlhttpcommon contains common constants used
+// by the controlhttp client and controlhttpserver packages.
+package controlhttpcommon
+
+// UpgradeHeaderValue is the value of the Upgrade HTTP header used to
+// indicate the Tailscale control protocol.
+const UpgradeHeaderValue = "tailscale-control-protocol"
+
+// HandshakeHeaderName is the HTTP request header that can
+// optionally contain base64-encoded initial handshake
+// payload, to save an RTT.
+const HandshakeHeaderName = "X-Tailscale-Handshake"
diff --git a/control/controlhttp/server.go b/control/controlhttp/controlhttpserver/controlhttpserver.go
similarity index 92%
rename from control/controlhttp/server.go
rename to control/controlhttp/controlhttpserver/controlhttpserver.go
index 7c3dd5618c4a3..af320781069d1 100644
--- a/control/controlhttp/server.go
+++ b/control/controlhttp/controlhttpserver/controlhttpserver.go
@@ -3,7 +3,8 @@
//go:build !ios
-package controlhttp
+// Package controlhttpserver contains the HTTP server side of the ts2021 control protocol.
+package controlhttpserver
import (
"context"
@@ -18,6 +19,7 @@ import (
"github.com/coder/websocket"
"tailscale.com/control/controlbase"
+ "tailscale.com/control/controlhttp/controlhttpcommon"
"tailscale.com/net/netutil"
"tailscale.com/net/wsconn"
"tailscale.com/types/key"
@@ -45,12 +47,12 @@ func acceptHTTP(ctx context.Context, w http.ResponseWriter, r *http.Request, pri
if next == "websocket" {
return acceptWebsocket(ctx, w, r, private)
}
- if next != upgradeHeaderValue {
+ if next != controlhttpcommon.UpgradeHeaderValue {
http.Error(w, "unknown next protocol", http.StatusBadRequest)
return nil, fmt.Errorf("client requested unhandled next protocol %q", next)
}
- initB64 := r.Header.Get(handshakeHeaderName)
+ initB64 := r.Header.Get(controlhttpcommon.HandshakeHeaderName)
if initB64 == "" {
http.Error(w, "missing Tailscale handshake header", http.StatusBadRequest)
return nil, errors.New("no tailscale handshake header in HTTP request")
@@ -67,7 +69,7 @@ func acceptHTTP(ctx context.Context, w http.ResponseWriter, r *http.Request, pri
return nil, errors.New("can't hijack client connection")
}
- w.Header().Set("Upgrade", upgradeHeaderValue)
+ w.Header().Set("Upgrade", controlhttpcommon.UpgradeHeaderValue)
w.Header().Set("Connection", "upgrade")
w.WriteHeader(http.StatusSwitchingProtocols)
@@ -117,7 +119,7 @@ func acceptHTTP(ctx context.Context, w http.ResponseWriter, r *http.Request, pri
// speak HTTP) to a Tailscale control protocol base transport connection.
func acceptWebsocket(ctx context.Context, w http.ResponseWriter, r *http.Request, private key.MachinePrivate) (*controlbase.Conn, error) {
c, err := websocket.Accept(w, r, &websocket.AcceptOptions{
- Subprotocols: []string{upgradeHeaderValue},
+ Subprotocols: []string{controlhttpcommon.UpgradeHeaderValue},
OriginPatterns: []string{"*"},
// Disable compression because we transmit Noise messages that are not
// compressible.
@@ -129,7 +131,7 @@ func acceptWebsocket(ctx context.Context, w http.ResponseWriter, r *http.Request
if err != nil {
return nil, fmt.Errorf("Could not accept WebSocket connection %v", err)
}
- if c.Subprotocol() != upgradeHeaderValue {
+ if c.Subprotocol() != controlhttpcommon.UpgradeHeaderValue {
c.Close(websocket.StatusPolicyViolation, "client must speak the control subprotocol")
return nil, fmt.Errorf("Unexpected subprotocol %q", c.Subprotocol())
}
@@ -137,7 +139,7 @@ func acceptWebsocket(ctx context.Context, w http.ResponseWriter, r *http.Request
c.Close(websocket.StatusPolicyViolation, "Could not parse parameters")
return nil, fmt.Errorf("parse query parameters: %v", err)
}
- initB64 := r.Form.Get(handshakeHeaderName)
+ initB64 := r.Form.Get(controlhttpcommon.HandshakeHeaderName)
if initB64 == "" {
c.Close(websocket.StatusPolicyViolation, "missing Tailscale handshake parameter")
return nil, errors.New("no tailscale handshake parameter in HTTP request")
diff --git a/control/controlhttp/http_test.go b/control/controlhttp/http_test.go
index 8c8ed7f5701b0..aef916ef651c1 100644
--- a/control/controlhttp/http_test.go
+++ b/control/controlhttp/http_test.go
@@ -23,12 +23,16 @@ import (
"time"
"tailscale.com/control/controlbase"
+ "tailscale.com/control/controlhttp/controlhttpcommon"
+ "tailscale.com/control/controlhttp/controlhttpserver"
+ "tailscale.com/health"
"tailscale.com/net/dnscache"
"tailscale.com/net/netmon"
"tailscale.com/net/socks5"
"tailscale.com/net/tsdial"
"tailscale.com/tailcfg"
"tailscale.com/tstest"
+ "tailscale.com/tstest/deptest"
"tailscale.com/tstime"
"tailscale.com/types/key"
"tailscale.com/types/logger"
@@ -158,7 +162,7 @@ func testControlHTTP(t *testing.T, param httpTestParam) {
return err
}
}
- conn, err := AcceptHTTP(context.Background(), w, r, server, earlyWriteFn)
+ conn, err := controlhttpserver.AcceptHTTP(context.Background(), w, r, server, earlyWriteFn)
if err != nil {
log.Print(err)
}
@@ -225,6 +229,7 @@ func testControlHTTP(t *testing.T, param httpTestParam) {
omitCertErrorLogging: true,
testFallbackDelay: fallbackDelay,
Clock: clock,
+ HealthTracker: new(health.Tracker),
}
if param.httpInDial {
@@ -529,7 +534,7 @@ EKTcWGekdmdDPsHloRNtsiCa697B2O9IFA==
func brokenMITMHandler(clock tstime.Clock) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Upgrade", upgradeHeaderValue)
+ w.Header().Set("Upgrade", controlhttpcommon.UpgradeHeaderValue)
w.Header().Set("Connection", "upgrade")
w.WriteHeader(http.StatusSwitchingProtocols)
w.(http.Flusher).Flush()
@@ -574,7 +579,7 @@ func TestDialPlan(t *testing.T) {
close(done)
})
var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- conn, err := AcceptHTTP(context.Background(), w, r, server, nil)
+ conn, err := controlhttpserver.AcceptHTTP(context.Background(), w, r, server, nil)
if err != nil {
log.Print(err)
} else {
@@ -726,6 +731,7 @@ func TestDialPlan(t *testing.T) {
omitCertErrorLogging: true,
testFallbackDelay: 50 * time.Millisecond,
Clock: clock,
+ HealthTracker: new(health.Tracker),
}
conn, err := a.dial(ctx)
@@ -816,3 +822,14 @@ func (c *closeTrackConn) Close() error {
c.d.noteClose(c)
return c.Conn.Close()
}
+
+func TestDeps(t *testing.T) {
+ deptest.DepChecker{
+ GOOS: "darwin",
+ GOARCH: "arm64",
+ BadDeps: map[string]string{
+ // Only the controlhttpserver needs WebSockets...
+ "github.com/coder/websocket": "controlhttp client shouldn't need websockets",
+ },
+ }.Check(t)
+}
diff --git a/derp/derp.go b/derp/derp.go
index f9b0706477358..878188cd20625 100644
--- a/derp/derp.go
+++ b/derp/derp.go
@@ -147,6 +147,7 @@ const (
PeerPresentIsRegular = 1 << 0
PeerPresentIsMeshPeer = 1 << 1
PeerPresentIsProber = 1 << 2
+ PeerPresentNotIdeal = 1 << 3 // client said derp server is not its Region.Nodes[0] ideal node
)
var bin = binary.BigEndian
diff --git a/derp/derp_server.go b/derp/derp_server.go
index 8c5d6e890567b..ab0ab0a908a07 100644
--- a/derp/derp_server.go
+++ b/derp/derp_server.go
@@ -26,6 +26,7 @@ import (
"net"
"net/http"
"net/netip"
+ "os"
"os/exec"
"runtime"
"strconv"
@@ -46,6 +47,7 @@ import (
"tailscale.com/tstime/rate"
"tailscale.com/types/key"
"tailscale.com/types/logger"
+ "tailscale.com/util/ctxkey"
"tailscale.com/util/mak"
"tailscale.com/util/set"
"tailscale.com/util/slicesx"
@@ -56,6 +58,16 @@ import (
// verbosely log whenever DERP drops a packet.
var verboseDropKeys = map[key.NodePublic]bool{}
+// IdealNodeHeader is the HTTP request header sent on DERP HTTP client requests
// to indicate that they're not connecting to their ideal (Region.Nodes[0]) node.
+// The HTTP header value is the name of the node they wish they were connected
+// to. This is an optional header.
+const IdealNodeHeader = "Ideal-Node"
+
+// IdealNodeContextKey is the context key used to pass the IdealNodeHeader value
+// from the HTTP handler to the DERP server's Accept method.
+var IdealNodeContextKey = ctxkey.New[string]("ideal-node", "")
+
func init() {
keys := envknob.String("TS_DEBUG_VERBOSE_DROPS")
if keys == "" {
@@ -74,6 +86,7 @@ func init() {
const (
perClientSendQueueDepth = 32 // packets buffered for sending
writeTimeout = 2 * time.Second
+ privilegedWriteTimeout = 30 * time.Second // for clients with the mesh key
)
// dupPolicy is a temporary (2021-08-30) mechanism to change the policy
@@ -131,6 +144,7 @@ type Server struct {
sentPong expvar.Int // number of pong frames enqueued to client
accepts expvar.Int
curClients expvar.Int
+ curClientsNotIdeal expvar.Int
curHomeClients expvar.Int // ones with preferred
dupClientKeys expvar.Int // current number of public keys we have 2+ connections for
dupClientConns expvar.Int // current number of connections sharing a public key
@@ -141,6 +155,7 @@ type Server struct {
multiForwarderCreated expvar.Int
multiForwarderDeleted expvar.Int
removePktForwardOther expvar.Int
+ sclientWriteTimeouts expvar.Int
avgQueueDuration *uint64 // In milliseconds; accessed atomically
tcpRtt metrics.LabelMap // histogram
meshUpdateBatchSize *metrics.Histogram
@@ -600,6 +615,9 @@ func (s *Server) registerClient(c *sclient) {
}
s.keyOfAddr[c.remoteIPPort] = c.key
s.curClients.Add(1)
+ if c.isNotIdealConn {
+ s.curClientsNotIdeal.Add(1)
+ }
s.broadcastPeerStateChangeLocked(c.key, c.remoteIPPort, c.presentFlags(), true)
}
@@ -690,6 +708,9 @@ func (s *Server) unregisterClient(c *sclient) {
if c.preferred {
s.curHomeClients.Add(-1)
}
+ if c.isNotIdealConn {
+ s.curClientsNotIdeal.Add(-1)
+ }
}
// addPeerGoneFromRegionWatcher adds a function to be called when peer is gone
@@ -806,8 +827,8 @@ func (s *Server) accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, rem
return fmt.Errorf("receive client key: %v", err)
}
- clientAP, _ := netip.ParseAddrPort(remoteAddr)
- if err := s.verifyClient(ctx, clientKey, clientInfo, clientAP.Addr()); err != nil {
+ remoteIPPort, _ := netip.ParseAddrPort(remoteAddr)
+ if err := s.verifyClient(ctx, clientKey, clientInfo, remoteIPPort.Addr()); err != nil {
return fmt.Errorf("client %v rejected: %v", clientKey, err)
}
@@ -817,8 +838,6 @@ func (s *Server) accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, rem
ctx, cancel := context.WithCancel(ctx)
defer cancel()
- remoteIPPort, _ := netip.ParseAddrPort(remoteAddr)
-
c := &sclient{
connNum: connNum,
s: s,
@@ -835,6 +854,7 @@ func (s *Server) accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, rem
sendPongCh: make(chan [8]byte, 1),
peerGone: make(chan peerGoneMsg),
canMesh: s.isMeshPeer(clientInfo),
+ isNotIdealConn: IdealNodeContextKey.Value(ctx) != "",
peerGoneLim: rate.NewLimiter(rate.Every(time.Second), 3),
}
@@ -881,6 +901,9 @@ func (c *sclient) run(ctx context.Context) error {
if errors.Is(err, context.Canceled) {
c.debugLogf("sender canceled by reader exiting")
} else {
+ if errors.Is(err, os.ErrDeadlineExceeded) {
+ c.s.sclientWriteTimeouts.Add(1)
+ }
c.logf("sender failed: %v", err)
}
}
@@ -1505,6 +1528,7 @@ type sclient struct {
peerGone chan peerGoneMsg // write request that a peer is not at this server (not used by mesh peers)
meshUpdate chan struct{} // write request to write peerStateChange
canMesh bool // clientInfo had correct mesh token for inter-region routing
+ isNotIdealConn bool // client indicated it is not its ideal node in the region
isDup atomic.Bool // whether more than 1 sclient for key is connected
isDisabled atomic.Bool // whether sends to this peer are disabled due to active/active dups
debug bool // turn on for verbose logging
@@ -1540,6 +1564,9 @@ func (c *sclient) presentFlags() PeerPresentFlags {
if c.canMesh {
f |= PeerPresentIsMeshPeer
}
+ if c.isNotIdealConn {
+ f |= PeerPresentNotIdeal
+ }
if f == 0 {
return PeerPresentIsRegular
}
@@ -1721,7 +1748,19 @@ func (c *sclient) sendLoop(ctx context.Context) error {
}
func (c *sclient) setWriteDeadline() {
- c.nc.SetWriteDeadline(time.Now().Add(writeTimeout))
+ d := writeTimeout
+ if c.canMesh {
+ // Trusted peers get more tolerance.
+ //
+ // The "canMesh" is a bit of a misnomer; mesh peers typically run over a
+ // different interface for a per-region private VPC and are not
// throttled. But monitoring software elsewhere over the internet also
// uses the private mesh key to subscribe to connect/disconnect events
+ // and might hit throttling and need more time to get the initial dump
+ // of connected peers.
+ d = privilegedWriteTimeout
+ }
+ c.nc.SetWriteDeadline(time.Now().Add(d))
}
// sendKeepAlive sends a keep-alive frame, without flushing.
@@ -2033,6 +2072,7 @@ func (s *Server) ExpVar() expvar.Var {
m.Set("gauge_current_file_descriptors", expvar.Func(func() any { return metrics.CurrentFDs() }))
m.Set("gauge_current_connections", &s.curClients)
m.Set("gauge_current_home_connections", &s.curHomeClients)
+ m.Set("gauge_current_notideal_connections", &s.curClientsNotIdeal)
m.Set("gauge_clients_total", expvar.Func(func() any { return len(s.clientsMesh) }))
m.Set("gauge_clients_local", expvar.Func(func() any { return len(s.clients) }))
m.Set("gauge_clients_remote", expvar.Func(func() any { return len(s.clientsMesh) - len(s.clients) }))
@@ -2060,6 +2100,7 @@ func (s *Server) ExpVar() expvar.Var {
m.Set("multiforwarder_created", &s.multiForwarderCreated)
m.Set("multiforwarder_deleted", &s.multiForwarderDeleted)
m.Set("packet_forwarder_delete_other_value", &s.removePktForwardOther)
+ m.Set("sclient_write_timeouts", &s.sclientWriteTimeouts)
m.Set("average_queue_duration_ms", expvar.Func(func() any {
return math.Float64frombits(atomic.LoadUint64(s.avgQueueDuration))
}))
diff --git a/derp/derphttp/derphttp_client.go b/derp/derphttp/derphttp_client.go
index b8cce8cdcb4fa..c95d072b1a572 100644
--- a/derp/derphttp/derphttp_client.go
+++ b/derp/derphttp/derphttp_client.go
@@ -313,6 +313,9 @@ func (c *Client) preferIPv6() bool {
var dialWebsocketFunc func(ctx context.Context, urlStr string) (net.Conn, error)
func useWebsockets() bool {
+ if !canWebsockets {
+ return false
+ }
if runtime.GOOS == "js" {
return true
}
@@ -383,7 +386,7 @@ func (c *Client) connect(ctx context.Context, caller string) (client *derp.Clien
var node *tailcfg.DERPNode // nil when using c.url to dial
var idealNodeInRegion bool
switch {
- case useWebsockets():
+ case canWebsockets && useWebsockets():
var urlStr string
if c.url != nil {
urlStr = c.url.String()
@@ -498,7 +501,7 @@ func (c *Client) connect(ctx context.Context, caller string) (client *derp.Clien
req.Header.Set("Connection", "Upgrade")
if !idealNodeInRegion && reg != nil {
// This is purely informative for now (2024-07-06) for stats:
- req.Header.Set("Ideal-Node", reg.Nodes[0].Name)
+ req.Header.Set(derp.IdealNodeHeader, reg.Nodes[0].Name)
// TODO(bradfitz,raggi): start a time.AfterFunc for 30m-1h or so to
// dialNode(reg.Nodes[0]) and see if we can even TCP connect to it. If
// so, TLS handshake it as well (which is mixed up in this massive
diff --git a/derp/derphttp/derphttp_server.go b/derp/derphttp/derphttp_server.go
index 41ce86764f66a..ed7d3d7073866 100644
--- a/derp/derphttp/derphttp_server.go
+++ b/derp/derphttp/derphttp_server.go
@@ -21,6 +21,8 @@ const fastStartHeader = "Derp-Fast-Start"
// Handler returns an http.Handler to be mounted at /derp, serving s.
func Handler(s *derp.Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
// These are installed both here and in cmd/derper. The check here
// catches both cmd/derper run with DERP disabled (STUN only mode) as
// well as DERP being run in tests with derphttp.Handler directly,
@@ -66,7 +68,11 @@ func Handler(s *derp.Server) http.Handler {
pubKey.UntypedHexString())
}
- s.Accept(r.Context(), netConn, conn, netConn.RemoteAddr().String())
+ if v := r.Header.Get(derp.IdealNodeHeader); v != "" {
+ ctx = derp.IdealNodeContextKey.WithValue(ctx, v)
+ }
+
+ s.Accept(ctx, netConn, conn, netConn.RemoteAddr().String())
})
}
diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go
index cfb3676cda16f..cf6032a5e6d43 100644
--- a/derp/derphttp/derphttp_test.go
+++ b/derp/derphttp/derphttp_test.go
@@ -17,7 +17,9 @@ import (
"tailscale.com/derp"
"tailscale.com/net/netmon"
+ "tailscale.com/tstest/deptest"
"tailscale.com/types/key"
+ "tailscale.com/util/set"
)
func TestSendRecv(t *testing.T) {
@@ -485,3 +487,23 @@ func TestProbe(t *testing.T) {
}
}
}
+
+func TestDeps(t *testing.T) {
+ deptest.DepChecker{
+ GOOS: "darwin",
+ GOARCH: "arm64",
+ BadDeps: map[string]string{
+ "github.com/coder/websocket": "shouldn't link websockets except on js/wasm",
+ },
+ }.Check(t)
+
+ deptest.DepChecker{
+ GOOS: "darwin",
+ GOARCH: "arm64",
+ Tags: "ts_debug_websockets",
+ WantDeps: set.Of(
+ "github.com/coder/websocket",
+ ),
+ }.Check(t)
+
+}
diff --git a/derp/derphttp/websocket.go b/derp/derphttp/websocket.go
index 6ef47473a2532..9dd640ee37083 100644
--- a/derp/derphttp/websocket.go
+++ b/derp/derphttp/websocket.go
@@ -1,7 +1,7 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
-//go:build linux || js
+//go:build js || ((linux || darwin) && ts_debug_websockets)
package derphttp
@@ -14,6 +14,8 @@ import (
"tailscale.com/net/wsconn"
)
+const canWebsockets = true
+
func init() {
dialWebsocketFunc = dialWebsocket
}
diff --git a/derp/derphttp/websocket_stub.go b/derp/derphttp/websocket_stub.go
new file mode 100644
index 0000000000000..d84bfba571f80
--- /dev/null
+++ b/derp/derphttp/websocket_stub.go
@@ -0,0 +1,8 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !(js || ((linux || darwin) && ts_debug_websockets))
+
+package derphttp
+
+const canWebsockets = false
diff --git a/docs/k8s/proxy.yaml b/docs/k8s/proxy.yaml
index 2ab7ed334395d..048fd7a5bddf9 100644
--- a/docs/k8s/proxy.yaml
+++ b/docs/k8s/proxy.yaml
@@ -44,7 +44,13 @@ spec:
value: "{{TS_DEST_IP}}"
- name: TS_AUTH_ONCE
value: "true"
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
securityContext:
- capabilities:
- add:
- - NET_ADMIN
+ privileged: true
diff --git a/docs/k8s/role.yaml b/docs/k8s/role.yaml
index 6d6a8117d1bbd..d7d0846ab29a6 100644
--- a/docs/k8s/role.yaml
+++ b/docs/k8s/role.yaml
@@ -13,3 +13,6 @@ rules:
resourceNames: ["{{TS_KUBE_SECRET}}"]
resources: ["secrets"]
verbs: ["get", "update", "patch"]
+- apiGroups: [""] # "" indicates the core API group
+ resources: ["events"]
+ verbs: ["get", "create", "patch"]
diff --git a/docs/k8s/sidecar.yaml b/docs/k8s/sidecar.yaml
index 7efd32a38d0ac..520e4379ad9ee 100644
--- a/docs/k8s/sidecar.yaml
+++ b/docs/k8s/sidecar.yaml
@@ -26,7 +26,13 @@ spec:
name: tailscale-auth
key: TS_AUTHKEY
optional: true
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
securityContext:
- capabilities:
- add:
- - NET_ADMIN
+ privileged: true
diff --git a/docs/k8s/subnet.yaml b/docs/k8s/subnet.yaml
index 4b7066fb3460a..ef4e4748c0ceb 100644
--- a/docs/k8s/subnet.yaml
+++ b/docs/k8s/subnet.yaml
@@ -28,7 +28,13 @@ spec:
optional: true
- name: TS_ROUTES
value: "{{TS_ROUTES}}"
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
securityContext:
- capabilities:
- add:
- - NET_ADMIN
+ privileged: true
diff --git a/docs/k8s/userspace-sidecar.yaml b/docs/k8s/userspace-sidecar.yaml
index fc4ed63502dbc..ee19b10a5e5dd 100644
--- a/docs/k8s/userspace-sidecar.yaml
+++ b/docs/k8s/userspace-sidecar.yaml
@@ -27,3 +27,11 @@ spec:
name: tailscale-auth
key: TS_AUTHKEY
optional: true
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_UID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
diff --git a/docs/windows/policy/en-US/tailscale.adml b/docs/windows/policy/en-US/tailscale.adml
index 7a658422cd7f6..ebf1a5905f6e9 100644
--- a/docs/windows/policy/en-US/tailscale.adml
+++ b/docs/windows/policy/en-US/tailscale.adml
@@ -15,16 +15,18 @@
Tailscale version 1.58.0 and later
Tailscale version 1.62.0 and later
Tailscale version 1.74.0 and later
+ Tailscale version 1.78.0 and later
Tailscale
UI customization
Settings
Require using a specific Tailscale coordination server
+If you disable or do not configure this policy, the Tailscale SaaS coordination server will be used by default, but a non-standard Tailscale coordination server can be configured using the CLI.
+
+See https://tailscale.com/kb/1315/mdm-keys#set-a-custom-control-server-url for more details.]]>
Require using a specific Tailscale log server
Specify which Tailnet should be used for Login
+See https://tailscale.com/kb/1315/mdm-keys#set-a-suggested-or-required-tailnet for more details.]]>
Specify the auth key to authenticate devices without user interaction
Require using a specific Exit Node
+If you do not configure this policy, no exit node will be used by default but an exit node (if one is available and permitted by ACLs) can be chosen by the user if desired.
+
+See https://tailscale.com/kb/1315/mdm-keys#force-an-exit-node-to-always-be-used and https://tailscale.com/kb/1103/exit-nodes for more details.]]>
+ Limit automated Exit Node suggestions to specific nodes
+
Allow incoming connections
+If you do not configure this policy, then Allow Incoming Connections depends on what is selected in the Preferences submenu.
+
+See https://tailscale.com/kb/1315/mdm-keys#set-whether-to-allow-incoming-connections and https://tailscale.com/kb/1072/client-preferences#allow-incoming-connections for more details.]]>
Run Tailscale in Unattended Mode
+If you do not configure this policy, then Run Unattended depends on what is selected in the Preferences submenu.
+
+See https://tailscale.com/kb/1315/mdm-keys#set-unattended-mode and https://tailscale.com/kb/1088/run-unattended for more details.]]>
Allow Local Network Access when an Exit Node is in use
+If you do not configure this policy, then Allow Local Network Access depends on what is selected in the Exit Node submenu.
+
+See https://tailscale.com/kb/1315/mdm-keys#toggle-local-network-access-when-an-exit-node-is-in-use and https://tailscale.com/kb/1103/exit-nodes#step-4-use-the-exit-node for more details.]]>
Use Tailscale DNS Settings
+If you do not configure this policy, then Use Tailscale DNS depends on what is selected in the Preferences submenu.
+
+See https://tailscale.com/kb/1315/mdm-keys#set-whether-the-device-uses-tailscale-dns-settings for more details.]]>
Use Tailscale Subnets
+If you do not configure this policy, then Use Tailscale Subnets depends on what is selected in the Preferences submenu.
+
+See https://tailscale.com/kb/1315/mdm-keys#set-whether-the-device-accepts-tailscale-subnets or https://tailscale.com/kb/1019/subnets for more details.]]>
Automatically install updates
+If you do not configure this policy, then Automatically Install Updates depends on what is selected in the Preferences submenu.
+
+See https://tailscale.com/kb/1067/update#auto-updates for more details.]]>
Run Tailscale as an Exit Node
- Show the "Admin Panel" menu item
-
+ Show the "Admin Console" menu item
+
+If you disable this policy, the Debug submenu will be hidden from the Tailscale menu.
+
+See https://tailscale.com/kb/1315/mdm-keys#hide-the-debug-menu for more details.]]>
+If you disable this policy, the Update Available item will be hidden from the Tailscale menu.
+
+See https://tailscale.com/kb/1315/mdm-keys#hide-the-update-menu for more details.]]>
Show the "Run Exit Node" menu item
+If you disable this policy, the Run Exit Node item will be hidden from the Exit Node submenu.
+
+See https://tailscale.com/kb/1315/mdm-keys#hide-the-run-as-exit-node-menu-item for more details.]]>
+If you disable this policy, the Preferences submenu will be hidden from the Tailscale menu.
+
+See https://tailscale.com/kb/1315/mdm-keys#hide-the-preferences-menu for more details.]]>
Show the "Exit Node" submenu
+If you disable this policy, the Exit Node submenu will be hidden from the Tailscale menu.
+
+See https://tailscale.com/kb/1315/mdm-keys#hide-the-exit-node-picker for more details.]]>
Specify a custom key expiration notification time
+If you disable or don't configure this policy, the default time period will be used (as of Tailscale 1.56, this is 24 hours).
+
+See https://tailscale.com/kb/1315/mdm-keys#set-the-key-expiration-notice-period for more details.]]>
Log extra details about service events
Collect data for posture checking
+If you do not configure this policy, then data collection depends on if it has been enabled from the CLI (as of Tailscale 1.56), it may be present in the GUI in later versions.
+
+See https://tailscale.com/kb/1315/mdm-keys#enable-gathering-device-posture-data and https://tailscale.com/kb/1326/device-identity for more details.]]>
Show the "Managed By {Organization}" menu item
Exit Node:
+
+ Target IDs:
+
diff --git a/docs/windows/policy/tailscale.admx b/docs/windows/policy/tailscale.admx
index e70f124ed1a36..f941525c4fc9c 100644
--- a/docs/windows/policy/tailscale.admx
+++ b/docs/windows/policy/tailscale.admx
@@ -50,6 +50,10 @@
displayName="$(string.SINCE_V1_74)">
+
+
+
@@ -94,7 +98,14 @@
-
+ >
+
+
+
+
+
+
+
@@ -197,7 +208,7 @@
-
+
@@ -207,7 +218,7 @@
hide
-
+
@@ -217,7 +228,7 @@
hide
-
+
@@ -227,7 +238,7 @@
hide
-
+
@@ -237,7 +248,7 @@
hide
-
+
@@ -247,7 +258,7 @@
hide
-
+
@@ -257,7 +268,7 @@
hide
-
+
@@ -267,7 +278,7 @@
hide
-
+
@@ -276,7 +287,7 @@
-
+
diff --git a/envknob/envknob.go b/envknob/envknob.go
index 59a6d90af213b..e74bfea71bdb3 100644
--- a/envknob/envknob.go
+++ b/envknob/envknob.go
@@ -411,7 +411,7 @@ func TKASkipSignatureCheck() bool { return Bool("TS_UNSAFE_SKIP_NKS_VERIFICATION
// Kubernetes Operator components.
func App() string {
a := os.Getenv("TS_INTERNAL_APP")
- if a == kubetypes.AppConnector || a == kubetypes.AppEgressProxy || a == kubetypes.AppIngressProxy || a == kubetypes.AppIngressResource {
+ if a == kubetypes.AppConnector || a == kubetypes.AppEgressProxy || a == kubetypes.AppIngressProxy || a == kubetypes.AppIngressResource || a == kubetypes.AppProxyGroupEgress || a == kubetypes.AppProxyGroupIngress {
return a
}
return ""
diff --git a/envknob/featureknob/featureknob.go b/envknob/featureknob/featureknob.go
new file mode 100644
index 0000000000000..d7af80d239782
--- /dev/null
+++ b/envknob/featureknob/featureknob.go
@@ -0,0 +1,68 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Package featureknob provides a facility to control whether features
+// can run based on either an envknob or running OS / distro.
+package featureknob
+
+import (
+ "errors"
+ "runtime"
+
+ "tailscale.com/envknob"
+ "tailscale.com/hostinfo"
+ "tailscale.com/version"
+ "tailscale.com/version/distro"
+)
+
+// CanRunTailscaleSSH reports whether serving a Tailscale SSH server is
+// supported for the current os/distro.
+func CanRunTailscaleSSH() error {
+ switch runtime.GOOS {
+ case "linux":
+ if distro.Get() == distro.Synology && !envknob.UseWIPCode() {
+ return errors.New("The Tailscale SSH server does not run on Synology.")
+ }
+ if distro.Get() == distro.QNAP && !envknob.UseWIPCode() {
+ return errors.New("The Tailscale SSH server does not run on QNAP.")
+ }
+
+ // Setting SSH on Home Assistant causes trouble on startup
+ // (since the flag is not being passed to `tailscale up`).
+ // Although Tailscale SSH does work here,
+ // it's not terribly useful since it's running in a separate container.
+ if hostinfo.GetEnvType() == hostinfo.HomeAssistantAddOn {
+ return errors.New("The Tailscale SSH server does not run on HomeAssistant.")
+ }
+ // otherwise okay
+ case "darwin":
+ // okay only in tailscaled mode for now.
+ if version.IsSandboxedMacOS() {
+ return errors.New("The Tailscale SSH server does not run in sandboxed Tailscale GUI builds.")
+ }
+ case "freebsd", "openbsd":
+ default:
+ return errors.New("The Tailscale SSH server is not supported on " + runtime.GOOS)
+ }
+ if !envknob.CanSSHD() {
+ return errors.New("The Tailscale SSH server has been administratively disabled.")
+ }
+ return nil
+}
+
+// CanUseExitNode reports whether using an exit node is supported for the
+// current os/distro.
+func CanUseExitNode() error {
+ switch dist := distro.Get(); dist {
+ case distro.Synology, // see https://github.com/tailscale/tailscale/issues/1995
+ distro.QNAP,
+ distro.Unraid:
+ return errors.New("Tailscale exit nodes cannot be used on " + string(dist))
+ }
+
+ if hostinfo.GetEnvType() == hostinfo.HomeAssistantAddOn {
+ return errors.New("Tailscale exit nodes cannot be used on HomeAssistant.")
+ }
+
+ return nil
+}
diff --git a/envknob/features.go b/envknob/features.go
deleted file mode 100644
index 9e5909de309f0..0000000000000
--- a/envknob/features.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) Tailscale Inc & AUTHORS
-// SPDX-License-Identifier: BSD-3-Clause
-
-package envknob
-
-import (
- "errors"
- "runtime"
-
- "tailscale.com/version"
- "tailscale.com/version/distro"
-)
-
-// CanRunTailscaleSSH reports whether serving a Tailscale SSH server is
-// supported for the current os/distro.
-func CanRunTailscaleSSH() error {
- switch runtime.GOOS {
- case "linux":
- if distro.Get() == distro.Synology && !UseWIPCode() {
- return errors.New("The Tailscale SSH server does not run on Synology.")
- }
- if distro.Get() == distro.QNAP && !UseWIPCode() {
- return errors.New("The Tailscale SSH server does not run on QNAP.")
- }
- // otherwise okay
- case "darwin":
- // okay only in tailscaled mode for now.
- if version.IsSandboxedMacOS() {
- return errors.New("The Tailscale SSH server does not run in sandboxed Tailscale GUI builds.")
- }
- case "freebsd", "openbsd":
- default:
- return errors.New("The Tailscale SSH server is not supported on " + runtime.GOOS)
- }
- if !CanSSHD() {
- return errors.New("The Tailscale SSH server has been administratively disabled.")
- }
- return nil
-}
diff --git a/go.mod b/go.mod
index 464db8313b5fd..1924e93ed5d32 100644
--- a/go.mod
+++ b/go.mod
@@ -42,7 +42,7 @@ require (
github.com/golang/snappy v0.0.4
github.com/golangci/golangci-lint v1.57.1
github.com/google/go-cmp v0.6.0
- github.com/google/go-containerregistry v0.18.0
+ github.com/google/go-containerregistry v0.20.2
github.com/google/gopacket v1.1.19
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806
github.com/google/uuid v1.6.0
@@ -55,7 +55,7 @@ require (
github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86
github.com/jsimonetti/rtnetlink v1.4.0
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
- github.com/klauspost/compress v1.17.4
+ github.com/klauspost/compress v1.17.11
github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.20
@@ -80,12 +80,12 @@ require (
github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05
github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a
- github.com/tailscale/mkctr v0.0.0-20240628074852-17ca944da6ba
+ github.com/tailscale/mkctr v0.0.0-20241111153353-1a38f6676f10
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7
github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4
github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6
- github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc
+ github.com/tailscale/wireguard-go v0.0.0-20241113014420-4e883d38c8d3
github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e
github.com/tc-hib/winres v0.2.1
github.com/tcnksm/go-httpstat v0.2.0
@@ -100,8 +100,8 @@ require (
golang.org/x/mod v0.19.0
golang.org/x/net v0.27.0
golang.org/x/oauth2 v0.16.0
- golang.org/x/sync v0.7.0
- golang.org/x/sys v0.22.0
+ golang.org/x/sync v0.9.0
+ golang.org/x/sys v0.27.0
golang.org/x/term v0.22.0
golang.org/x/time v0.5.0
golang.org/x/tools v0.23.0
@@ -125,7 +125,7 @@ require (
github.com/Antonboom/testifylint v1.2.0 // indirect
github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 // indirect
github.com/Masterminds/sprig v2.22.0+incompatible // indirect
- github.com/Microsoft/go-winio v0.6.1 // indirect
+ github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect
github.com/alecthomas/go-check-sumtype v0.1.4 // indirect
github.com/alexkohler/nakedret/v2 v2.0.4 // indirect
@@ -138,7 +138,7 @@ require (
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
github.com/dave/astrid v0.0.0-20170323122508-8c2895878b14 // indirect
github.com/dave/brenda v1.1.0 // indirect
- github.com/docker/go-connections v0.4.0 // indirect
+ github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/ghostiam/protogetter v0.3.5 // indirect
@@ -160,10 +160,10 @@ require (
github.com/ykadowak/zerologlint v0.1.5 // indirect
go-simpler.org/musttag v0.9.0 // indirect
go-simpler.org/sloglint v0.5.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect
- go.opentelemetry.io/otel v1.22.0 // indirect
- go.opentelemetry.io/otel/metric v1.22.0 // indirect
- go.opentelemetry.io/otel/trace v1.22.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect
+ go.opentelemetry.io/otel v1.32.0 // indirect
+ go.opentelemetry.io/otel/metric v1.32.0 // indirect
+ go.opentelemetry.io/otel/trace v1.32.0 // indirect
go.uber.org/automaxprocs v1.5.3 // indirect
golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect
)
@@ -220,10 +220,10 @@ require (
github.com/daixiang0/gci v0.12.3 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/denis-tingaikin/go-header v0.5.0 // indirect
- github.com/docker/cli v25.0.0+incompatible // indirect
+ github.com/docker/cli v27.3.1+incompatible // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
- github.com/docker/docker v26.1.4+incompatible // indirect
- github.com/docker/docker-credential-helpers v0.8.1 // indirect
+ github.com/docker/docker v27.3.1+incompatible // indirect
+ github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/emicklei/go-restful/v3 v3.11.2 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/ettle/strcase v0.2.0 // indirect
@@ -322,7 +322,7 @@ require (
github.com/nunnatsa/ginkgolinter v0.16.1 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
- github.com/opencontainers/image-spec v1.1.0-rc6 // indirect
+ github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.0 // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
@@ -376,7 +376,7 @@ require (
github.com/ultraware/funlen v0.1.0 // indirect
github.com/ultraware/whitespace v0.1.0 // indirect
github.com/uudashr/gocognit v1.1.2 // indirect
- github.com/vbatts/tar-split v0.11.5 // indirect
+ github.com/vbatts/tar-split v0.11.6 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/yagipy/maintidx v1.0.0 // indirect
@@ -396,7 +396,7 @@ require (
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1
howett.net/plist v1.0.0 // indirect
- k8s.io/apiextensions-apiserver v0.30.3 // indirect
+ k8s.io/apiextensions-apiserver v0.30.3
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
diff --git a/go.sum b/go.sum
index 549f559d001fd..fadfb22b1a0c8 100644
--- a/go.sum
+++ b/go.sum
@@ -79,8 +79,8 @@ github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuN
github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
-github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
-github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/OpenPeeDeeP/depguard/v2 v2.2.0 h1:vDfG60vDtIuf0MEOhmLlLLSzqaRM8EMcgJPdp74zmpA=
github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ=
github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78=
@@ -277,16 +277,16 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
-github.com/docker/cli v25.0.0+incompatible h1:zaimaQdnX7fYWFqzN88exE9LDEvRslexpFowZBX6GoQ=
-github.com/docker/cli v25.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ=
+github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v26.1.4+incompatible h1:vuTpXDuoga+Z38m1OZHzl7NKisKWaWlhjQk7IDPSLsU=
-github.com/docker/docker v26.1.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo=
-github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
-github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
-github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
+github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
+github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
+github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
+github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dsnet/try v0.0.3 h1:ptR59SsrcFUYbT/FhAbKTV6iLkeD6O18qfIWRml2fqI=
@@ -490,8 +490,8 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-containerregistry v0.18.0 h1:ShE7erKNPqRh5ue6Z9DUOlk04WsnFWPO6YGr3OxnfoQ=
-github.com/google/go-containerregistry v0.18.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
+github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo=
+github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -627,8 +627,8 @@ github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkHAIKE/contextcheck v1.1.4 h1:B6zAaLhOEEcjvUgIYEqystmnFk1Oemn8bvJhbt0GMb8=
github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg=
-github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
-github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -749,8 +749,8 @@ github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.1.0-rc6 h1:XDqvyKsJEbRtATzkgItUqBA7QHk58yxX1Ov9HERHNqU=
-github.com/opencontainers/image-spec v1.1.0-rc6/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
+github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
+github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w=
@@ -931,8 +931,8 @@ github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPx
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8=
github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw=
github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8=
-github.com/tailscale/mkctr v0.0.0-20240628074852-17ca944da6ba h1:uNo1VCm/xg4alMkIKo8RWTKNx5y1otfVOcKbp+irkL4=
-github.com/tailscale/mkctr v0.0.0-20240628074852-17ca944da6ba/go.mod h1:DxnqIXBplij66U2ZkL688xy07q97qQ83P+TVueLiHq4=
+github.com/tailscale/mkctr v0.0.0-20241111153353-1a38f6676f10 h1:ZB47BgnHcEHQJODkDubs5ZiNeJxMhcgzefV3lykRwVQ=
+github.com/tailscale/mkctr v0.0.0-20241111153353-1a38f6676f10/go.mod h1:iDx/0Rr9VV/KanSUDpJ6I/ROf0sQ7OqljXc/esl0UIA=
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU=
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0=
github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w=
@@ -941,8 +941,8 @@ github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:t
github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M=
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y=
-github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc h1:cezaQN9pvKVaw56Ma5qr/G646uKIYP0yQf+OyWN/okc=
-github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4=
+github.com/tailscale/wireguard-go v0.0.0-20241113014420-4e883d38c8d3 h1:dmoPb3dG27tZgMtrvqfD/LW4w7gA6BSWl8prCPNmkCQ=
+github.com/tailscale/wireguard-go v0.0.0-20241113014420-4e883d38c8d3/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4=
github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek=
github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg=
github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA=
@@ -981,8 +981,8 @@ github.com/ultraware/whitespace v0.1.0 h1:O1HKYoh0kIeqE8sFqZf1o0qbORXUCOQFrlaQyZ
github.com/ultraware/whitespace v0.1.0/go.mod h1:/se4r3beMFNmewJ4Xmz0nMQ941GJt+qmSHGP9emHYe0=
github.com/uudashr/gocognit v1.1.2 h1:l6BAEKJqQH2UpKAPKdMfZf5kE4W/2xk8pfU1OVLvniI=
github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k=
-github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
-github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
+github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs=
+github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
@@ -1022,20 +1022,20 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw=
-go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y=
-go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94=
+go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
+go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 h1:FyjCyI9jVEfqhUh2MoSkmolPjfh5fp2hnV0b0irxH4Q=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY=
-go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg=
-go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY=
-go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw=
-go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc=
-go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0=
-go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
+go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M=
+go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
+go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
+go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
+go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
+go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
@@ -1176,8 +1176,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
+golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1239,8 +1239,8 @@ golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.1-0.20230131160137-e7d7f63158de/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
-golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
+golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
diff --git a/go.toolchain.rev b/go.toolchain.rev
index 5d87594c25a31..500d853e5e4bd 100644
--- a/go.toolchain.rev
+++ b/go.toolchain.rev
@@ -1 +1 @@
-bf15628b759344c6fc7763795a405ba65b8be5d7
+96578f73d04e1a231fa2a495ad3fa97747785bc6
diff --git a/health/health.go b/health/health.go
index 3a23a126b4deb..079b3195c8e86 100644
--- a/health/health.go
+++ b/health/health.go
@@ -128,9 +128,6 @@ const (
// SysDNS is the name of the net/dns subsystem.
SysDNS = Subsystem("dns")
- // SysDNSOS is the name of the net/dns OSConfigurator subsystem.
- SysDNSOS = Subsystem("dns-os")
-
// SysDNSManager is the name of the net/dns manager subsystem.
SysDNSManager = Subsystem("dns-manager")
@@ -141,7 +138,7 @@ const (
var subsystemsWarnables = map[Subsystem]*Warnable{}
func init() {
- for _, s := range []Subsystem{SysRouter, SysDNS, SysDNSOS, SysDNSManager, SysTKA} {
+ for _, s := range []Subsystem{SysRouter, SysDNS, SysDNSManager, SysTKA} {
w := Register(&Warnable{
Code: WarnableCode(s),
Severity: SeverityMedium,
@@ -334,7 +331,7 @@ func (t *Tracker) SetMetricsRegistry(reg *usermetric.Registry) {
)
t.metricHealthMessage.Set(metricHealthMessageLabel{
- Type: "warning",
+ Type: MetricLabelWarning,
}, expvar.Func(func() any {
if t.nil() {
return 0
@@ -510,22 +507,12 @@ func (t *Tracker) SetDNSHealth(err error) { t.setErr(SysDNS, err) }
// Deprecated: Warnables should be preferred over Subsystem errors.
func (t *Tracker) DNSHealth() error { return t.get(SysDNS) }
-// SetDNSOSHealth sets the state of the net/dns.OSConfigurator
-//
-// Deprecated: Warnables should be preferred over Subsystem errors.
-func (t *Tracker) SetDNSOSHealth(err error) { t.setErr(SysDNSOS, err) }
-
// SetDNSManagerHealth sets the state of the Linux net/dns manager's
// discovery of the /etc/resolv.conf situation.
//
// Deprecated: Warnables should be preferred over Subsystem errors.
func (t *Tracker) SetDNSManagerHealth(err error) { t.setErr(SysDNSManager, err) }
-// DNSOSHealth returns the net/dns.OSConfigurator error state.
-//
-// Deprecated: Warnables should be preferred over Subsystem errors.
-func (t *Tracker) DNSOSHealth() error { return t.get(SysDNSOS) }
-
// SetTKAHealth sets the health of the tailnet key authority.
//
// Deprecated: Warnables should be preferred over Subsystem errors.
@@ -1296,6 +1283,8 @@ func (t *Tracker) LastNoiseDialWasRecent() bool {
return dur < 2*time.Minute
}
+const MetricLabelWarning = "warning"
+
type metricHealthMessageLabel struct {
// TODO: break down by warnable.severity as well?
Type string
diff --git a/health/health_test.go b/health/health_test.go
index 8107c1cf09db5..ebdddc988edc7 100644
--- a/health/health_test.go
+++ b/health/health_test.go
@@ -7,11 +7,14 @@ import (
"fmt"
"reflect"
"slices"
+ "strconv"
"testing"
"time"
"tailscale.com/tailcfg"
"tailscale.com/types/opt"
+ "tailscale.com/util/usermetric"
+ "tailscale.com/version"
)
func TestAppendWarnableDebugFlags(t *testing.T) {
@@ -273,7 +276,7 @@ func TestShowUpdateWarnable(t *testing.T) {
wantShow bool
}{
{
- desc: "nil CientVersion",
+ desc: "nil ClientVersion",
check: true,
cv: nil,
wantWarnable: nil,
@@ -348,3 +351,52 @@ func TestShowUpdateWarnable(t *testing.T) {
})
}
}
+
+func TestHealthMetric(t *testing.T) {
+ unstableBuildWarning := 0
+ if version.IsUnstableBuild() {
+ unstableBuildWarning = 1
+ }
+
+ tests := []struct {
+ desc string
+ check bool
+ apply opt.Bool
+ cv *tailcfg.ClientVersion
+ wantMetricCount int
+ }{
+ // When running in dev, and not initialising the client, there will be two warnings
+ // by default:
+ // - is-using-unstable-version (except on the release branch)
+ // - wantrunning-false
+ {
+ desc: "base-warnings",
+ check: true,
+ cv: nil,
+ wantMetricCount: unstableBuildWarning + 1,
+ },
+ // with: update-available
+ {
+ desc: "update-warning",
+ check: true,
+ cv: &tailcfg.ClientVersion{RunningLatest: false, LatestVersion: "1.2.3"},
+ wantMetricCount: unstableBuildWarning + 2,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.desc, func(t *testing.T) {
+ tr := &Tracker{
+ checkForUpdates: tt.check,
+ applyUpdates: tt.apply,
+ latestVersion: tt.cv,
+ }
+ tr.SetMetricsRegistry(&usermetric.Registry{})
+ if val := tr.metricHealthMessage.Get(metricHealthMessageLabel{Type: MetricLabelWarning}).String(); val != strconv.Itoa(tt.wantMetricCount) {
+ t.Fatalf("metric value: %q, want: %q", val, strconv.Itoa(tt.wantMetricCount))
+ }
+ for _, w := range tr.CurrentState().Warnings {
+ t.Logf("warning: %v", w)
+ }
+ })
+ }
+}
diff --git a/hostinfo/hostinfo.go b/hostinfo/hostinfo.go
index 1f9037829d82d..3d4216922a12b 100644
--- a/hostinfo/hostinfo.go
+++ b/hostinfo/hostinfo.go
@@ -25,7 +25,7 @@ import (
"tailscale.com/types/ptr"
"tailscale.com/util/cloudenv"
"tailscale.com/util/dnsname"
- "tailscale.com/util/lineread"
+ "tailscale.com/util/lineiter"
"tailscale.com/version"
"tailscale.com/version/distro"
)
@@ -231,12 +231,12 @@ func desktop() (ret opt.Bool) {
}
seenDesktop := false
- lineread.File("/proc/net/unix", func(line []byte) error {
+ for lr := range lineiter.File("/proc/net/unix") {
+ line, _ := lr.Value()
seenDesktop = seenDesktop || mem.Contains(mem.B(line), mem.S(" @/tmp/dbus-"))
seenDesktop = seenDesktop || mem.Contains(mem.B(line), mem.S(".X11-unix"))
seenDesktop = seenDesktop || mem.Contains(mem.B(line), mem.S("/wayland-1"))
- return nil
- })
+ }
ret.Set(seenDesktop)
// Only cache after a minute - compositors might not have started yet.
@@ -280,13 +280,22 @@ func getEnvType() EnvType {
return ""
}
-// inContainer reports whether we're running in a container.
+// inContainer reports whether we're running in a container. Best-effort only,
+// there's no foolproof way to detect this, but the build tag should catch all
+// official builds from 1.78.0.
func inContainer() opt.Bool {
if runtime.GOOS != "linux" {
return ""
}
var ret opt.Bool
ret.Set(false)
+ if packageType != nil && packageType() == "container" {
+ // Go build tag ts_package_container was set during build.
+ ret.Set(true)
+ return ret
+ }
+ // Only set if using docker's container runtime. Not guaranteed by
+ // documentation, but it's been in place for a long time.
if _, err := os.Stat("/.dockerenv"); err == nil {
ret.Set(true)
return ret
@@ -296,21 +305,21 @@ func inContainer() opt.Bool {
ret.Set(true)
return ret
}
- lineread.File("/proc/1/cgroup", func(line []byte) error {
+ for lr := range lineiter.File("/proc/1/cgroup") {
+ line, _ := lr.Value()
if mem.Contains(mem.B(line), mem.S("/docker/")) ||
mem.Contains(mem.B(line), mem.S("/lxc/")) {
ret.Set(true)
- return io.EOF // arbitrary non-nil error to stop loop
+ break
}
- return nil
- })
- lineread.File("/proc/mounts", func(line []byte) error {
+ }
+ for lr := range lineiter.File("/proc/mounts") {
+ line, _ := lr.Value()
if mem.Contains(mem.B(line), mem.S("lxcfs /proc/cpuinfo fuse.lxcfs")) {
ret.Set(true)
- return io.EOF
+ break
}
- return nil
- })
+ }
return ret
}
@@ -362,7 +371,7 @@ func inFlyDotIo() bool {
}
func inReplit() bool {
- // https://docs.replit.com/programming-ide/getting-repl-metadata
+ // https://docs.replit.com/replit-workspace/configuring-repl#environment-variables
if os.Getenv("REPL_OWNER") != "" && os.Getenv("REPL_SLUG") != "" {
return true
}
diff --git a/hostinfo/hostinfo_container_linux_test.go b/hostinfo/hostinfo_container_linux_test.go
new file mode 100644
index 0000000000000..594a5f5120a6a
--- /dev/null
+++ b/hostinfo/hostinfo_container_linux_test.go
@@ -0,0 +1,16 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build linux && !android && ts_package_container
+
+package hostinfo
+
+import (
+ "testing"
+)
+
+func TestInContainer(t *testing.T) {
+ if got := inContainer(); !got.EqualBool(true) {
+ t.Errorf("inContainer = %v; want true due to ts_package_container build tag", got)
+ }
+}
diff --git a/hostinfo/hostinfo_linux.go b/hostinfo/hostinfo_linux.go
index 53d4187bc0c67..66484a3588027 100644
--- a/hostinfo/hostinfo_linux.go
+++ b/hostinfo/hostinfo_linux.go
@@ -12,7 +12,7 @@ import (
"golang.org/x/sys/unix"
"tailscale.com/types/ptr"
- "tailscale.com/util/lineread"
+ "tailscale.com/util/lineiter"
"tailscale.com/version/distro"
)
@@ -106,15 +106,18 @@ func linuxVersionMeta() (meta versionMeta) {
}
m := map[string]string{}
- lineread.File(propFile, func(line []byte) error {
+ for lr := range lineiter.File(propFile) {
+ line, err := lr.Value()
+ if err != nil {
+ break
+ }
eq := bytes.IndexByte(line, '=')
if eq == -1 {
- return nil
+ continue
}
k, v := string(line[:eq]), strings.Trim(string(line[eq+1:]), `"'`)
m[k] = v
- return nil
- })
+ }
if v := m["VERSION_CODENAME"]; v != "" {
meta.DistroCodeName = v
diff --git a/hostinfo/hostinfo_linux_test.go b/hostinfo/hostinfo_linux_test.go
index 4859167a270ec..0286fadf329ab 100644
--- a/hostinfo/hostinfo_linux_test.go
+++ b/hostinfo/hostinfo_linux_test.go
@@ -1,7 +1,7 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
-//go:build linux && !android
+//go:build linux && !android && !ts_package_container
package hostinfo
@@ -34,3 +34,13 @@ remotes/origin/QTSFW_5.0.0`
t.Errorf("got %q; want %q", got, want)
}
}
+
+func TestPackageTypeNotContainer(t *testing.T) {
+ var got string
+ if packageType != nil {
+ got = packageType()
+ }
+ if got == "container" {
+ t.Fatal("packageType = container; should only happen if build tag ts_package_container is set")
+ }
+}
diff --git a/ipn/backend.go b/ipn/backend.go
index 76ad1910bf14c..91a35df0d0da0 100644
--- a/ipn/backend.go
+++ b/ipn/backend.go
@@ -73,6 +73,8 @@ const (
NotifyInitialOutgoingFiles // if set, the first Notify message (sent immediately) will contain the current Taildrop OutgoingFiles
NotifyInitialHealthState // if set, the first Notify message (sent immediately) will contain the current health.State of the client
+
+ NotifyRateLimit // if set, rate limit spammy netmap updates to every few seconds
)
// Notify is a communication from a backend (e.g. tailscaled) to a frontend
@@ -100,7 +102,6 @@ type Notify struct {
NetMap *netmap.NetworkMap // if non-nil, the new or current netmap
Engine *EngineStatus // if non-nil, the new or current wireguard stats
BrowseToURL *string // if non-nil, UI should open a browser right now
- BackendLogID *string // if non-nil, the public logtail ID used by backend
// FilesWaiting if non-nil means that files are buffered in
// the Tailscale daemon and ready for local transfer to the
@@ -173,9 +174,6 @@ func (n Notify) String() string {
if n.BrowseToURL != nil {
sb.WriteString("URL=<...> ")
}
- if n.BackendLogID != nil {
- sb.WriteString("BackendLogID ")
- }
if n.FilesWaiting != nil {
sb.WriteString("FilesWaiting ")
}
diff --git a/ipn/conf.go b/ipn/conf.go
index 6a67f40040c76..1b2831b03b6c6 100644
--- a/ipn/conf.go
+++ b/ipn/conf.go
@@ -32,6 +32,8 @@ type ConfigVAlpha struct {
AdvertiseRoutes []netip.Prefix `json:",omitempty"`
DisableSNAT opt.Bool `json:",omitempty"`
+ AppConnector *AppConnectorPrefs `json:",omitempty"` // advertise app connector; defaults to false (if nil or explicitly set to false)
+
NetfilterMode *string `json:",omitempty"` // "on", "off", "nodivert"
NoStatefulFiltering opt.Bool `json:",omitempty"`
@@ -137,5 +139,9 @@ func (c *ConfigVAlpha) ToPrefs() (MaskedPrefs, error) {
mp.AutoUpdate = *c.AutoUpdate
mp.AutoUpdateSet = AutoUpdatePrefsMask{ApplySet: true, CheckSet: true}
}
+ if c.AppConnector != nil {
+ mp.AppConnector = *c.AppConnector
+ mp.AppConnectorSet = true
+ }
return mp, nil
}
diff --git a/ipn/doc.go b/ipn/doc.go
index 4b3810be1f734..9a0bbb800b556 100644
--- a/ipn/doc.go
+++ b/ipn/doc.go
@@ -1,7 +1,7 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
-//go:generate go run tailscale.com/cmd/viewer -type=Prefs,ServeConfig,TCPPortHandler,HTTPHandler,WebServerConfig
+//go:generate go run tailscale.com/cmd/viewer -type=Prefs,ServeConfig,ServiceConfig,TCPPortHandler,HTTPHandler,WebServerConfig
// Package ipn implements the interactions between the Tailscale cloud
// control plane and the local network stack.
diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go
index de35b60a7927d..34d7ba9a66364 100644
--- a/ipn/ipn_clone.go
+++ b/ipn/ipn_clone.go
@@ -27,6 +27,7 @@ func (src *Prefs) Clone() *Prefs {
*dst = *src
dst.AdvertiseTags = append(src.AdvertiseTags[:0:0], src.AdvertiseTags...)
dst.AdvertiseRoutes = append(src.AdvertiseRoutes[:0:0], src.AdvertiseRoutes...)
+ dst.AdvertiseServices = append(src.AdvertiseServices[:0:0], src.AdvertiseServices...)
if src.DriveShares != nil {
dst.DriveShares = make([]*drive.Share, len(src.DriveShares))
for i := range dst.DriveShares {
@@ -61,6 +62,7 @@ var _PrefsCloneNeedsRegeneration = Prefs(struct {
ForceDaemon bool
Egg bool
AdvertiseRoutes []netip.Prefix
+ AdvertiseServices []string
NoSNAT bool
NoStatefulFiltering opt.Bool
NetfilterMode preftype.NetfilterMode
@@ -103,6 +105,16 @@ func (src *ServeConfig) Clone() *ServeConfig {
}
}
}
+ if dst.Services != nil {
+ dst.Services = map[string]*ServiceConfig{}
+ for k, v := range src.Services {
+ if v == nil {
+ dst.Services[k] = nil
+ } else {
+ dst.Services[k] = v.Clone()
+ }
+ }
+ }
dst.AllowFunnel = maps.Clone(src.AllowFunnel)
if dst.Foreground != nil {
dst.Foreground = map[string]*ServeConfig{}
@@ -121,11 +133,50 @@ func (src *ServeConfig) Clone() *ServeConfig {
var _ServeConfigCloneNeedsRegeneration = ServeConfig(struct {
TCP map[uint16]*TCPPortHandler
Web map[HostPort]*WebServerConfig
+ Services map[string]*ServiceConfig
AllowFunnel map[HostPort]bool
Foreground map[string]*ServeConfig
ETag string
}{})
+// Clone makes a deep copy of ServiceConfig.
+// The result aliases no memory with the original.
+func (src *ServiceConfig) Clone() *ServiceConfig {
+ if src == nil {
+ return nil
+ }
+ dst := new(ServiceConfig)
+ *dst = *src
+ if dst.TCP != nil {
+ dst.TCP = map[uint16]*TCPPortHandler{}
+ for k, v := range src.TCP {
+ if v == nil {
+ dst.TCP[k] = nil
+ } else {
+ dst.TCP[k] = ptr.To(*v)
+ }
+ }
+ }
+ if dst.Web != nil {
+ dst.Web = map[HostPort]*WebServerConfig{}
+ for k, v := range src.Web {
+ if v == nil {
+ dst.Web[k] = nil
+ } else {
+ dst.Web[k] = v.Clone()
+ }
+ }
+ }
+ return dst
+}
+
+// A compilation failure here means this code must be regenerated, with the command at the top of this file.
+var _ServiceConfigCloneNeedsRegeneration = ServiceConfig(struct {
+ TCP map[uint16]*TCPPortHandler
+ Web map[HostPort]*WebServerConfig
+ Tun bool
+}{})
+
// Clone makes a deep copy of TCPPortHandler.
// The result aliases no memory with the original.
func (src *TCPPortHandler) Clone() *TCPPortHandler {
diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go
index ff48b9c8975f9..bc67531e4253d 100644
--- a/ipn/ipn_view.go
+++ b/ipn/ipn_view.go
@@ -18,7 +18,7 @@ import (
"tailscale.com/types/views"
)
-//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=Prefs,ServeConfig,TCPPortHandler,HTTPHandler,WebServerConfig
+//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=Prefs,ServeConfig,ServiceConfig,TCPPortHandler,HTTPHandler,WebServerConfig
// View returns a readonly view of Prefs.
func (p *Prefs) View() PrefsView {
@@ -85,6 +85,9 @@ func (v PrefsView) Egg() bool { return v.ж.Eg
func (v PrefsView) AdvertiseRoutes() views.Slice[netip.Prefix] {
return views.SliceOf(v.ж.AdvertiseRoutes)
}
+func (v PrefsView) AdvertiseServices() views.Slice[string] {
+ return views.SliceOf(v.ж.AdvertiseServices)
+}
func (v PrefsView) NoSNAT() bool { return v.ж.NoSNAT }
func (v PrefsView) NoStatefulFiltering() opt.Bool { return v.ж.NoStatefulFiltering }
func (v PrefsView) NetfilterMode() preftype.NetfilterMode { return v.ж.NetfilterMode }
@@ -120,6 +123,7 @@ var _PrefsViewNeedsRegeneration = Prefs(struct {
ForceDaemon bool
Egg bool
AdvertiseRoutes []netip.Prefix
+ AdvertiseServices []string
NoSNAT bool
NoStatefulFiltering opt.Bool
NetfilterMode preftype.NetfilterMode
@@ -191,6 +195,12 @@ func (v ServeConfigView) Web() views.MapFn[HostPort, *WebServerConfig, WebServer
})
}
+func (v ServeConfigView) Services() views.MapFn[string, *ServiceConfig, ServiceConfigView] {
+ return views.MapFnOf(v.ж.Services, func(t *ServiceConfig) ServiceConfigView {
+ return t.View()
+ })
+}
+
func (v ServeConfigView) AllowFunnel() views.Map[HostPort, bool] {
return views.MapOf(v.ж.AllowFunnel)
}
@@ -206,11 +216,77 @@ func (v ServeConfigView) ETag() string { return v.ж.ETag }
var _ServeConfigViewNeedsRegeneration = ServeConfig(struct {
TCP map[uint16]*TCPPortHandler
Web map[HostPort]*WebServerConfig
+ Services map[string]*ServiceConfig
AllowFunnel map[HostPort]bool
Foreground map[string]*ServeConfig
ETag string
}{})
+// View returns a readonly view of ServiceConfig.
+func (p *ServiceConfig) View() ServiceConfigView {
+ return ServiceConfigView{ж: p}
+}
+
+// ServiceConfigView provides a read-only view over ServiceConfig.
+//
+// Its methods should only be called if `Valid()` returns true.
+type ServiceConfigView struct {
+ // ж is the underlying mutable value, named with a hard-to-type
+ // character that looks pointy like a pointer.
+ // It is named distinctively to make you think of how dangerous it is to escape
+ // to callers. You must not let callers be able to mutate it.
+ ж *ServiceConfig
+}
+
+// Valid reports whether underlying value is non-nil.
+func (v ServiceConfigView) Valid() bool { return v.ж != nil }
+
+// AsStruct returns a clone of the underlying value which aliases no memory with
+// the original.
+func (v ServiceConfigView) AsStruct() *ServiceConfig {
+ if v.ж == nil {
+ return nil
+ }
+ return v.ж.Clone()
+}
+
+func (v ServiceConfigView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) }
+
+func (v *ServiceConfigView) UnmarshalJSON(b []byte) error {
+ if v.ж != nil {
+ return errors.New("already initialized")
+ }
+ if len(b) == 0 {
+ return nil
+ }
+ var x ServiceConfig
+ if err := json.Unmarshal(b, &x); err != nil {
+ return err
+ }
+ v.ж = &x
+ return nil
+}
+
+func (v ServiceConfigView) TCP() views.MapFn[uint16, *TCPPortHandler, TCPPortHandlerView] {
+ return views.MapFnOf(v.ж.TCP, func(t *TCPPortHandler) TCPPortHandlerView {
+ return t.View()
+ })
+}
+
+func (v ServiceConfigView) Web() views.MapFn[HostPort, *WebServerConfig, WebServerConfigView] {
+ return views.MapFnOf(v.ж.Web, func(t *WebServerConfig) WebServerConfigView {
+ return t.View()
+ })
+}
+func (v ServiceConfigView) Tun() bool { return v.ж.Tun }
+
+// A compilation failure here means this code must be regenerated, with the command at the top of this file.
+var _ServiceConfigViewNeedsRegeneration = ServiceConfig(struct {
+ TCP map[uint16]*TCPPortHandler
+ Web map[HostPort]*WebServerConfig
+ Tun bool
+}{})
+
// View returns a readonly view of TCPPortHandler.
func (p *TCPPortHandler) View() TCPPortHandlerView {
return TCPPortHandlerView{ж: p}
diff --git a/ipn/ipnauth/actor.go b/ipn/ipnauth/actor.go
index db3192c9100ad..1070172688a84 100644
--- a/ipn/ipnauth/actor.go
+++ b/ipn/ipnauth/actor.go
@@ -4,6 +4,8 @@
package ipnauth
import (
+ "fmt"
+
"tailscale.com/ipn"
)
@@ -20,6 +22,9 @@ type Actor interface {
// Username returns the user name associated with the receiver,
// or "" if the actor does not represent a specific user.
Username() (string, error)
+ // ClientID returns a non-zero ClientID and true if the actor represents
+ // a connected LocalAPI client. Otherwise, it returns a zero value and false.
+ ClientID() (_ ClientID, ok bool)
// IsLocalSystem reports whether the actor is the Windows' Local System account.
//
@@ -45,3 +50,29 @@ type ActorCloser interface {
// Close releases resources associated with the receiver.
Close() error
}
+
+// ClientID is an opaque, comparable value used to identify a connected LocalAPI
+// client, such as a connected Tailscale GUI or CLI. It does not necessarily
+// correspond to the same [net.Conn] or any physical session.
+//
+// Its zero value is valid, but does not represent a specific connected client.
+type ClientID struct {
+ v any
+}
+
+// NoClientID is the zero value of [ClientID].
+var NoClientID ClientID
+
+// ClientIDFrom returns a new [ClientID] derived from the specified value.
+// ClientIDs derived from equal values are equal.
+func ClientIDFrom[T comparable](v T) ClientID {
+ return ClientID{v}
+}
+
+// String implements [fmt.Stringer].
+func (id ClientID) String() string {
+ if id.v == nil {
+ return "(none)"
+ }
+ return fmt.Sprint(id.v)
+}
diff --git a/ipn/ipnauth/ipnauth_notwindows.go b/ipn/ipnauth/ipnauth_notwindows.go
index 3dad8233a2198..d9d11bd0a17a1 100644
--- a/ipn/ipnauth/ipnauth_notwindows.go
+++ b/ipn/ipnauth/ipnauth_notwindows.go
@@ -18,7 +18,9 @@ import (
func GetConnIdentity(_ logger.Logf, c net.Conn) (ci *ConnIdentity, err error) {
ci = &ConnIdentity{conn: c, notWindows: true}
_, ci.isUnixSock = c.(*net.UnixConn)
- ci.creds, _ = peercred.Get(c)
+ if ci.creds, _ = peercred.Get(c); ci.creds != nil {
+ ci.pid, _ = ci.creds.PID()
+ }
return ci, nil
}
diff --git a/ipn/ipnauth/test_actor.go b/ipn/ipnauth/test_actor.go
new file mode 100644
index 0000000000000..d38aa21968bb2
--- /dev/null
+++ b/ipn/ipnauth/test_actor.go
@@ -0,0 +1,36 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package ipnauth
+
+import (
+ "tailscale.com/ipn"
+)
+
+var _ Actor = (*TestActor)(nil)
+
+// TestActor is an [Actor] used exclusively for testing purposes.
+type TestActor struct {
+ UID ipn.WindowsUserID // OS-specific UID of the user, if the actor represents a local Windows user
+ Name string // username associated with the actor, or ""
+ NameErr error // error to be returned by [TestActor.Username]
+ CID ClientID // non-zero if the actor represents a connected LocalAPI client
+ LocalSystem bool // whether the actor represents the special Local System account on Windows
+ LocalAdmin bool // whether the actor has local admin access
+
+}
+
+// UserID implements [Actor].
+func (a *TestActor) UserID() ipn.WindowsUserID { return a.UID }
+
+// Username implements [Actor].
+func (a *TestActor) Username() (string, error) { return a.Name, a.NameErr }
+
+// ClientID implements [Actor].
+func (a *TestActor) ClientID() (_ ClientID, ok bool) { return a.CID, a.CID != NoClientID }
+
+// IsLocalSystem implements [Actor].
+func (a *TestActor) IsLocalSystem() bool { return a.LocalSystem }
+
+// IsLocalAdmin implements [Actor].
+func (a *TestActor) IsLocalAdmin(operatorUID string) bool { return a.LocalAdmin }
diff --git a/ipn/ipnlocal/bus.go b/ipn/ipnlocal/bus.go
new file mode 100644
index 0000000000000..111a877d849d8
--- /dev/null
+++ b/ipn/ipnlocal/bus.go
@@ -0,0 +1,160 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package ipnlocal
+
+import (
+ "context"
+ "time"
+
+ "tailscale.com/ipn"
+ "tailscale.com/tstime"
+)
+
+type rateLimitingBusSender struct {
+ fn func(*ipn.Notify) (keepGoing bool)
+ lastFlush time.Time // last call to fn, or zero value if none
+ interval time.Duration // 0 to flush immediately; non-zero to rate limit sends
+ clock tstime.DefaultClock // non-nil for testing
+ didSendTestHook func() // non-nil for testing
+
+ // pending, if non-nil, is the pending notification that we
+ // haven't sent yet. We own this memory to mutate.
+ pending *ipn.Notify
+
+ // flushTimer is non-nil if the timer is armed.
+ flushTimer tstime.TimerController // effectively a *time.Timer
+ flushTimerC <-chan time.Time // ... said ~Timer's C chan
+}
+
+func (s *rateLimitingBusSender) close() {
+ if s.flushTimer != nil {
+ s.flushTimer.Stop()
+ }
+}
+
+func (s *rateLimitingBusSender) flushChan() <-chan time.Time {
+ return s.flushTimerC
+}
+
+func (s *rateLimitingBusSender) flush() (keepGoing bool) {
+ if n := s.pending; n != nil {
+ s.pending = nil
+ return s.flushNotify(n)
+ }
+ return true
+}
+
+func (s *rateLimitingBusSender) flushNotify(n *ipn.Notify) (keepGoing bool) {
+ s.lastFlush = s.clock.Now()
+ return s.fn(n)
+}
+
+// send conditionally sends n to the underlying fn, possibly rate
+// limiting it, depending on whether s.interval is set, and whether
+// n is a notable notification that the client (typically a GUI) would
+// want to act on (render) immediately.
+//
+// It returns whether the caller should keep looping.
+//
+// The passed-in memory 'n' is owned by the caller and should
+// not be mutated.
+func (s *rateLimitingBusSender) send(n *ipn.Notify) (keepGoing bool) {
+ if s.interval <= 0 {
+ // No rate limiting case.
+ return s.fn(n)
+ }
+ if isNotableNotify(n) {
+ // Notable notifications are always sent immediately.
+ // But first send any boring one that was pending.
+ // TODO(bradfitz): there might be a boring one pending
+ // with a NetMap or Engine field that is redundant
+ // with the new one (n) with NetMap or Engine populated.
+ // We should clear the pending one's NetMap/Engine in
+ // that case. Or really, merge the two, but mergeBoringNotifies
+ // only handles the case of both sides being boring.
+ // So for now, flush both.
+ if !s.flush() {
+ return false
+ }
+ return s.flushNotify(n)
+ }
+ s.pending = mergeBoringNotifies(s.pending, n)
+ d := s.clock.Now().Sub(s.lastFlush)
+ if d > s.interval {
+ return s.flush()
+ }
+ nextFlushIn := s.interval - d
+ if s.flushTimer == nil {
+ s.flushTimer, s.flushTimerC = s.clock.NewTimer(nextFlushIn)
+ } else {
+ s.flushTimer.Reset(nextFlushIn)
+ }
+ return true
+}
+
+func (s *rateLimitingBusSender) Run(ctx context.Context, ch <-chan *ipn.Notify) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case n, ok := <-ch:
+ if !ok {
+ return
+ }
+ if !s.send(n) {
+ return
+ }
+ if f := s.didSendTestHook; f != nil {
+ f()
+ }
+ case <-s.flushChan():
+ if !s.flush() {
+ return
+ }
+ }
+ }
+}
+
+// mergeBoringNotifies merges new notify 'src' into possibly-nil 'dst',
+// either mutating 'dst' or allocating a new one if 'dst' is nil,
+// returning the merged result.
+//
+// dst and src must both be "boring" (i.e. not notable per isNotableNotify).
+func mergeBoringNotifies(dst, src *ipn.Notify) *ipn.Notify {
+ if dst == nil {
+ dst = &ipn.Notify{Version: src.Version}
+ }
+ if src.NetMap != nil {
+ dst.NetMap = src.NetMap
+ }
+ if src.Engine != nil {
+ dst.Engine = src.Engine
+ }
+ return dst
+}
+
+// isNotableNotify reports whether n is a "notable" notification that
+// should be sent on the IPN bus immediately (e.g. to GUIs) without
+// rate limiting it for a few seconds.
+//
+// It effectively reports whether n contains any field set that's
+// not NetMap or Engine.
+func isNotableNotify(n *ipn.Notify) bool {
+ if n == nil {
+ return false
+ }
+ return n.State != nil ||
+ n.SessionID != "" ||
+ n.BrowseToURL != nil ||
+ n.LocalTCPPort != nil ||
+ n.ClientVersion != nil ||
+ n.Prefs != nil ||
+ n.ErrMessage != nil ||
+ n.LoginFinished != nil ||
+ !n.DriveShares.IsNil() ||
+ n.Health != nil ||
+ len(n.IncomingFiles) > 0 ||
+ len(n.OutgoingFiles) > 0 ||
+ n.FilesWaiting != nil
+}
diff --git a/ipn/ipnlocal/bus_test.go b/ipn/ipnlocal/bus_test.go
new file mode 100644
index 0000000000000..5c75ac54d688d
--- /dev/null
+++ b/ipn/ipnlocal/bus_test.go
@@ -0,0 +1,220 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package ipnlocal
+
+import (
+ "context"
+ "reflect"
+ "slices"
+ "testing"
+ "time"
+
+ "tailscale.com/drive"
+ "tailscale.com/ipn"
+ "tailscale.com/tstest"
+ "tailscale.com/tstime"
+ "tailscale.com/types/logger"
+ "tailscale.com/types/netmap"
+ "tailscale.com/types/views"
+)
+
+func TestIsNotableNotify(t *testing.T) {
+ tests := []struct {
+ name string
+ notify *ipn.Notify
+ want bool
+ }{
+ {"nil", nil, false},
+ {"empty", &ipn.Notify{}, false},
+ {"version", &ipn.Notify{Version: "foo"}, false},
+ {"netmap", &ipn.Notify{NetMap: new(netmap.NetworkMap)}, false},
+ {"engine", &ipn.Notify{Engine: new(ipn.EngineStatus)}, false},
+ }
+
+ // Then for all other fields, assume they're notable.
+ // We use reflect to catch fields that might be added in the future without
+ // remembering to update the [isNotableNotify] function.
+ rt := reflect.TypeFor[ipn.Notify]()
+ for i := range rt.NumField() {
+ n := &ipn.Notify{}
+ sf := rt.Field(i)
+ switch sf.Name {
+ case "_", "NetMap", "Engine", "Version":
+ // Already covered above or not applicable.
+ continue
+ case "DriveShares":
+ n.DriveShares = views.SliceOfViews[*drive.Share, drive.ShareView](make([]*drive.Share, 1))
+ default:
+ rf := reflect.ValueOf(n).Elem().Field(i)
+ switch rf.Kind() {
+ case reflect.Pointer:
+ rf.Set(reflect.New(rf.Type().Elem()))
+ case reflect.String:
+ rf.SetString("foo")
+ case reflect.Slice:
+ rf.Set(reflect.MakeSlice(rf.Type(), 1, 1))
+ default:
+ t.Errorf("unhandled field kind %v for %q", rf.Kind(), sf.Name)
+ }
+ }
+
+ tests = append(tests, struct {
+ name string
+ notify *ipn.Notify
+ want bool
+ }{
+ name: "field-" + rt.Field(i).Name,
+ notify: n,
+ want: true,
+ })
+ }
+
+ for _, tt := range tests {
+ if got := isNotableNotify(tt.notify); got != tt.want {
+ t.Errorf("%v: got %v; want %v", tt.name, got, tt.want)
+ }
+ }
+}
+
+type rateLimitingBusSenderTester struct {
+ tb testing.TB
+ got []*ipn.Notify
+ clock *tstest.Clock
+ s *rateLimitingBusSender
+}
+
+func (st *rateLimitingBusSenderTester) init() {
+ if st.s != nil {
+ return
+ }
+ st.clock = tstest.NewClock(tstest.ClockOpts{
+ Start: time.Unix(1731777537, 0), // time I wrote this test :)
+ })
+ st.s = &rateLimitingBusSender{
+ clock: tstime.DefaultClock{Clock: st.clock},
+ fn: func(n *ipn.Notify) bool {
+ st.got = append(st.got, n)
+ return true
+ },
+ }
+}
+
+func (st *rateLimitingBusSenderTester) send(n *ipn.Notify) {
+ st.tb.Helper()
+ st.init()
+ if !st.s.send(n) {
+ st.tb.Fatal("unexpected send failed")
+ }
+}
+
+func (st *rateLimitingBusSenderTester) advance(d time.Duration) {
+ st.tb.Helper()
+ st.clock.Advance(d)
+ select {
+ case <-st.s.flushChan():
+ if !st.s.flush() {
+ st.tb.Fatal("unexpected flush failed")
+ }
+ default:
+ }
+}
+
+func TestRateLimitingBusSender(t *testing.T) {
+ nm1 := &ipn.Notify{NetMap: new(netmap.NetworkMap)}
+ nm2 := &ipn.Notify{NetMap: new(netmap.NetworkMap)}
+ eng1 := &ipn.Notify{Engine: new(ipn.EngineStatus)}
+ eng2 := &ipn.Notify{Engine: new(ipn.EngineStatus)}
+
+ t.Run("unbuffered", func(t *testing.T) {
+ st := &rateLimitingBusSenderTester{tb: t}
+ st.send(nm1)
+ st.send(nm2)
+ st.send(eng1)
+ st.send(eng2)
+ if !slices.Equal(st.got, []*ipn.Notify{nm1, nm2, eng1, eng2}) {
+ t.Errorf("got %d items; want 4 specific ones, unmodified", len(st.got))
+ }
+ })
+
+ t.Run("buffered", func(t *testing.T) {
+ st := &rateLimitingBusSenderTester{tb: t}
+ st.init()
+ st.s.interval = 1 * time.Second
+ st.send(&ipn.Notify{Version: "initial"})
+ if len(st.got) != 1 {
+ t.Fatalf("got %d items; expected 1 (first to flush immediately)", len(st.got))
+ }
+ st.send(nm1)
+ st.send(nm2)
+ st.send(eng1)
+ st.send(eng2)
+		if len(st.got) != 1 {
+			// Still rate-limited: nothing beyond the first
+			// notify should have flushed yet.
+			t.Fatalf("got %d items; expected still just that first 1", len(st.got))
+		}
+
+		// But moving the clock should flush the rest, coalesced into one new one.
+ st.advance(5 * time.Second)
+ if len(st.got) != 2 {
+ t.Fatalf("got %d items; want 2", len(st.got))
+ }
+ gotn := st.got[1]
+ if gotn.NetMap != nm2.NetMap {
+ t.Errorf("got wrong NetMap; got %p", gotn.NetMap)
+ }
+ if gotn.Engine != eng2.Engine {
+ t.Errorf("got wrong Engine; got %p", gotn.Engine)
+ }
+ if t.Failed() {
+ t.Logf("failed Notify was: %v", logger.AsJSON(gotn))
+ }
+ })
+
+ // Test the Run method
+ t.Run("run", func(t *testing.T) {
+ st := &rateLimitingBusSenderTester{tb: t}
+ st.init()
+ st.s.interval = 1 * time.Second
+ st.s.lastFlush = st.clock.Now() // pretend we just flushed
+
+ flushc := make(chan *ipn.Notify, 1)
+ st.s.fn = func(n *ipn.Notify) bool {
+ flushc <- n
+ return true
+ }
+ didSend := make(chan bool, 2)
+ st.s.didSendTestHook = func() { didSend <- true }
+ waitSend := func() {
+ select {
+ case <-didSend:
+ case <-time.After(5 * time.Second):
+ t.Error("timeout waiting for call to send")
+ }
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ incoming := make(chan *ipn.Notify, 2)
+ go func() {
+ incoming <- nm1
+ waitSend()
+ incoming <- nm2
+ waitSend()
+ st.advance(5 * time.Second)
+ select {
+ case n := <-flushc:
+ if n.NetMap != nm2.NetMap {
+ t.Errorf("got wrong NetMap; got %p", n.NetMap)
+ }
+ case <-time.After(10 * time.Second):
+ t.Error("timeout")
+ }
+ cancel()
+ }()
+
+ st.s.Run(ctx, incoming)
+ })
+}
diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go
index de6ca2321a741..f3a4a3a3d2b29 100644
--- a/ipn/ipnlocal/c2n.go
+++ b/ipn/ipnlocal/c2n.go
@@ -77,6 +77,9 @@ var c2nHandlers = map[methodAndPath]c2nHandler{
// Linux netfilter.
req("POST /netfilter-kind"): handleC2NSetNetfilterKind,
+
+ // VIP services.
+ req("GET /vip-services"): handleC2NVIPServicesGet,
}
type c2nHandler func(*LocalBackend, http.ResponseWriter, *http.Request)
@@ -269,6 +272,12 @@ func handleC2NSetNetfilterKind(b *LocalBackend, w http.ResponseWriter, r *http.R
w.WriteHeader(http.StatusNoContent)
}
+func handleC2NVIPServicesGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
+ b.logf("c2n: GET /vip-services received")
+
+ json.NewEncoder(w).Encode(b.VIPServices())
+}
+
func handleC2NUpdateGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) {
b.logf("c2n: GET /update received")
@@ -332,12 +341,10 @@ func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http
}
if choice.ShouldEnable(b.Prefs().PostureChecking()) {
- sns, err := posture.GetSerialNumbers(b.logf)
+ res.SerialNumbers, err = posture.GetSerialNumbers(b.logf)
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ b.logf("c2n: GetSerialNumbers returned error: %v", err)
}
- res.SerialNumbers = sns
// TODO(tailscale/corp#21371, 2024-07-10): once this has landed in a stable release
// and looks good in client metrics, remove this parameter and always report MAC
@@ -352,6 +359,8 @@ func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http
res.PostureDisabled = true
}
+ b.logf("c2n: posture identity disabled=%v reported %d serials %d hwaddrs", res.PostureDisabled, len(res.SerialNumbers), len(res.IfaceHardwareAddrs))
+
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(res)
}
diff --git a/ipn/ipnlocal/drive.go b/ipn/ipnlocal/drive.go
index 98d563d8746b1..fe3622ba40e3e 100644
--- a/ipn/ipnlocal/drive.go
+++ b/ipn/ipnlocal/drive.go
@@ -354,9 +354,8 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem
// Check that the peer is allowed to share with us.
addresses := peer.Addresses()
- for i := range addresses.Len() {
- addr := addresses.At(i)
- capsMap := b.PeerCaps(addr.Addr())
+ for _, p := range addresses.All() {
+ capsMap := b.PeerCaps(p.Addr())
if capsMap.HasCapability(tailcfg.PeerCapabilityTaildriveSharer) {
return true
}
diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go
index 5e7ccc1cb973c..3b9ba337270c5 100644
--- a/ipn/ipnlocal/local.go
+++ b/ipn/ipnlocal/local.go
@@ -9,6 +9,7 @@ import (
"bytes"
"cmp"
"context"
+ "crypto/sha256"
"encoding/base64"
"encoding/json"
"errors"
@@ -51,6 +52,7 @@ import (
"tailscale.com/doctor/routetable"
"tailscale.com/drive"
"tailscale.com/envknob"
+ "tailscale.com/envknob/featureknob"
"tailscale.com/health"
"tailscale.com/health/healthmsg"
"tailscale.com/hostinfo"
@@ -85,7 +87,6 @@ import (
"tailscale.com/types/dnstype"
"tailscale.com/types/empty"
"tailscale.com/types/key"
- "tailscale.com/types/lazy"
"tailscale.com/types/logger"
"tailscale.com/types/logid"
"tailscale.com/types/netmap"
@@ -104,6 +105,7 @@ import (
"tailscale.com/util/rands"
"tailscale.com/util/set"
"tailscale.com/util/syspolicy"
+ "tailscale.com/util/syspolicy/rsop"
"tailscale.com/util/systemd"
"tailscale.com/util/testenv"
"tailscale.com/util/uniq"
@@ -154,10 +156,12 @@ func RegisterNewSSHServer(fn newSSHServerFunc) {
newSSHServer = fn
}
-// watchSession represents a WatchNotifications channel
+// watchSession represents a WatchNotifications channel,
+// an [ipnauth.Actor] that owns it (e.g., a connected GUI/CLI),
// and sessionID as required to close targeted buses.
type watchSession struct {
ch chan *ipn.Notify
+ owner ipnauth.Actor // or nil
sessionID string
cancel func() // call to signal that the session must be terminated
}
@@ -174,27 +178,28 @@ type watchSession struct {
// state machine generates events back out to zero or more components.
type LocalBackend struct {
// Elements that are thread-safe or constant after construction.
- ctx context.Context // canceled by Close
- ctxCancel context.CancelFunc // cancels ctx
- logf logger.Logf // general logging
- keyLogf logger.Logf // for printing list of peers on change
- statsLogf logger.Logf // for printing peers stats on change
- sys *tsd.System
- health *health.Tracker // always non-nil
- metrics metrics
- e wgengine.Engine // non-nil; TODO(bradfitz): remove; use sys
- store ipn.StateStore // non-nil; TODO(bradfitz): remove; use sys
- dialer *tsdial.Dialer // non-nil; TODO(bradfitz): remove; use sys
- pushDeviceToken syncs.AtomicValue[string]
- backendLogID logid.PublicID
- unregisterNetMon func()
- unregisterHealthWatch func()
- portpoll *portlist.Poller // may be nil
- portpollOnce sync.Once // guards starting readPoller
- varRoot string // or empty if SetVarRoot never called
- logFlushFunc func() // or nil if SetLogFlusher wasn't called
- em *expiryManager // non-nil
- sshAtomicBool atomic.Bool
+ ctx context.Context // canceled by Close
+ ctxCancel context.CancelFunc // cancels ctx
+ logf logger.Logf // general logging
+ keyLogf logger.Logf // for printing list of peers on change
+ statsLogf logger.Logf // for printing peers stats on change
+ sys *tsd.System
+ health *health.Tracker // always non-nil
+ metrics metrics
+ e wgengine.Engine // non-nil; TODO(bradfitz): remove; use sys
+ store ipn.StateStore // non-nil; TODO(bradfitz): remove; use sys
+ dialer *tsdial.Dialer // non-nil; TODO(bradfitz): remove; use sys
+ pushDeviceToken syncs.AtomicValue[string]
+ backendLogID logid.PublicID
+ unregisterNetMon func()
+ unregisterHealthWatch func()
+ unregisterSysPolicyWatch func()
+ portpoll *portlist.Poller // may be nil
+ portpollOnce sync.Once // guards starting readPoller
+ varRoot string // or empty if SetVarRoot never called
+ logFlushFunc func() // or nil if SetLogFlusher wasn't called
+ em *expiryManager // non-nil
+ sshAtomicBool atomic.Bool
// webClientAtomicBool controls whether the web client is running. This should
// be true unless the disable-web-client node attribute has been set.
webClientAtomicBool atomic.Bool
@@ -264,9 +269,9 @@ type LocalBackend struct {
endpoints []tailcfg.Endpoint
blocked bool
keyExpired bool
- authURL string // non-empty if not Running
- authURLTime time.Time // when the authURL was received from the control server
- interact bool // indicates whether a user requested interactive login
+ authURL string // non-empty if not Running
+ authURLTime time.Time // when the authURL was received from the control server
+ authActor ipnauth.Actor // an actor who called [LocalBackend.StartLoginInteractive] last, or nil
egg bool
prevIfState *netmon.State
peerAPIServer *peerAPIServer // or nil
@@ -350,6 +355,12 @@ type LocalBackend struct {
// avoid unnecessary churn between multiple equally-good options.
lastSuggestedExitNode tailcfg.StableNodeID
+ // allowedSuggestedExitNodes is a set of exit nodes permitted by the most recent
+ // [syspolicy.AllowedSuggestedExitNodes] value. The allowedSuggestedExitNodesMu
+ // mutex guards access to this set.
+ allowedSuggestedExitNodesMu sync.Mutex
+ allowedSuggestedExitNodes set.Set[tailcfg.StableNodeID]
+
// refreshAutoExitNode indicates if the exit node should be recomputed when the next netcheck report is available.
refreshAutoExitNode bool
@@ -396,11 +407,6 @@ type metrics struct {
// approvedRoutes is a metric that reports the number of network routes served by the local node and approved
// by the control server.
approvedRoutes *usermetric.Gauge
-
- // primaryRoutes is a metric that reports the number of primary network routes served by the local node.
- // A route being a primary route implies that the route is currently served by this node, and not by another
- // subnet router in a high availability configuration.
- primaryRoutes *usermetric.Gauge
}
// clientGen is a func that creates a control plane client.
@@ -411,7 +417,7 @@ type clientGen func(controlclient.Options) (controlclient.Client, error)
// but is not actually running.
//
// If dialer is nil, a new one is made.
-func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, loginFlags controlclient.LoginFlags) (*LocalBackend, error) {
+func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, loginFlags controlclient.LoginFlags) (_ *LocalBackend, err error) {
e := sys.Engine.Get()
store := sys.StateStore.Get()
dialer := sys.Dialer.Get()
@@ -451,8 +457,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo
"tailscaled_advertised_routes", "Number of advertised network routes (e.g. by a subnet router)"),
approvedRoutes: sys.UserMetricsRegistry().NewGauge(
"tailscaled_approved_routes", "Number of approved network routes (e.g. by a subnet router)"),
- primaryRoutes: sys.UserMetricsRegistry().NewGauge(
- "tailscaled_primary_routes", "Number of network routes for which this node is a primary router (in high availability configuration)"),
}
b := &LocalBackend{
@@ -483,11 +487,20 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo
mConn.SetNetInfoCallback(b.setNetInfo)
if sys.InitialConfig != nil {
- if err := b.setConfigLocked(sys.InitialConfig); err != nil {
+ if err := b.initPrefsFromConfig(sys.InitialConfig); err != nil {
return nil, err
}
}
+ if b.unregisterSysPolicyWatch, err = b.registerSysPolicyWatch(); err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ b.unregisterSysPolicyWatch()
+ }
+ }()
+
netMon := sys.NetMon.Get()
b.sockstatLogger, err = sockstatlog.NewLogger(logpolicy.LogsDir(logf), logf, logID, netMon, sys.HealthTracker())
if err != nil {
@@ -716,8 +729,8 @@ func (b *LocalBackend) SetDirectFileRoot(dir string) {
// It returns (false, nil) if not running in declarative mode, (true, nil) on
// success, or (false, error) on failure.
func (b *LocalBackend) ReloadConfig() (ok bool, err error) {
- b.mu.Lock()
- defer b.mu.Unlock()
+ unlock := b.lockAndGetUnlock()
+ defer unlock()
if b.conf == nil {
return false, nil
}
@@ -725,18 +738,21 @@ func (b *LocalBackend) ReloadConfig() (ok bool, err error) {
if err != nil {
return false, err
}
- if err := b.setConfigLocked(conf); err != nil {
+ if err := b.setConfigLockedOnEntry(conf, unlock); err != nil {
return false, fmt.Errorf("error setting config: %w", err)
}
return true, nil
}
-func (b *LocalBackend) setConfigLocked(conf *conffile.Config) error {
-
- // TODO(irbekrm): notify the relevant components to consume any prefs
- // updates. Currently only initial configfile settings are applied
- // immediately.
+// initPrefsFromConfig initializes the backend's prefs from the provided config.
+// This should only be called once, at startup. For updates at runtime, use
+// [LocalBackend.setConfigLocked].
+func (b *LocalBackend) initPrefsFromConfig(conf *conffile.Config) error {
+ // TODO(maisem,bradfitz): combine this with setConfigLocked. This is called
+ // before anything is running, so there's no need to lock and we don't
+ // update any subsystems. At runtime, we both need to lock and update
+ // subsystems with the new prefs.
p := b.pm.CurrentPrefs().AsStruct()
mp, err := conf.Parsed.ToPrefs()
if err != nil {
@@ -746,13 +762,14 @@ func (b *LocalBackend) setConfigLocked(conf *conffile.Config) error {
if err := b.pm.SetPrefs(p.View(), ipn.NetworkProfile{}); err != nil {
return err
}
+ b.setStaticEndpointsFromConfigLocked(conf)
+ b.conf = conf
+ return nil
+}
- defer func() {
- b.conf = conf
- }()
-
+func (b *LocalBackend) setStaticEndpointsFromConfigLocked(conf *conffile.Config) {
if conf.Parsed.StaticEndpoints == nil && (b.conf == nil || b.conf.Parsed.StaticEndpoints == nil) {
- return nil
+ return
}
// Ensure that magicsock conn has the up to date static wireguard
@@ -766,6 +783,22 @@ func (b *LocalBackend) setConfigLocked(conf *conffile.Config) error {
ms.SetStaticEndpoints(views.SliceOf(conf.Parsed.StaticEndpoints))
}
}
+}
+
+// setConfigLockedOnEntry uses the provided config to update the backend's prefs
+// and other state.
+func (b *LocalBackend) setConfigLockedOnEntry(conf *conffile.Config, unlock unlockOnce) error {
+ defer unlock()
+ p := b.pm.CurrentPrefs().AsStruct()
+ mp, err := conf.Parsed.ToPrefs()
+ if err != nil {
+ return fmt.Errorf("error parsing config to prefs: %w", err)
+ }
+ p.ApplyEdits(&mp)
+ b.setStaticEndpointsFromConfigLocked(conf)
+ b.setPrefsLockedOnEntry(p, unlock)
+
+ b.conf = conf
return nil
}
@@ -784,6 +817,19 @@ func (b *LocalBackend) pauseOrResumeControlClientLocked() {
b.cc.SetPaused((b.state == ipn.Stopped && b.netMap != nil) || (!networkUp && !testenv.InTest() && !assumeNetworkUpdateForTest()))
}
+// DisconnectControl shuts down the control client. This can be run before node shutdown to force control to consider this node
+// inactive. This can be used to ensure that when nodes that are HA subnet router or app connector replicas are shutting
+// down, clients switch over to other replicas while the existing connections are kept alive for some period of time.
+func (b *LocalBackend) DisconnectControl() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ cc := b.resetControlClientLocked()
+ if cc == nil {
+ return
+ }
+ cc.Shutdown()
+}
+
// captivePortalDetectionInterval is the duration to wait in an unhealthy state with connectivity broken
// before running captive portal detection.
const captivePortalDetectionInterval = 2 * time.Second
@@ -951,6 +997,7 @@ func (b *LocalBackend) Shutdown() {
b.unregisterNetMon()
b.unregisterHealthWatch()
+ b.unregisterSysPolicyWatch()
if cc != nil {
cc.Shutdown()
}
@@ -1459,10 +1506,10 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control
b.logf("SetControlClientStatus failed to select auto exit node: %v", err)
}
}
- if setExitNodeID(prefs, curNetMap, b.lastSuggestedExitNode) {
+ if applySysPolicy(prefs, b.lastSuggestedExitNode) {
prefsChanged = true
}
- if applySysPolicy(prefs) {
+ if setExitNodeID(prefs, curNetMap) {
prefsChanged = true
}
@@ -1628,12 +1675,37 @@ var preferencePolicies = []preferencePolicyInfo{
// applySysPolicy overwrites configured preferences with policies that may be
// configured by the system administrator in an OS-specific way.
-func applySysPolicy(prefs *ipn.Prefs) (anyChange bool) {
+func applySysPolicy(prefs *ipn.Prefs, lastSuggestedExitNode tailcfg.StableNodeID) (anyChange bool) {
if controlURL, err := syspolicy.GetString(syspolicy.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL {
prefs.ControlURL = controlURL
anyChange = true
}
+ if exitNodeIDStr, _ := syspolicy.GetString(syspolicy.ExitNodeID, ""); exitNodeIDStr != "" {
+ exitNodeID := tailcfg.StableNodeID(exitNodeIDStr)
+ if shouldAutoExitNode() && lastSuggestedExitNode != "" {
+ exitNodeID = lastSuggestedExitNode
+ }
+ // Note: when exitNodeIDStr == "auto" && lastSuggestedExitNode == "",
+ // then exitNodeID is now "auto" which will never match a peer's node ID.
+	// When there is no peer matching the node ID, traffic will blackhole,
+ // preventing accidental non-exit-node usage when a policy is in effect that requires an exit node.
+ if prefs.ExitNodeID != exitNodeID || prefs.ExitNodeIP.IsValid() {
+ anyChange = true
+ }
+ prefs.ExitNodeID = exitNodeID
+ prefs.ExitNodeIP = netip.Addr{}
+ } else if exitNodeIPStr, _ := syspolicy.GetString(syspolicy.ExitNodeIP, ""); exitNodeIPStr != "" {
+ exitNodeIP, err := netip.ParseAddr(exitNodeIPStr)
+ if exitNodeIP.IsValid() && err == nil {
+ if prefs.ExitNodeID != "" || prefs.ExitNodeIP != exitNodeIP {
+ anyChange = true
+ }
+ prefs.ExitNodeID = ""
+ prefs.ExitNodeIP = exitNodeIP
+ }
+ }
+
for _, opt := range preferencePolicies {
if po, err := syspolicy.GetPreferenceOption(opt.key); err == nil {
curVal := opt.get(prefs.View())
@@ -1648,6 +1720,54 @@ func applySysPolicy(prefs *ipn.Prefs) (anyChange bool) {
return anyChange
}
+// registerSysPolicyWatch subscribes to syspolicy change notifications
+// and immediately applies the effective syspolicy settings to the current profile.
+func (b *LocalBackend) registerSysPolicyWatch() (unregister func(), err error) {
+ if unregister, err = syspolicy.RegisterChangeCallback(b.sysPolicyChanged); err != nil {
+ return nil, fmt.Errorf("syspolicy: LocalBacked failed to register policy change callback: %v", err)
+ }
+ if prefs, anyChange := b.applySysPolicy(); anyChange {
+ b.logf("syspolicy: changed initial profile prefs: %v", prefs.Pretty())
+ }
+ b.refreshAllowedSuggestions()
+ return unregister, nil
+}
+
+// applySysPolicy overwrites the current profile's preferences with policies
+// that may be configured by the system administrator in an OS-specific way.
+//
+// b.mu must not be held.
+func (b *LocalBackend) applySysPolicy() (_ ipn.PrefsView, anyChange bool) {
+ unlock := b.lockAndGetUnlock()
+ prefs := b.pm.CurrentPrefs().AsStruct()
+ if !applySysPolicy(prefs, b.lastSuggestedExitNode) {
+ unlock.UnlockEarly()
+ return prefs.View(), false
+ }
+ return b.setPrefsLockedOnEntry(prefs, unlock), true
+}
+
+// sysPolicyChanged is a callback triggered by syspolicy when it detects
+// a change in one or more syspolicy settings.
+func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) {
+ if policy.HasChanged(syspolicy.AllowedSuggestedExitNodes) {
+ b.refreshAllowedSuggestions()
+ // Re-evaluate exit node suggestion now that the policy setting has changed.
+ b.mu.Lock()
+ _, err := b.suggestExitNodeLocked(nil)
+ b.mu.Unlock()
+ if err != nil && !errors.Is(err, ErrNoPreferredDERP) {
+ b.logf("failed to select auto exit node: %v", err)
+ }
+ // If [syspolicy.ExitNodeID] is set to `auto:any`, the suggested exit node ID
+ // will be used when [applySysPolicy] updates the current profile's prefs.
+ }
+
+ if prefs, anyChange := b.applySysPolicy(); anyChange {
+ b.logf("syspolicy: changed profile prefs: %v", prefs.Pretty())
+ }
+}
+
var _ controlclient.NetmapDeltaUpdater = (*LocalBackend)(nil)
// UpdateNetmapDelta implements controlclient.NetmapDeltaUpdater.
@@ -1740,30 +1860,7 @@ func (b *LocalBackend) updateNetmapDeltaLocked(muts []netmap.NodeMutation) (hand
// setExitNodeID updates prefs to reference an exit node by ID, rather
// than by IP. It returns whether prefs was mutated.
-func setExitNodeID(prefs *ipn.Prefs, nm *netmap.NetworkMap, lastSuggestedExitNode tailcfg.StableNodeID) (prefsChanged bool) {
- if exitNodeIDStr, _ := syspolicy.GetString(syspolicy.ExitNodeID, ""); exitNodeIDStr != "" {
- exitNodeID := tailcfg.StableNodeID(exitNodeIDStr)
- if shouldAutoExitNode() && lastSuggestedExitNode != "" {
- exitNodeID = lastSuggestedExitNode
- }
- // Note: when exitNodeIDStr == "auto" && lastSuggestedExitNode == "", then exitNodeID is now "auto" which will never match a peer's node ID.
- // When there is no a peer matching the node ID, traffic will blackhole, preventing accidental non-exit-node usage when a policy is in effect that requires an exit node.
- changed := prefs.ExitNodeID != exitNodeID || prefs.ExitNodeIP.IsValid()
- prefs.ExitNodeID = exitNodeID
- prefs.ExitNodeIP = netip.Addr{}
- return changed
- }
-
- oldExitNodeID := prefs.ExitNodeID
- if exitNodeIPStr, _ := syspolicy.GetString(syspolicy.ExitNodeIP, ""); exitNodeIPStr != "" {
- exitNodeIP, err := netip.ParseAddr(exitNodeIPStr)
- if exitNodeIP.IsValid() && err == nil {
- prefsChanged = prefs.ExitNodeID != "" || prefs.ExitNodeIP != exitNodeIP
- prefs.ExitNodeID = ""
- prefs.ExitNodeIP = exitNodeIP
- }
- }
-
+func setExitNodeID(prefs *ipn.Prefs, nm *netmap.NetworkMap) (prefsChanged bool) {
if nm == nil {
// No netmap, can't resolve anything.
return false
@@ -1781,9 +1878,9 @@ func setExitNodeID(prefs *ipn.Prefs, nm *netmap.NetworkMap, lastSuggestedExitNod
prefsChanged = true
}
+ oldExitNodeID := prefs.ExitNodeID
for _, peer := range nm.Peers {
- for i := range peer.Addresses().Len() {
- addr := peer.Addresses().At(i)
+ for _, addr := range peer.Addresses().All() {
if !addr.IsSingleIP() || addr.Addr() != prefs.ExitNodeIP {
continue
}
@@ -1791,7 +1888,7 @@ func setExitNodeID(prefs *ipn.Prefs, nm *netmap.NetworkMap, lastSuggestedExitNod
// reference it directly for next time.
prefs.ExitNodeID = peer.StableID()
prefs.ExitNodeIP = netip.Addr{}
- return oldExitNodeID != prefs.ExitNodeID
+ return prefsChanged || oldExitNodeID != prefs.ExitNodeID
}
}
@@ -2128,10 +2225,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
blid := b.backendLogID.String()
b.logf("Backend: logs: be:%v fe:%v", blid, opts.FrontendLogID)
- b.sendLocked(ipn.Notify{
- BackendLogID: &blid,
- Prefs: &prefs,
- })
+ b.sendToLocked(ipn.Notify{Prefs: &prefs}, allClients)
if !loggedOut && (b.hasNodeKeyLocked() || confWantRunning) {
// If we know that we're either logged in or meant to be
@@ -2661,10 +2755,15 @@ func applyConfigToHostinfo(hi *tailcfg.Hostinfo, c *conffile.Config) {
// notifications. There is currently (2022-11-22) no mechanism provided to
// detect when a message has been dropped.
func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWatchOpt, onWatchAdded func(), fn func(roNotify *ipn.Notify) (keepGoing bool)) {
- ch := make(chan *ipn.Notify, 128)
+ b.WatchNotificationsAs(ctx, nil, mask, onWatchAdded, fn)
+}
+// WatchNotificationsAs is like WatchNotifications but takes an [ipnauth.Actor]
+// as an additional parameter. If non-nil, the specified callback is invoked
+// only for notifications relevant to this actor.
+func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.Actor, mask ipn.NotifyWatchOpt, onWatchAdded func(), fn func(roNotify *ipn.Notify) (keepGoing bool)) {
+ ch := make(chan *ipn.Notify, 128)
sessionID := rands.HexString(16)
-
origFn := fn
if mask&ipn.NotifyNoPrivateKeys != 0 {
fn = func(n *ipn.Notify) bool {
@@ -2716,6 +2815,7 @@ func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWa
session := &watchSession{
ch: ch,
+ owner: actor,
sessionID: sessionID,
cancel: cancel,
}
@@ -2753,20 +2853,17 @@ func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWa
go b.pollRequestEngineStatus(ctx)
}
- // TODO(marwan-at-work): check err
// TODO(marwan-at-work): streaming background logs?
defer b.DeleteForegroundSession(sessionID)
- for {
- select {
- case <-ctx.Done():
- return
- case n := <-ch:
- if !fn(n) {
- return
- }
- }
+ sender := &rateLimitingBusSender{fn: fn}
+ defer sender.close()
+
+ if mask&ipn.NotifyRateLimit != 0 {
+ sender.interval = 3 * time.Second
}
+
+ sender.Run(ctx, ch)
}
// pollRequestEngineStatus calls b.e.RequestStatus every 2 seconds until ctx
@@ -2828,6 +2925,12 @@ func (b *LocalBackend) DebugPickNewDERP() error {
return b.sys.MagicSock.Get().DebugPickNewDERP()
}
+// DebugForcePreferDERP forwards to netcheck.DebugForcePreferDERP.
+// See its docs.
+func (b *LocalBackend) DebugForcePreferDERP(n int) {
+ b.sys.MagicSock.Get().DebugForcePreferDERP(n)
+}
+
// send delivers n to the connected frontend and any API watchers from
// LocalBackend.WatchNotifications (via the LocalAPI).
//
@@ -2838,13 +2941,71 @@ func (b *LocalBackend) DebugPickNewDERP() error {
//
// b.mu must not be held.
func (b *LocalBackend) send(n ipn.Notify) {
+ b.sendTo(n, allClients)
+}
+
+// notificationTarget describes a notification recipient.
+// A zero value is valid and indicate that the notification
+// should be broadcast to all active [watchSession]s.
+type notificationTarget struct {
+ // userID is the OS-specific UID of the target user.
+ // If empty, the notification is not user-specific and
+ // will be broadcast to all connected users.
+ // TODO(nickkhyl): make this field cross-platform rather
+ // than Windows-specific.
+ userID ipn.WindowsUserID
+ // clientID identifies a client that should be the exclusive recipient
+ // of the notification. A zero value indicates that notification should
+ // be sent to all sessions of the specified user.
+ clientID ipnauth.ClientID
+}
+
+var allClients = notificationTarget{} // broadcast to all connected clients
+
+// toNotificationTarget returns a [notificationTarget] that matches only actors
+// representing the same user as the specified actor. If the actor represents
+// a specific connected client, the [ipnauth.ClientID] must also match.
+// If the actor is nil, the [notificationTarget] matches all actors.
+func toNotificationTarget(actor ipnauth.Actor) notificationTarget {
+ t := notificationTarget{}
+ if actor != nil {
+ t.userID = actor.UserID()
+ t.clientID, _ = actor.ClientID()
+ }
+ return t
+}
+
+// match reports whether the specified actor should receive notifications
+// targeting t. If the actor is nil, it should only receive notifications
+// intended for all users.
+func (t notificationTarget) match(actor ipnauth.Actor) bool {
+ if t == allClients {
+ return true
+ }
+ if actor == nil {
+ return false
+ }
+ if t.userID != "" && t.userID != actor.UserID() {
+ return false
+ }
+ if t.clientID != ipnauth.NoClientID {
+ clientID, ok := actor.ClientID()
+ if !ok || clientID != t.clientID {
+ return false
+ }
+ }
+ return true
+}
+
+// sendTo is like [LocalBackend.send] but allows specifying a recipient.
+func (b *LocalBackend) sendTo(n ipn.Notify, recipient notificationTarget) {
b.mu.Lock()
defer b.mu.Unlock()
- b.sendLocked(n)
+ b.sendToLocked(n, recipient)
}
-// sendLocked is like send, but assumes b.mu is already held.
-func (b *LocalBackend) sendLocked(n ipn.Notify) {
+// sendToLocked is like [LocalBackend.sendTo], but assumes b.mu is already held.
+func (b *LocalBackend) sendToLocked(n ipn.Notify, recipient notificationTarget) {
if n.Prefs != nil {
n.Prefs = ptr.To(stripKeysFromPrefs(*n.Prefs))
}
@@ -2858,10 +3019,12 @@ func (b *LocalBackend) sendLocked(n ipn.Notify) {
}
for _, sess := range b.notifyWatchers {
- select {
- case sess.ch <- &n:
- default:
- // Drop the notification if the channel is full.
+ if recipient.match(sess.owner) {
+ select {
+ case sess.ch <- &n:
+ default:
+ // Drop the notification if the channel is full.
+ }
}
}
}
@@ -2896,15 +3059,18 @@ func (b *LocalBackend) sendFileNotify() {
// This method is called when a new authURL is received from the control plane, meaning that either a user
// has started a new interactive login (e.g., by running `tailscale login` or clicking Login in the GUI),
// or the control plane was unable to authenticate this node non-interactively (e.g., due to key expiration).
-// b.interact indicates whether an interactive login is in progress.
+// A non-nil b.authActor indicates that an interactive login is in progress and was initiated by the specified actor.
// If url is "", it is equivalent to calling [LocalBackend.resetAuthURLLocked] with b.mu held.
func (b *LocalBackend) setAuthURL(url string) {
var popBrowser, keyExpired bool
+ var recipient ipnauth.Actor
b.mu.Lock()
switch {
case url == "":
b.resetAuthURLLocked()
+ b.mu.Unlock()
+ return
case b.authURL != url:
b.authURL = url
b.authURLTime = b.clock.Now()
@@ -2913,26 +3079,27 @@ func (b *LocalBackend) setAuthURL(url string) {
popBrowser = true
default:
// Otherwise, only open it if the user explicitly requests interactive login.
- popBrowser = b.interact
+ popBrowser = b.authActor != nil
}
keyExpired = b.keyExpired
+ recipient = b.authActor // or nil
// Consume the StartLoginInteractive call, if any, that caused the control
// plane to send us this URL.
- b.interact = false
+ b.authActor = nil
b.mu.Unlock()
if popBrowser {
- b.popBrowserAuthNow(url, keyExpired)
+ b.popBrowserAuthNow(url, keyExpired, recipient)
}
}
-// popBrowserAuthNow shuts down the data plane and sends an auth URL
-// to the connected frontend, if any.
+// popBrowserAuthNow shuts down the data plane and sends the URL to the recipient's
+// [watchSession]s if the recipient is non-nil; otherwise, it sends the URL to all watchSessions.
// keyExpired is the value of b.keyExpired upon entry and indicates
// whether the node's key has expired.
// It must not be called with b.mu held.
-func (b *LocalBackend) popBrowserAuthNow(url string, keyExpired bool) {
- b.logf("popBrowserAuthNow: url=%v, key-expired=%v, seamless-key-renewal=%v", url != "", keyExpired, b.seamlessRenewalEnabled())
+func (b *LocalBackend) popBrowserAuthNow(url string, keyExpired bool, recipient ipnauth.Actor) {
+ b.logf("popBrowserAuthNow(%q): url=%v, key-expired=%v, seamless-key-renewal=%v", maybeUsernameOf(recipient), url != "", keyExpired, b.seamlessRenewalEnabled())
// Deconfigure the local network data plane if:
// - seamless key renewal is not enabled;
@@ -2941,7 +3108,7 @@ func (b *LocalBackend) popBrowserAuthNow(url string, keyExpired bool) {
b.blockEngineUpdates(true)
b.stopEngineAndWait()
}
- b.tellClientToBrowseToURL(url)
+ b.tellRecipientToBrowseToURL(url, toNotificationTarget(recipient))
if b.State() == ipn.Running {
b.enterState(ipn.Starting)
}
@@ -2982,8 +3149,13 @@ func (b *LocalBackend) validPopBrowserURL(urlStr string) bool {
}
func (b *LocalBackend) tellClientToBrowseToURL(url string) {
+ b.tellRecipientToBrowseToURL(url, allClients)
+}
+
+// tellRecipientToBrowseToURL is like tellClientToBrowseToURL but allows specifying a recipient.
+func (b *LocalBackend) tellRecipientToBrowseToURL(url string, recipient notificationTarget) {
if b.validPopBrowserURL(url) {
- b.send(ipn.Notify{BrowseToURL: &url})
+ b.sendTo(ipn.Notify{BrowseToURL: &url}, recipient)
}
}
@@ -3255,6 +3427,15 @@ func (b *LocalBackend) tryLookupUserName(uid string) string {
// StartLoginInteractive attempts to pick up the in-progress flow where it left
// off.
func (b *LocalBackend) StartLoginInteractive(ctx context.Context) error {
+ return b.StartLoginInteractiveAs(ctx, nil)
+}
+
+// StartLoginInteractiveAs is like StartLoginInteractive but takes an [ipnauth.Actor]
+// as an additional parameter. If non-nil, the specified user is expected to complete
+// the interactive login, and therefore will receive the BrowseToURL notification once
+// the control plane sends us one. Otherwise, the notification will be delivered to all
+// active [watchSession]s.
+func (b *LocalBackend) StartLoginInteractiveAs(ctx context.Context, user ipnauth.Actor) error {
b.mu.Lock()
if b.cc == nil {
panic("LocalBackend.assertClient: b.cc == nil")
@@ -3268,17 +3449,17 @@ func (b *LocalBackend) StartLoginInteractive(ctx context.Context) error {
hasValidURL := url != "" && timeSinceAuthURLCreated < ((7*24*time.Hour)-(1*time.Hour))
if !hasValidURL {
// A user wants to log in interactively, but we don't have a valid authURL.
- // Set a flag to indicate that interactive login is in progress, forcing
- // a BrowseToURL notification once the authURL becomes available.
- b.interact = true
+ // Remember the user who initiated the login, so that we can notify them
+ // once the authURL is available.
+ b.authActor = user
}
cc := b.cc
b.mu.Unlock()
- b.logf("StartLoginInteractive: url=%v", hasValidURL)
+ b.logf("StartLoginInteractiveAs(%q): url=%v", maybeUsernameOf(user), hasValidURL)
if hasValidURL {
- b.popBrowserAuthNow(url, keyExpired)
+ b.popBrowserAuthNow(url, keyExpired, user)
} else {
cc.Login(b.loginFlags | controlclient.LoginInteractive)
}
@@ -3489,7 +3670,7 @@ func (b *LocalBackend) checkSSHPrefsLocked(p *ipn.Prefs) error {
if !p.RunSSH {
return nil
}
- if err := envknob.CanRunTailscaleSSH(); err != nil {
+ if err := featureknob.CanRunTailscaleSSH(); err != nil {
return err
}
if runtime.GOOS == "linux" {
@@ -3570,7 +3751,16 @@ func updateExitNodeUsageWarning(p ipn.PrefsView, state *netmon.State, healthTrac
}
func (b *LocalBackend) checkExitNodePrefsLocked(p *ipn.Prefs) error {
- if (p.ExitNodeIP.IsValid() || p.ExitNodeID != "") && p.AdvertisesExitNode() {
+ tryingToUseExitNode := p.ExitNodeIP.IsValid() || p.ExitNodeID != ""
+ if !tryingToUseExitNode {
+ return nil
+ }
+
+ if err := featureknob.CanUseExitNode(); err != nil {
+ return err
+ }
+
+ if p.AdvertisesExitNode() {
return errors.New("Cannot advertise an exit node and use an exit node at the same time.")
}
return nil
@@ -3738,12 +3928,12 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce)
if oldp.Valid() {
newp.Persist = oldp.Persist().AsStruct() // caller isn't allowed to override this
}
- // setExitNodeID returns whether it updated b.prefs, but
- // everything in this function treats b.prefs as completely new
- // anyway. No-op if no exit node resolution is needed.
- setExitNodeID(newp, netMap, b.lastSuggestedExitNode)
- // applySysPolicy does likewise so we can also ignore its return value.
- applySysPolicy(newp)
+ // applySysPolicyToPrefsLocked returns whether it updated newp,
+ // but everything in this function treats b.prefs as completely new
+ // anyway, so its return value can be ignored here.
+ applySysPolicy(newp, b.lastSuggestedExitNode)
+ // setExitNodeID does likewise. No-op if no exit node resolution is needed.
+ setExitNodeID(newp, netMap)
// We do this to avoid holding the lock while doing everything else.
oldHi := b.hostinfo
@@ -3780,10 +3970,14 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce)
}
prefs := newp.View()
- if err := b.pm.SetPrefs(prefs, ipn.NetworkProfile{
- MagicDNSName: b.netMap.MagicDNSSuffix(),
- DomainName: b.netMap.DomainName(),
- }); err != nil {
+ np := b.pm.CurrentProfile().NetworkProfile
+ if netMap != nil {
+ np = ipn.NetworkProfile{
+ MagicDNSName: b.netMap.MagicDNSSuffix(),
+ DomainName: b.netMap.DomainName(),
+ }
+ }
+ if err := b.pm.SetPrefs(prefs, np); err != nil {
b.logf("failed to save new controlclient state: %v", err)
}
@@ -4777,6 +4971,14 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip
}
hi.SSH_HostKeys = sshHostKeys
+ services := vipServicesFromPrefs(prefs)
+ if len(services) > 0 {
+ buf, _ := json.Marshal(services)
+ hi.ServicesHash = fmt.Sprintf("%02x", sha256.Sum256(buf))
+ } else {
+ hi.ServicesHash = ""
+ }
+
// The Hostinfo.WantIngress field tells control whether this node wants to
// be wired up for ingress connections. If harmless if it's accidentally
// true; the actual policy is controlled in tailscaled by ServeConfig. But
@@ -4885,8 +5087,8 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock
case ipn.Running:
var addrStrs []string
addrs := netMap.GetAddresses()
- for i := range addrs.Len() {
- addrStrs = append(addrStrs, addrs.At(i).Addr().String())
+ for _, p := range addrs.All() {
+ addrStrs = append(addrStrs, p.Addr().String())
}
systemd.Status("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " "))
case ipn.NoState:
@@ -5124,7 +5326,7 @@ func (b *LocalBackend) resetControlClientLocked() controlclient.Client {
func (b *LocalBackend) resetAuthURLLocked() {
b.authURL = ""
b.authURLTime = time.Time{}
- b.interact = false
+ b.authActor = nil
}
// ResetForClientDisconnect resets the backend for GUI clients running
@@ -5391,7 +5593,6 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) {
// If there is no netmap, the client is going into a "turned off"
// state so reset the metrics.
b.metrics.approvedRoutes.Set(0)
- b.metrics.primaryRoutes.Set(0)
return
}
@@ -5420,7 +5621,6 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) {
}
}
b.metrics.approvedRoutes.Set(approved)
- b.metrics.primaryRoutes.Set(float64(tsaddr.WithoutExitRoute(nm.SelfNode.PrimaryRoutes()).Len()))
}
for _, p := range nm.Peers {
addNode(p)
@@ -5979,8 +6179,7 @@ func (b *LocalBackend) SetDNS(ctx context.Context, name, value string) error {
func peerAPIPorts(peer tailcfg.NodeView) (p4, p6 uint16) {
svcs := peer.Hostinfo().Services()
- for i := range svcs.Len() {
- s := svcs.At(i)
+ for _, s := range svcs.All() {
switch s.Proto {
case tailcfg.PeerAPI4:
p4 = s.Port
@@ -6012,8 +6211,7 @@ func peerAPIBase(nm *netmap.NetworkMap, peer tailcfg.NodeView) string {
var have4, have6 bool
addrs := nm.GetAddresses()
- for i := range addrs.Len() {
- a := addrs.At(i)
+ for _, a := range addrs.All() {
if !a.IsSingleIP() {
continue
}
@@ -6035,10 +6233,9 @@ func peerAPIBase(nm *netmap.NetworkMap, peer tailcfg.NodeView) string {
}
func nodeIP(n tailcfg.NodeView, pred func(netip.Addr) bool) netip.Addr {
- for i := range n.Addresses().Len() {
- a := n.Addresses().At(i)
- if a.IsSingleIP() && pred(a.Addr()) {
- return a.Addr()
+ for _, pfx := range n.Addresses().All() {
+ if pfx.IsSingleIP() && pred(pfx.Addr()) {
+ return pfx.Addr()
}
}
return netip.Addr{}
@@ -6268,8 +6465,8 @@ func peerCanProxyDNS(p tailcfg.NodeView) bool {
// If p.Cap is not populated (e.g. older control server), then do the old
// thing of searching through services.
services := p.Hostinfo().Services()
- for i := range services.Len() {
- if s := services.At(i); s.Proto == tailcfg.PeerAPIDNS && s.Port >= 1 {
+ for _, s := range services.All() {
+ if s.Proto == tailcfg.PeerAPIDNS && s.Port >= 1 {
return true
}
}
@@ -7035,7 +7232,7 @@ func (b *LocalBackend) suggestExitNodeLocked(netMap *netmap.NetworkMap) (respons
lastReport := b.MagicConn().GetLastNetcheckReport(b.ctx)
prevSuggestion := b.lastSuggestedExitNode
- res, err := suggestExitNode(lastReport, netMap, prevSuggestion, randomRegion, randomNode, getAllowedSuggestions())
+ res, err := suggestExitNode(lastReport, netMap, prevSuggestion, randomRegion, randomNode, b.getAllowedSuggestions())
if err != nil {
return res, err
}
@@ -7049,6 +7246,22 @@ func (b *LocalBackend) SuggestExitNode() (response apitype.ExitNodeSuggestionRes
return b.suggestExitNodeLocked(nil)
}
+// getAllowedSuggestions returns a set of exit nodes permitted by the most recent
+// [syspolicy.AllowedSuggestedExitNodes] value. Callers must not mutate the returned set.
+func (b *LocalBackend) getAllowedSuggestions() set.Set[tailcfg.StableNodeID] {
+ b.allowedSuggestedExitNodesMu.Lock()
+ defer b.allowedSuggestedExitNodesMu.Unlock()
+ return b.allowedSuggestedExitNodes
+}
+
+// refreshAllowedSuggestions rebuilds the set of permitted exit nodes
+// from the current [syspolicy.AllowedSuggestedExitNodes] value.
+func (b *LocalBackend) refreshAllowedSuggestions() {
+ b.allowedSuggestedExitNodesMu.Lock()
+ defer b.allowedSuggestedExitNodesMu.Unlock()
+ b.allowedSuggestedExitNodes = fillAllowedSuggestions()
+}
+
// selectRegionFunc returns a DERP region from the slice of candidate regions.
// The value is returned, not the slice index.
type selectRegionFunc func(views.Slice[int]) int
@@ -7058,8 +7271,6 @@ type selectRegionFunc func(views.Slice[int]) int
// choice.
type selectNodeFunc func(nodes views.Slice[tailcfg.NodeView], last tailcfg.StableNodeID) tailcfg.NodeView
-var getAllowedSuggestions = lazy.SyncFunc(fillAllowedSuggestions)
-
func fillAllowedSuggestions() set.Set[tailcfg.StableNodeID] {
nodes, err := syspolicy.GetStringArray(syspolicy.AllowedSuggestedExitNodes, nil)
if err != nil {
@@ -7369,3 +7580,52 @@ func (b *LocalBackend) srcIPHasCapForFilter(srcIP netip.Addr, cap tailcfg.NodeCa
}
return n.HasCap(cap)
}
+
+// maybeUsernameOf returns the actor's username if the actor
+// is non-nil and its username can be resolved.
+func maybeUsernameOf(actor ipnauth.Actor) string {
+ var username string
+ if actor != nil {
+ username, _ = actor.Username()
+ }
+ return username
+}
+
+// VIPServices returns the list of tailnet services that this node
+// is serving as a destination for.
+// The returned memory is owned by the caller.
+func (b *LocalBackend) VIPServices() []*tailcfg.VIPService {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ return vipServicesFromPrefs(b.pm.CurrentPrefs())
+}
+
+func vipServicesFromPrefs(prefs ipn.PrefsView) []*tailcfg.VIPService {
+ // keyed by service name
+ var services map[string]*tailcfg.VIPService
+
+ // TODO(naman): this envknob will be replaced with service-specific port
+ // information once we start storing that.
+ var allPortsServices []string
+ if env := envknob.String("TS_DEBUG_ALLPORTS_SERVICES"); env != "" {
+ allPortsServices = strings.Split(env, ",")
+ }
+
+ for _, s := range allPortsServices {
+ mak.Set(&services, s, &tailcfg.VIPService{
+ Name: s,
+ Ports: []tailcfg.ProtoPortRange{{Ports: tailcfg.PortRangeAny}},
+ })
+ }
+
+ for _, s := range prefs.AdvertiseServices().AsSlice() {
+ if services == nil || services[s] == nil {
+ mak.Set(&services, s, &tailcfg.VIPService{
+ Name: s,
+ })
+ }
+ services[s].Active = true
+ }
+
+ return slices.Collect(maps.Values(services))
+}
diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go
index b0e12d5005431..b1be86392185d 100644
--- a/ipn/ipnlocal/local_test.go
+++ b/ipn/ipnlocal/local_test.go
@@ -13,8 +13,10 @@ import (
"net/http"
"net/netip"
"os"
+ "path/filepath"
"reflect"
"slices"
+ "strings"
"sync"
"testing"
"time"
@@ -28,9 +30,12 @@ import (
"tailscale.com/control/controlclient"
"tailscale.com/drive"
"tailscale.com/drive/driveimpl"
+ "tailscale.com/envknob"
"tailscale.com/health"
"tailscale.com/hostinfo"
"tailscale.com/ipn"
+ "tailscale.com/ipn/conffile"
+ "tailscale.com/ipn/ipnauth"
"tailscale.com/ipn/store/mem"
"tailscale.com/net/netcheck"
"tailscale.com/net/netmon"
@@ -52,6 +57,8 @@ import (
"tailscale.com/util/must"
"tailscale.com/util/set"
"tailscale.com/util/syspolicy"
+ "tailscale.com/util/syspolicy/setting"
+ "tailscale.com/util/syspolicy/source"
"tailscale.com/wgengine"
"tailscale.com/wgengine/filter"
"tailscale.com/wgengine/wgcfg"
@@ -428,20 +435,30 @@ func (panicOnUseTransport) RoundTrip(*http.Request) (*http.Response, error) {
}
func newTestLocalBackend(t testing.TB) *LocalBackend {
+ return newTestLocalBackendWithSys(t, new(tsd.System))
+}
+
+// newTestLocalBackendWithSys creates a new LocalBackend with the given tsd.System.
+// If the state store or engine are not set in sys, they will be set to a new
+// in-memory store and fake userspace engine, respectively.
+func newTestLocalBackendWithSys(t testing.TB, sys *tsd.System) *LocalBackend {
var logf logger.Logf = logger.Discard
- sys := new(tsd.System)
- store := new(mem.Store)
- sys.Set(store)
- eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry())
- if err != nil {
- t.Fatalf("NewFakeUserspaceEngine: %v", err)
+ if _, ok := sys.StateStore.GetOK(); !ok {
+ sys.Set(new(mem.Store))
+ }
+ if _, ok := sys.Engine.GetOK(); !ok {
+ eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry())
+ if err != nil {
+ t.Fatalf("NewFakeUserspaceEngine: %v", err)
+ }
+ t.Cleanup(eng.Close)
+ sys.Set(eng)
}
- t.Cleanup(eng.Close)
- sys.Set(eng)
lb, err := NewLocalBackend(logf, logid.PublicID{}, sys, 0)
if err != nil {
t.Fatalf("NewLocalBackend: %v", err)
}
+ t.Cleanup(lb.Shutdown)
return lb
}
@@ -1557,94 +1574,6 @@ func dnsResponse(domain, address string) []byte {
return must.Get(b.Finish())
}
-type errorSyspolicyHandler struct {
- t *testing.T
- err error
- key syspolicy.Key
- allowKeys map[syspolicy.Key]*string
-}
-
-func (h *errorSyspolicyHandler) ReadString(key string) (string, error) {
- sk := syspolicy.Key(key)
- if _, ok := h.allowKeys[sk]; !ok {
- h.t.Errorf("ReadString: %q is not in list of permitted keys", h.key)
- }
- if sk == h.key {
- return "", h.err
- }
- return "", syspolicy.ErrNoSuchKey
-}
-
-func (h *errorSyspolicyHandler) ReadUInt64(key string) (uint64, error) {
- h.t.Errorf("ReadUInt64(%q) unexpectedly called", key)
- return 0, syspolicy.ErrNoSuchKey
-}
-
-func (h *errorSyspolicyHandler) ReadBoolean(key string) (bool, error) {
- h.t.Errorf("ReadBoolean(%q) unexpectedly called", key)
- return false, syspolicy.ErrNoSuchKey
-}
-
-func (h *errorSyspolicyHandler) ReadStringArray(key string) ([]string, error) {
- h.t.Errorf("ReadStringArray(%q) unexpectedly called", key)
- return nil, syspolicy.ErrNoSuchKey
-}
-
-type mockSyspolicyHandler struct {
- t *testing.T
- // stringPolicies is the collection of policies that we expect to see
- // queried by the current test. If the policy is expected but unset, then
- // use nil, otherwise use a string equal to the policy's desired value.
- stringPolicies map[syspolicy.Key]*string
- // stringArrayPolicies is the collection of policies that we expected to see
- // queries by the current test, that return policy string arrays.
- stringArrayPolicies map[syspolicy.Key][]string
- // failUnknownPolicies is set if policies other than those in stringPolicies
- // (uint64 or bool policies are not supported by mockSyspolicyHandler yet)
- // should be considered a test failure if they are queried.
- failUnknownPolicies bool
-}
-
-func (h *mockSyspolicyHandler) ReadString(key string) (string, error) {
- if s, ok := h.stringPolicies[syspolicy.Key(key)]; ok {
- if s == nil {
- return "", syspolicy.ErrNoSuchKey
- }
- return *s, nil
- }
- if h.failUnknownPolicies {
- h.t.Errorf("ReadString(%q) unexpectedly called", key)
- }
- return "", syspolicy.ErrNoSuchKey
-}
-
-func (h *mockSyspolicyHandler) ReadUInt64(key string) (uint64, error) {
- if h.failUnknownPolicies {
- h.t.Errorf("ReadUInt64(%q) unexpectedly called", key)
- }
- return 0, syspolicy.ErrNoSuchKey
-}
-
-func (h *mockSyspolicyHandler) ReadBoolean(key string) (bool, error) {
- if h.failUnknownPolicies {
- h.t.Errorf("ReadBoolean(%q) unexpectedly called", key)
- }
- return false, syspolicy.ErrNoSuchKey
-}
-
-func (h *mockSyspolicyHandler) ReadStringArray(key string) ([]string, error) {
- if h.failUnknownPolicies {
- h.t.Errorf("ReadStringArray(%q) unexpectedly called", key)
- }
- if s, ok := h.stringArrayPolicies[syspolicy.Key(key)]; ok {
- if s == nil {
- return []string{}, syspolicy.ErrNoSuchKey
- }
- return s, nil
- }
- return nil, syspolicy.ErrNoSuchKey
-}
-
func TestSetExitNodeIDPolicy(t *testing.T) {
pfx := netip.MustParsePrefix
tests := []struct {
@@ -1854,23 +1783,21 @@ func TestSetExitNodeIDPolicy(t *testing.T) {
},
}
+ syspolicy.RegisterWellKnownSettingsForTest(t)
+
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
b := newTestBackend(t)
- msh := &mockSyspolicyHandler{
- t: t,
- stringPolicies: map[syspolicy.Key]*string{
- syspolicy.ExitNodeID: nil,
- syspolicy.ExitNodeIP: nil,
- },
- }
+
+ policyStore := source.NewTestStore(t)
if test.exitNodeIDKey {
- msh.stringPolicies[syspolicy.ExitNodeID] = &test.exitNodeID
+ policyStore.SetStrings(source.TestSettingOf(syspolicy.ExitNodeID, test.exitNodeID))
}
if test.exitNodeIPKey {
- msh.stringPolicies[syspolicy.ExitNodeIP] = &test.exitNodeIP
+ policyStore.SetStrings(source.TestSettingOf(syspolicy.ExitNodeIP, test.exitNodeIP))
}
- syspolicy.SetHandlerForTest(t, msh)
+ syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore)
+
if test.nm == nil {
test.nm = new(netmap.NetworkMap)
}
@@ -1882,7 +1809,16 @@ func TestSetExitNodeIDPolicy(t *testing.T) {
b.netMap = test.nm
b.pm = pm
b.lastSuggestedExitNode = test.lastSuggestedExitNode
- changed := setExitNodeID(b.pm.prefs.AsStruct(), test.nm, tailcfg.StableNodeID(test.lastSuggestedExitNode))
+
+ prefs := b.pm.prefs.AsStruct()
+ if changed := applySysPolicy(prefs, test.lastSuggestedExitNode) || setExitNodeID(prefs, test.nm); changed != test.prefsChanged {
+ t.Errorf("wanted prefs changed %v, got prefs changed %v", test.prefsChanged, changed)
+ }
+
+ // Both [LocalBackend.SetPrefsForTest] and [LocalBackend.EditPrefs]
+ // apply syspolicy settings to the current profile's preferences. Therefore,
+ // we pass the current, unmodified preferences and expect the effective
+ // preferences to change.
b.SetPrefsForTest(pm.CurrentPrefs().AsStruct())
if got := b.pm.prefs.ExitNodeID(); got != tailcfg.StableNodeID(test.exitNodeIDWant) {
@@ -1895,10 +1831,6 @@ func TestSetExitNodeIDPolicy(t *testing.T) {
} else if got.String() != test.exitNodeIPWant {
t.Errorf("got %v want %v", got, test.exitNodeIPWant)
}
-
- if changed != test.prefsChanged {
- t.Errorf("wanted prefs changed %v, got prefs changed %v", test.prefsChanged, changed)
- }
})
}
}
@@ -1992,13 +1924,13 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) {
report: report,
},
}
- msh := &mockSyspolicyHandler{
- t: t,
- stringPolicies: map[syspolicy.Key]*string{
- syspolicy.ExitNodeID: ptr.To("auto:any"),
- },
- }
- syspolicy.SetHandlerForTest(t, msh)
+
+ syspolicy.RegisterWellKnownSettingsForTest(t)
+ policyStore := source.NewTestStoreOf(t, source.TestSettingOf(
+ syspolicy.ExitNodeID, "auto:any",
+ ))
+ syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore)
+
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
b := newTestLocalBackend(t)
@@ -2047,13 +1979,11 @@ func TestAutoExitNodeSetNetInfoCallback(t *testing.T) {
}
cc = newClient(t, opts)
b.cc = cc
- msh := &mockSyspolicyHandler{
- t: t,
- stringPolicies: map[syspolicy.Key]*string{
- syspolicy.ExitNodeID: ptr.To("auto:any"),
- },
- }
- syspolicy.SetHandlerForTest(t, msh)
+ syspolicy.RegisterWellKnownSettingsForTest(t)
+ policyStore := source.NewTestStoreOf(t, source.TestSettingOf(
+ syspolicy.ExitNodeID, "auto:any",
+ ))
+ syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore)
peer1 := makePeer(1, withCap(26), withDERP(3), withSuggest(), withExitRoutes())
peer2 := makePeer(2, withCap(26), withDERP(2), withSuggest(), withExitRoutes())
selfNode := tailcfg.Node{
@@ -2158,13 +2088,11 @@ func TestSetControlClientStatusAutoExitNode(t *testing.T) {
DERPMap: derpMap,
}
b := newTestLocalBackend(t)
- msh := &mockSyspolicyHandler{
- t: t,
- stringPolicies: map[syspolicy.Key]*string{
- syspolicy.ExitNodeID: ptr.To("auto:any"),
- },
- }
- syspolicy.SetHandlerForTest(t, msh)
+ syspolicy.RegisterWellKnownSettingsForTest(t)
+ policyStore := source.NewTestStoreOf(t, source.TestSettingOf(
+ syspolicy.ExitNodeID, "auto:any",
+ ))
+ syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore)
b.netMap = nm
b.lastSuggestedExitNode = peer1.StableID()
b.sys.MagicSock.Get().SetLastNetcheckReportForTest(b.ctx, report)
@@ -2398,22 +2326,21 @@ func TestApplySysPolicy(t *testing.T) {
},
}
+ syspolicy.RegisterWellKnownSettingsForTest(t)
+
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- msh := &mockSyspolicyHandler{
- t: t,
- stringPolicies: make(map[syspolicy.Key]*string, len(tt.stringPolicies)),
- }
+ settings := make([]source.TestSetting[string], 0, len(tt.stringPolicies))
for p, v := range tt.stringPolicies {
- v := v // construct a unique pointer for each policy value
- msh.stringPolicies[p] = &v
+ settings = append(settings, source.TestSettingOf(p, v))
}
- syspolicy.SetHandlerForTest(t, msh)
+ policyStore := source.NewTestStoreOf(t, settings...)
+ syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore)
t.Run("unit", func(t *testing.T) {
prefs := tt.prefs.Clone()
- gotAnyChange := applySysPolicy(prefs)
+ gotAnyChange := applySysPolicy(prefs, "")
if gotAnyChange && prefs.Equals(&tt.prefs) {
t.Errorf("anyChange but prefs is unchanged: %v", prefs.Pretty())
@@ -2544,40 +2471,24 @@ func TestPreferencePolicyInfo(t *testing.T) {
},
}
+ syspolicy.RegisterWellKnownSettingsForTest(t)
+
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
for _, pp := range preferencePolicies {
t.Run(string(pp.key), func(t *testing.T) {
- var h syspolicy.Handler
-
- allPolicies := make(map[syspolicy.Key]*string, len(preferencePolicies)+1)
- allPolicies[syspolicy.ControlURL] = nil
- for _, pp := range preferencePolicies {
- allPolicies[pp.key] = nil
- }
-
- if tt.policyError != nil {
- h = &errorSyspolicyHandler{
- t: t,
- err: tt.policyError,
- key: pp.key,
- allowKeys: allPolicies,
- }
- } else {
- msh := &mockSyspolicyHandler{
- t: t,
- stringPolicies: allPolicies,
- failUnknownPolicies: true,
- }
- msh.stringPolicies[pp.key] = &tt.policyValue
- h = msh
+ s := source.TestSetting[string]{
+ Key: pp.key,
+ Error: tt.policyError,
+ Value: tt.policyValue,
}
- syspolicy.SetHandlerForTest(t, h)
+ policyStore := source.NewTestStoreOf(t, s)
+ syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore)
prefs := defaultPrefs.AsStruct()
pp.set(prefs, tt.initialValue)
- gotAnyChange := applySysPolicy(prefs)
+ gotAnyChange := applySysPolicy(prefs, "")
if gotAnyChange != tt.wantChange {
t.Errorf("anyChange=%v, want %v", gotAnyChange, tt.wantChange)
@@ -3140,12 +3051,10 @@ func deterministicNodeForTest(t testing.TB, want views.Slice[tailcfg.StableNodeI
var ret tailcfg.NodeView
gotIDs := make([]tailcfg.StableNodeID, got.Len())
- for i := range got.Len() {
- nv := got.At(i)
+ for i, nv := range got.All() {
if !nv.Valid() {
t.Fatalf("invalid node at index %v", i)
}
-
gotIDs[i] = nv.StableID()
if nv.StableID() == use {
ret = nv
@@ -3823,15 +3732,16 @@ func TestShouldAutoExitNode(t *testing.T) {
expectedBool: false,
},
}
+
+ syspolicy.RegisterWellKnownSettingsForTest(t)
+
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- msh := &mockSyspolicyHandler{
- t: t,
- stringPolicies: map[syspolicy.Key]*string{
- syspolicy.ExitNodeID: ptr.To(tt.exitNodeIDPolicyValue),
- },
- }
- syspolicy.SetHandlerForTest(t, msh)
+ policyStore := source.NewTestStoreOf(t, source.TestSettingOf(
+ syspolicy.ExitNodeID, tt.exitNodeIDPolicyValue,
+ ))
+ syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore)
+
got := shouldAutoExitNode()
if got != tt.expectedBool {
t.Fatalf("expected %v got %v for %v policy value", tt.expectedBool, got, tt.exitNodeIDPolicyValue)
@@ -3969,17 +3879,13 @@ func TestFillAllowedSuggestions(t *testing.T) {
want: []tailcfg.StableNodeID{"ABC", "def", "gHiJ"},
},
}
+ syspolicy.RegisterWellKnownSettingsForTest(t)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- mh := mockSyspolicyHandler{
- t: t,
- }
- if tt.allowPolicy != nil {
- mh.stringArrayPolicies = map[syspolicy.Key][]string{
- syspolicy.AllowedSuggestedExitNodes: tt.allowPolicy,
- }
- }
- syspolicy.SetHandlerForTest(t, &mh)
+ policyStore := source.NewTestStoreOf(t, source.TestSettingOf(
+ syspolicy.AllowedSuggestedExitNodes, tt.allowPolicy,
+ ))
+ syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore)
got := fillAllowedSuggestions()
if got == nil {
@@ -3998,3 +3904,784 @@ func TestFillAllowedSuggestions(t *testing.T) {
})
}
}
+
+func TestNotificationTargetMatch(t *testing.T) {
+ tests := []struct {
+ name string
+ target notificationTarget
+ actor ipnauth.Actor
+ wantMatch bool
+ }{
+ {
+ name: "AllClients/Nil",
+ target: allClients,
+ actor: nil,
+ wantMatch: true,
+ },
+ {
+ name: "AllClients/NoUID/NoCID",
+ target: allClients,
+ actor: &ipnauth.TestActor{},
+ wantMatch: true,
+ },
+ {
+ name: "AllClients/WithUID/NoCID",
+ target: allClients,
+ actor: &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.NoClientID},
+ wantMatch: true,
+ },
+ {
+ name: "AllClients/NoUID/WithCID",
+ target: allClients,
+ actor: &ipnauth.TestActor{CID: ipnauth.ClientIDFrom("A")},
+ wantMatch: true,
+ },
+ {
+ name: "AllClients/WithUID/WithCID",
+ target: allClients,
+ actor: &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.ClientIDFrom("A")},
+ wantMatch: true,
+ },
+ {
+ name: "FilterByUID/Nil",
+ target: notificationTarget{userID: "S-1-5-21-1-2-3-4"},
+ actor: nil,
+ wantMatch: false,
+ },
+ {
+ name: "FilterByUID/NoUID/NoCID",
+ target: notificationTarget{userID: "S-1-5-21-1-2-3-4"},
+ actor: &ipnauth.TestActor{},
+ wantMatch: false,
+ },
+ {
+ name: "FilterByUID/NoUID/WithCID",
+ target: notificationTarget{userID: "S-1-5-21-1-2-3-4"},
+ actor: &ipnauth.TestActor{CID: ipnauth.ClientIDFrom("A")},
+ wantMatch: false,
+ },
+ {
+ name: "FilterByUID/SameUID/NoCID",
+ target: notificationTarget{userID: "S-1-5-21-1-2-3-4"},
+ actor: &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4"},
+ wantMatch: true,
+ },
+ {
+ name: "FilterByUID/DifferentUID/NoCID",
+ target: notificationTarget{userID: "S-1-5-21-1-2-3-4"},
+ actor: &ipnauth.TestActor{UID: "S-1-5-21-5-6-7-8"},
+ wantMatch: false,
+ },
+ {
+ name: "FilterByUID/SameUID/WithCID",
+ target: notificationTarget{userID: "S-1-5-21-1-2-3-4"},
+ actor: &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.ClientIDFrom("A")},
+ wantMatch: true,
+ },
+ {
+ name: "FilterByUID/DifferentUID/WithCID",
+ target: notificationTarget{userID: "S-1-5-21-1-2-3-4"},
+ actor: &ipnauth.TestActor{UID: "S-1-5-21-5-6-7-8", CID: ipnauth.ClientIDFrom("A")},
+ wantMatch: false,
+ },
+ {
+ name: "FilterByCID/Nil",
+ target: notificationTarget{clientID: ipnauth.ClientIDFrom("A")},
+ actor: nil,
+ wantMatch: false,
+ },
+ {
+ name: "FilterByCID/NoUID/NoCID",
+ target: notificationTarget{clientID: ipnauth.ClientIDFrom("A")},
+ actor: &ipnauth.TestActor{},
+ wantMatch: false,
+ },
+ {
+ name: "FilterByCID/NoUID/SameCID",
+ target: notificationTarget{clientID: ipnauth.ClientIDFrom("A")},
+ actor: &ipnauth.TestActor{CID: ipnauth.ClientIDFrom("A")},
+ wantMatch: true,
+ },
+ {
+ name: "FilterByCID/NoUID/DifferentCID",
+ target: notificationTarget{clientID: ipnauth.ClientIDFrom("A")},
+ actor: &ipnauth.TestActor{CID: ipnauth.ClientIDFrom("B")},
+ wantMatch: false,
+ },
+ {
+ name: "FilterByCID/WithUID/NoCID",
+ target: notificationTarget{clientID: ipnauth.ClientIDFrom("A")},
+ actor: &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4"},
+ wantMatch: false,
+ },
+ {
+ name: "FilterByCID/WithUID/SameCID",
+ target: notificationTarget{clientID: ipnauth.ClientIDFrom("A")},
+ actor: &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.ClientIDFrom("A")},
+ wantMatch: true,
+ },
+ {
+ name: "FilterByCID/WithUID/DifferentCID",
+ target: notificationTarget{clientID: ipnauth.ClientIDFrom("A")},
+ actor: &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.ClientIDFrom("B")},
+ wantMatch: false,
+ },
+ {
+ name: "FilterByUID+CID/Nil",
+ target: notificationTarget{userID: "S-1-5-21-1-2-3-4"},
+ actor: nil,
+ wantMatch: false,
+ },
+ {
+ name: "FilterByUID+CID/NoUID/NoCID",
+ target: notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")},
+ actor: &ipnauth.TestActor{},
+ wantMatch: false,
+ },
+ {
+ name: "FilterByUID+CID/NoUID/SameCID",
+ target: notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")},
+ actor: &ipnauth.TestActor{CID: ipnauth.ClientIDFrom("A")},
+ wantMatch: false,
+ },
+ {
+ name: "FilterByUID+CID/NoUID/DifferentCID",
+ target: notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")},
+ actor: &ipnauth.TestActor{CID: ipnauth.ClientIDFrom("B")},
+ wantMatch: false,
+ },
+ {
+ name: "FilterByUID+CID/SameUID/NoCID",
+ target: notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")},
+ actor: &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4"},
+ wantMatch: false,
+ },
+ {
+ name: "FilterByUID+CID/SameUID/SameCID",
+ target: notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")},
+ actor: &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.ClientIDFrom("A")},
+ wantMatch: true,
+ },
+ {
+ name: "FilterByUID+CID/SameUID/DifferentCID",
+ target: notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")},
+ actor: &ipnauth.TestActor{UID: "S-1-5-21-1-2-3-4", CID: ipnauth.ClientIDFrom("B")},
+ wantMatch: false,
+ },
+ {
+ name: "FilterByUID+CID/DifferentUID/NoCID",
+ target: notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")},
+ actor: &ipnauth.TestActor{UID: "S-1-5-21-5-6-7-8"},
+ wantMatch: false,
+ },
+ {
+ name: "FilterByUID+CID/DifferentUID/SameCID",
+ target: notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")},
+ actor: &ipnauth.TestActor{UID: "S-1-5-21-5-6-7-8", CID: ipnauth.ClientIDFrom("A")},
+ wantMatch: false,
+ },
+ {
+ name: "FilterByUID+CID/DifferentUID/DifferentCID",
+ target: notificationTarget{userID: "S-1-5-21-1-2-3-4", clientID: ipnauth.ClientIDFrom("A")},
+ actor: &ipnauth.TestActor{UID: "S-1-5-21-5-6-7-8", CID: ipnauth.ClientIDFrom("B")},
+ wantMatch: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ gotMatch := tt.target.match(tt.actor)
+ if gotMatch != tt.wantMatch {
+ t.Errorf("match: got %v; want %v", gotMatch, tt.wantMatch)
+ }
+ })
+ }
+}
+
+type newTestControlFn func(tb testing.TB, opts controlclient.Options) controlclient.Client
+
+func newLocalBackendWithTestControl(t *testing.T, enableLogging bool, newControl newTestControlFn) *LocalBackend {
+ logf := logger.Discard
+ if enableLogging {
+ logf = tstest.WhileTestRunningLogger(t)
+ }
+ sys := new(tsd.System)
+ store := new(mem.Store)
+ sys.Set(store)
+ e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry())
+ if err != nil {
+ t.Fatalf("NewFakeUserspaceEngine: %v", err)
+ }
+ t.Cleanup(e.Close)
+ sys.Set(e)
+
+ b, err := NewLocalBackend(logf, logid.PublicID{}, sys, 0)
+ if err != nil {
+ t.Fatalf("NewLocalBackend: %v", err)
+ }
+ t.Cleanup(b.Shutdown)
+ b.DisablePortMapperForTest()
+
+ b.SetControlClientGetterForTesting(func(opts controlclient.Options) (controlclient.Client, error) {
+ return newControl(t, opts), nil
+ })
+ return b
+}
+
+// notificationHandler is any function that can process (e.g., check) a notification.
+// It returns whether the notification has been handled or should be passed to the next handler.
+// The handler may be called from any goroutine, so it must avoid calling functions
+// that are restricted to the goroutine running the test or benchmark function,
+// such as [testing.common.FailNow] and [testing.common.Fatalf].
+type notificationHandler func(testing.TB, ipnauth.Actor, *ipn.Notify) bool
+
+// wantedNotification names a [notificationHandler] that processes a notification
+// the test expects and wants to receive. The name is used to report notifications
+// that haven't been received within the expected timeout.
+type wantedNotification struct {
+ name string
+ cond notificationHandler
+}
+
+// notificationWatcher observes [LocalBackend] notifications as the specified actor,
+// reporting missing but expected notifications using [testing.common.Error],
+// and delegating the handling of unexpected notifications to the [notificationHandler]s.
+type notificationWatcher struct {
+ tb testing.TB
+ lb *LocalBackend
+ actor ipnauth.Actor
+
+ mu sync.Mutex
+ mask ipn.NotifyWatchOpt
+ want []wantedNotification // notifications we want to receive
+ unexpected []notificationHandler // funcs that are called to check any other notifications
+ ctxCancel context.CancelFunc // cancels the outstanding [LocalBackend.WatchNotificationsAs] call
+ got []*ipn.Notify // all notifications, both wanted and unexpected, we've received so far
+ gotWanted []*ipn.Notify // only the expected notifications; holds nil for any notification that hasn't been received
+ gotWantedCh chan struct{} // closed when we have received the last wanted notification
+ doneCh chan struct{} // closed when [LocalBackend.WatchNotificationsAs] returns
+}
+
+func newNotificationWatcher(tb testing.TB, lb *LocalBackend, actor ipnauth.Actor) *notificationWatcher {
+ return &notificationWatcher{tb: tb, lb: lb, actor: actor}
+}
+
+func (w *notificationWatcher) watch(mask ipn.NotifyWatchOpt, wanted []wantedNotification, unexpected ...notificationHandler) {
+ w.tb.Helper()
+
+ // Cancel any outstanding [LocalBackend.WatchNotificationsAs] calls.
+ w.mu.Lock()
+ ctxCancel := w.ctxCancel
+ doneCh := w.doneCh
+ w.mu.Unlock()
+ if doneCh != nil {
+ ctxCancel()
+ <-doneCh
+ }
+
+ doneCh = make(chan struct{})
+ gotWantedCh := make(chan struct{})
+ ctx, ctxCancel := context.WithCancel(context.Background())
+ w.tb.Cleanup(func() {
+ ctxCancel()
+ <-doneCh
+ })
+
+ w.mu.Lock()
+ w.mask = mask
+ w.want = wanted
+ w.unexpected = unexpected
+ w.ctxCancel = ctxCancel
+ w.got = nil
+ w.gotWanted = make([]*ipn.Notify, len(wanted))
+ w.gotWantedCh = gotWantedCh
+ w.doneCh = doneCh
+ w.mu.Unlock()
+
+ watchAddedCh := make(chan struct{})
+ go func() {
+ defer close(doneCh)
+ if len(wanted) == 0 {
+ close(gotWantedCh)
+ if len(unexpected) == 0 {
+ close(watchAddedCh)
+ return
+ }
+ }
+
+ var nextWantIdx int
+ w.lb.WatchNotificationsAs(ctx, w.actor, w.mask, func() { close(watchAddedCh) }, func(notify *ipn.Notify) (keepGoing bool) {
+ w.tb.Helper()
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ w.got = append(w.got, notify)
+
+ wanted := false
+ for i := nextWantIdx; i < len(w.want); i++ {
+ if wanted = w.want[i].cond(w.tb, w.actor, notify); wanted {
+ w.gotWanted[i] = notify
+ nextWantIdx = i + 1
+ break
+ }
+ }
+
+ if wanted && nextWantIdx == len(w.want) {
+ close(w.gotWantedCh)
+ if len(w.unexpected) == 0 {
+ // If we have received the last wanted notification,
+ // and we don't have any handlers for the unexpected notifications,
+ // we can stop the watcher right away.
+ return false
+ }
+
+ }
+
+ if !wanted {
+ // If we've received a notification we didn't expect,
+ // it could either be an unwanted notification caused by a bug
+ // or just a miscellaneous one that's irrelevant for the current test.
+ // Call unexpected notification handlers, if any, to
+ // check and fail the test if necessary.
+ for _, h := range w.unexpected {
+ if h(w.tb, w.actor, notify) {
+ break
+ }
+ }
+ }
+
+ return true
+ })
+
+ }()
+ <-watchAddedCh
+}
+
+func (w *notificationWatcher) check() []*ipn.Notify {
+ w.tb.Helper()
+
+ w.mu.Lock()
+ cancel := w.ctxCancel
+ gotWantedCh := w.gotWantedCh
+ checkUnexpected := len(w.unexpected) != 0
+ doneCh := w.doneCh
+ w.mu.Unlock()
+
+ // Wait for up to 10 seconds to receive expected notifications.
+ timeout := 10 * time.Second
+ for {
+ select {
+ case <-gotWantedCh:
+ if checkUnexpected {
+ gotWantedCh = nil
+ // But do not wait longer than 500ms for unexpected notifications after
+ // the expected notifications have been received.
+ timeout = 500 * time.Millisecond
+ continue
+ }
+ case <-doneCh:
+ // [LocalBackend.WatchNotificationsAs] has already returned, so no further
+ // notifications will be received. There's no reason to wait any longer.
+ case <-time.After(timeout):
+ }
+ cancel()
+ <-doneCh
+ break
+ }
+
+ // Report missing notifications, if any, and log all received notifications,
+ // including both expected and unexpected ones.
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if hasMissing := slices.Contains(w.gotWanted, nil); hasMissing {
+ want := make([]string, len(w.want))
+ got := make([]string, 0, len(w.want))
+ for i, wn := range w.want {
+ want[i] = wn.name
+ if w.gotWanted[i] != nil {
+ got = append(got, wn.name)
+ }
+ }
+ w.tb.Errorf("Notifications(%s): got %q; want %q", actorDescriptionForTest(w.actor), strings.Join(got, ", "), strings.Join(want, ", "))
+ for i, n := range w.got {
+ w.tb.Logf("%d. %v", i, n)
+ }
+ return nil
+ }
+
+ return w.gotWanted
+}
+
+func actorDescriptionForTest(actor ipnauth.Actor) string {
+ var parts []string
+ if actor != nil {
+ if name, _ := actor.Username(); name != "" {
+ parts = append(parts, name)
+ }
+ if uid := actor.UserID(); uid != "" {
+ parts = append(parts, string(uid))
+ }
+ if clientID, _ := actor.ClientID(); clientID != ipnauth.NoClientID {
+ parts = append(parts, clientID.String())
+ }
+ }
+ return fmt.Sprintf("Actor{%s}", strings.Join(parts, ", "))
+}
+
+func TestLoginNotifications(t *testing.T) {
+ const (
+ enableLogging = true
+ controlURL = "https://localhost:1/"
+ loginURL = "https://localhost:1/1"
+ )
+
+ wantBrowseToURL := wantedNotification{
+ name: "BrowseToURL",
+ cond: func(t testing.TB, actor ipnauth.Actor, n *ipn.Notify) bool {
+ if n.BrowseToURL != nil && *n.BrowseToURL != loginURL {
+ t.Errorf("BrowseToURL (%s): got %q; want %q", actorDescriptionForTest(actor), *n.BrowseToURL, loginURL)
+ return false
+ }
+ return n.BrowseToURL != nil
+ },
+ }
+ unexpectedBrowseToURL := func(t testing.TB, actor ipnauth.Actor, n *ipn.Notify) bool {
+ if n.BrowseToURL != nil {
+ t.Errorf("Unexpected BrowseToURL(%s): %v", actorDescriptionForTest(actor), n)
+ return true
+ }
+ return false
+ }
+
+ tests := []struct {
+ name string
+ logInAs ipnauth.Actor
+ urlExpectedBy []ipnauth.Actor
+ urlUnexpectedBy []ipnauth.Actor
+ }{
+ {
+ name: "NoObservers",
+ logInAs: &ipnauth.TestActor{UID: "A"},
+ urlExpectedBy: []ipnauth.Actor{}, // ensure that it does not panic if no one is watching
+ },
+ {
+ name: "SingleUser",
+ logInAs: &ipnauth.TestActor{UID: "A"},
+ urlExpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "A"}},
+ },
+ {
+ name: "SameUser/TwoSessions/NoCID",
+ logInAs: &ipnauth.TestActor{UID: "A"},
+ urlExpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "A"}, &ipnauth.TestActor{UID: "A"}},
+ },
+ {
+ name: "SameUser/TwoSessions/OneWithCID",
+ logInAs: &ipnauth.TestActor{UID: "A", CID: ipnauth.ClientIDFrom("123")},
+ urlExpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "A", CID: ipnauth.ClientIDFrom("123")}},
+ urlUnexpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "A"}},
+ },
+ {
+ name: "SameUser/TwoSessions/BothWithCID",
+ logInAs: &ipnauth.TestActor{UID: "A", CID: ipnauth.ClientIDFrom("123")},
+ urlExpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "A", CID: ipnauth.ClientIDFrom("123")}},
+ urlUnexpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "A", CID: ipnauth.ClientIDFrom("456")}},
+ },
+ {
+ name: "DifferentUsers/NoCID",
+ logInAs: &ipnauth.TestActor{UID: "A"},
+ urlExpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "A"}},
+ urlUnexpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "B"}},
+ },
+ {
+ name: "DifferentUsers/SameCID",
+ logInAs: &ipnauth.TestActor{UID: "A"},
+ urlExpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "A", CID: ipnauth.ClientIDFrom("123")}},
+ urlUnexpectedBy: []ipnauth.Actor{&ipnauth.TestActor{UID: "B", CID: ipnauth.ClientIDFrom("123")}},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+
+ lb := newLocalBackendWithTestControl(t, enableLogging, func(tb testing.TB, opts controlclient.Options) controlclient.Client {
+ return newClient(tb, opts)
+ })
+ if _, err := lb.EditPrefs(&ipn.MaskedPrefs{ControlURLSet: true, Prefs: ipn.Prefs{ControlURL: controlURL}}); err != nil {
+ t.Fatalf("(*EditPrefs).Start(): %v", err)
+ }
+ if err := lb.Start(ipn.Options{}); err != nil {
+ t.Fatalf("(*LocalBackend).Start(): %v", err)
+ }
+
+ sessions := make([]*notificationWatcher, 0, len(tt.urlExpectedBy)+len(tt.urlUnexpectedBy))
+ for _, actor := range tt.urlExpectedBy {
+ session := newNotificationWatcher(t, lb, actor)
+ session.watch(0, []wantedNotification{wantBrowseToURL})
+ sessions = append(sessions, session)
+ }
+ for _, actor := range tt.urlUnexpectedBy {
+ session := newNotificationWatcher(t, lb, actor)
+ session.watch(0, nil, unexpectedBrowseToURL)
+ sessions = append(sessions, session)
+ }
+
+ if err := lb.StartLoginInteractiveAs(context.Background(), tt.logInAs); err != nil {
+ t.Fatal(err)
+ }
+
+ lb.cc.(*mockControl).send(nil, loginURL, false, nil)
+
+ var wg sync.WaitGroup
+ wg.Add(len(sessions))
+ for _, sess := range sessions {
+ go func() { // check all sessions in parallel
+ sess.check()
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+ })
+ }
+}
+
+// TestConfigFileReload tests that the LocalBackend reloads its configuration
+// when the configuration file changes.
+func TestConfigFileReload(t *testing.T) {
+ cfg1 := `{"Hostname": "foo", "Version": "alpha0"}`
+ f := filepath.Join(t.TempDir(), "cfg")
+ must.Do(os.WriteFile(f, []byte(cfg1), 0600))
+ sys := new(tsd.System)
+ sys.InitialConfig = must.Get(conffile.Load(f))
+ lb := newTestLocalBackendWithSys(t, sys)
+ must.Do(lb.Start(ipn.Options{}))
+
+ lb.mu.Lock()
+ hn := lb.hostinfo.Hostname
+ lb.mu.Unlock()
+ if hn != "foo" {
+ t.Fatalf("got %q; want %q", hn, "foo")
+ }
+
+ cfg2 := `{"Hostname": "bar", "Version": "alpha0"}`
+ must.Do(os.WriteFile(f, []byte(cfg2), 0600))
+ if !must.Get(lb.ReloadConfig()) {
+ t.Fatal("reload failed")
+ }
+
+ lb.mu.Lock()
+ hn = lb.hostinfo.Hostname
+ lb.mu.Unlock()
+ if hn != "bar" {
+ t.Fatalf("got %q; want %q", hn, "bar")
+ }
+}
+
+func TestGetVIPServices(t *testing.T) {
+ tests := []struct {
+ name string
+ advertised []string
+ mapped []string
+ want []*tailcfg.VIPService
+ }{
+ {
+ "advertised-only",
+ []string{"svc:abc", "svc:def"},
+ []string{},
+ []*tailcfg.VIPService{
+ {
+ Name: "svc:abc",
+ Active: true,
+ },
+ {
+ Name: "svc:def",
+ Active: true,
+ },
+ },
+ },
+ {
+ "mapped-only",
+ []string{},
+ []string{"svc:abc"},
+ []*tailcfg.VIPService{
+ {
+ Name: "svc:abc",
+ Ports: []tailcfg.ProtoPortRange{{Ports: tailcfg.PortRangeAny}},
+ },
+ },
+ },
+ {
+ "mapped-and-advertised",
+ []string{"svc:abc"},
+ []string{"svc:abc"},
+ []*tailcfg.VIPService{
+ {
+ Name: "svc:abc",
+ Active: true,
+ Ports: []tailcfg.ProtoPortRange{{Ports: tailcfg.PortRangeAny}},
+ },
+ },
+ },
+ {
+ "mapped-and-advertised-separately",
+ []string{"svc:def"},
+ []string{"svc:abc"},
+ []*tailcfg.VIPService{
+ {
+ Name: "svc:abc",
+ Ports: []tailcfg.ProtoPortRange{{Ports: tailcfg.PortRangeAny}},
+ },
+ {
+ Name: "svc:def",
+ Active: true,
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ envknob.Setenv("TS_DEBUG_ALLPORTS_SERVICES", strings.Join(tt.mapped, ","))
+ prefs := &ipn.Prefs{
+ AdvertiseServices: tt.advertised,
+ }
+ got := vipServicesFromPrefs(prefs.View())
+ slices.SortFunc(got, func(a, b *tailcfg.VIPService) int {
+ return strings.Compare(a.Name, b.Name)
+ })
+ if !reflect.DeepEqual(tt.want, got) {
+ t.Logf("want:")
+ for _, s := range tt.want {
+ t.Logf("%+v", s)
+ }
+ t.Logf("got:")
+ for _, s := range got {
+ t.Logf("%+v", s)
+ }
+ t.Fail()
+ return
+ }
+ })
+ }
+}
+
+func TestUpdatePrefsOnSysPolicyChange(t *testing.T) {
+ const enableLogging = false
+
+ type fieldChange struct {
+ name string
+ want any
+ }
+
+ wantPrefsChanges := func(want ...fieldChange) *wantedNotification {
+ return &wantedNotification{
+ name: "Prefs",
+ cond: func(t testing.TB, actor ipnauth.Actor, n *ipn.Notify) bool {
+ if n.Prefs != nil {
+ prefs := reflect.Indirect(reflect.ValueOf(n.Prefs.AsStruct()))
+ for _, f := range want {
+ got := prefs.FieldByName(f.name).Interface()
+ if !reflect.DeepEqual(got, f.want) {
+ t.Errorf("%v: got %v; want %v", f.name, got, f.want)
+ }
+ }
+ }
+ return n.Prefs != nil
+ },
+ }
+ }
+
+ unexpectedPrefsChange := func(t testing.TB, _ ipnauth.Actor, n *ipn.Notify) bool {
+ if n.Prefs != nil {
+ t.Errorf("Unexpected Prefs: %v", n.Prefs.Pretty())
+ return true
+ }
+ return false
+ }
+
+ tests := []struct {
+ name string
+ initialPrefs *ipn.Prefs
+ stringSettings []source.TestSetting[string]
+ want *wantedNotification
+ }{
+ {
+ name: "ShieldsUp/True",
+ stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.EnableIncomingConnections, "never")},
+ want: wantPrefsChanges(fieldChange{"ShieldsUp", true}),
+ },
+ {
+ name: "ShieldsUp/False",
+ initialPrefs: &ipn.Prefs{ShieldsUp: true},
+ stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.EnableIncomingConnections, "always")},
+ want: wantPrefsChanges(fieldChange{"ShieldsUp", false}),
+ },
+ {
+ name: "ExitNodeID",
+ stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.ExitNodeID, "foo")},
+ want: wantPrefsChanges(fieldChange{"ExitNodeID", tailcfg.StableNodeID("foo")}),
+ },
+ {
+ name: "EnableRunExitNode",
+ stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.EnableRunExitNode, "always")},
+ want: wantPrefsChanges(fieldChange{"AdvertiseRoutes", []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()}}),
+ },
+ {
+ name: "Multiple",
+ initialPrefs: &ipn.Prefs{
+ ExitNodeAllowLANAccess: true,
+ },
+ stringSettings: []source.TestSetting[string]{
+ source.TestSettingOf(syspolicy.EnableServerMode, "always"),
+ source.TestSettingOf(syspolicy.ExitNodeAllowLANAccess, "never"),
+ source.TestSettingOf(syspolicy.ExitNodeIP, "127.0.0.1"),
+ },
+ want: wantPrefsChanges(
+ fieldChange{"ForceDaemon", true},
+ fieldChange{"ExitNodeAllowLANAccess", false},
+ fieldChange{"ExitNodeIP", netip.MustParseAddr("127.0.0.1")},
+ ),
+ },
+ {
+ name: "NoChange",
+ initialPrefs: &ipn.Prefs{
+ CorpDNS: true,
+ ExitNodeID: "foo",
+ AdvertiseRoutes: []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()},
+ },
+ stringSettings: []source.TestSetting[string]{
+ source.TestSettingOf(syspolicy.EnableTailscaleDNS, "always"),
+ source.TestSettingOf(syspolicy.ExitNodeID, "foo"),
+ source.TestSettingOf(syspolicy.EnableRunExitNode, "always"),
+ },
+ want: nil, // syspolicy settings match the preferences; no change notification is expected.
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ syspolicy.RegisterWellKnownSettingsForTest(t)
+ store := source.NewTestStoreOf[string](t)
+ syspolicy.MustRegisterStoreForTest(t, "TestSource", setting.DeviceScope, store)
+
+ lb := newLocalBackendWithTestControl(t, enableLogging, func(tb testing.TB, opts controlclient.Options) controlclient.Client {
+ return newClient(tb, opts)
+ })
+ if tt.initialPrefs != nil {
+ lb.SetPrefsForTest(tt.initialPrefs)
+ }
+ if err := lb.Start(ipn.Options{}); err != nil {
+ t.Fatalf("(*LocalBackend).Start(): %v", err)
+ }
+
+ nw := newNotificationWatcher(t, lb, &ipnauth.TestActor{})
+ if tt.want != nil {
+ nw.watch(0, []wantedNotification{*tt.want})
+ } else {
+ nw.watch(0, nil, unexpectedPrefsChange)
+ }
+
+ store.SetStrings(tt.stringSettings...)
+
+ nw.check()
+ })
+ }
+}
diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go
index d20bf94eb971a..bf14d339ed890 100644
--- a/ipn/ipnlocal/network-lock.go
+++ b/ipn/ipnlocal/network-lock.go
@@ -430,8 +430,7 @@ func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM, per
}
bootstrapStateID := fmt.Sprintf("%d:%d", genesis.State.StateID1, genesis.State.StateID2)
- for i := range persist.DisallowedTKAStateIDs().Len() {
- stateID := persist.DisallowedTKAStateIDs().At(i)
+ for _, stateID := range persist.DisallowedTKAStateIDs().All() {
if stateID == bootstrapStateID {
return fmt.Errorf("TKA with stateID of %q is disallowed on this node", stateID)
}
@@ -572,8 +571,7 @@ func tkaStateFromPeer(p tailcfg.NodeView) ipnstate.TKAPeer {
TailscaleIPs: make([]netip.Addr, 0, p.Addresses().Len()),
NodeKey: p.Key(),
}
- for i := range p.Addresses().Len() {
- addr := p.Addresses().At(i)
+ for _, addr := range p.Addresses().All() {
if addr.IsSingleIP() && tsaddr.IsTailscaleIP(addr.Addr()) {
fp.TailscaleIPs = append(fp.TailscaleIPs, addr.Addr())
}
diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go
index 67d521f0968eb..61bed05527167 100644
--- a/ipn/ipnlocal/serve.go
+++ b/ipn/ipnlocal/serve.go
@@ -242,8 +242,7 @@ func (b *LocalBackend) updateServeTCPPortNetMapAddrListenersLocked(ports []uint1
}
addrs := nm.GetAddresses()
- for i := range addrs.Len() {
- a := addrs.At(i)
+ for _, a := range addrs.All() {
for _, p := range ports {
addrPort := netip.AddrPortFrom(a.Addr(), p)
if _, ok := b.serveListeners[addrPort]; ok {
diff --git a/ipn/ipnlocal/ssh.go b/ipn/ipnlocal/ssh.go
index fbeb19bd15bd1..383d03f5aa9be 100644
--- a/ipn/ipnlocal/ssh.go
+++ b/ipn/ipnlocal/ssh.go
@@ -27,7 +27,7 @@ import (
"github.com/tailscale/golang-x-crypto/ssh"
"go4.org/mem"
"tailscale.com/tailcfg"
- "tailscale.com/util/lineread"
+ "tailscale.com/util/lineiter"
"tailscale.com/util/mak"
)
@@ -80,30 +80,32 @@ func (b *LocalBackend) getSSHUsernames(req *tailcfg.C2NSSHUsernamesRequest) (*ta
if err != nil {
return nil, err
}
- lineread.Reader(bytes.NewReader(out), func(line []byte) error {
+ for line := range lineiter.Bytes(out) {
line = bytes.TrimSpace(line)
if len(line) == 0 || line[0] == '_' {
- return nil
+ continue
}
add(string(line))
- return nil
- })
+ }
default:
- lineread.File("/etc/passwd", func(line []byte) error {
+ for lr := range lineiter.File("/etc/passwd") {
+ line, err := lr.Value()
+ if err != nil {
+ break
+ }
line = bytes.TrimSpace(line)
if len(line) == 0 || line[0] == '#' || line[0] == '_' {
- return nil
+ continue
}
if mem.HasSuffix(mem.B(line), mem.S("/nologin")) ||
mem.HasSuffix(mem.B(line), mem.S("/false")) {
- return nil
+ continue
}
colon := bytes.IndexByte(line, ':')
if colon != -1 {
add(string(line[:colon]))
}
- return nil
- })
+ }
}
return res, nil
}
diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go
index bebd0152b5a36..ef4b0ed62809f 100644
--- a/ipn/ipnlocal/state_test.go
+++ b/ipn/ipnlocal/state_test.go
@@ -309,6 +309,7 @@ func TestStateMachine(t *testing.T) {
if err != nil {
t.Fatalf("NewLocalBackend: %v", err)
}
+ t.Cleanup(b.Shutdown)
b.DisablePortMapperForTest()
var cc, previousCC *mockControl
@@ -942,6 +943,7 @@ func TestEditPrefsHasNoKeys(t *testing.T) {
if err != nil {
t.Fatalf("NewLocalBackend: %v", err)
}
+ t.Cleanup(b.Shutdown)
b.hostinfo = &tailcfg.Hostinfo{OS: "testos"}
b.pm.SetPrefs((&ipn.Prefs{
Persist: &persist.Persist{
@@ -1023,6 +1025,7 @@ func TestWGEngineStatusRace(t *testing.T) {
sys.Set(eng)
b, err := NewLocalBackend(logf, logid.PublicID{}, sys, 0)
c.Assert(err, qt.IsNil)
+ t.Cleanup(b.Shutdown)
var cc *mockControl
b.SetControlClientGetterForTesting(func(opts controlclient.Options) (controlclient.Client, error) {
diff --git a/ipn/ipnlocal/web_client.go b/ipn/ipnlocal/web_client.go
index ccde9f01dced0..37fc31819dac4 100644
--- a/ipn/ipnlocal/web_client.go
+++ b/ipn/ipnlocal/web_client.go
@@ -121,8 +121,8 @@ func (b *LocalBackend) updateWebClientListenersLocked() {
}
addrs := b.netMap.GetAddresses()
- for i := range addrs.Len() {
- addrPort := netip.AddrPortFrom(addrs.At(i).Addr(), webClientPort)
+ for _, pfx := range addrs.All() {
+ addrPort := netip.AddrPortFrom(pfx.Addr(), webClientPort)
if _, ok := b.webClientListeners[addrPort]; ok {
continue // already listening
}
diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go
index 761c9816cab27..63d4b183ca11d 100644
--- a/ipn/ipnserver/actor.go
+++ b/ipn/ipnserver/actor.go
@@ -31,6 +31,7 @@ type actor struct {
logf logger.Logf
ci *ipnauth.ConnIdentity
+ clientID ipnauth.ClientID
isLocalSystem bool // whether the actor is the Windows' Local System identity.
}
@@ -39,7 +40,22 @@ func newActor(logf logger.Logf, c net.Conn) (*actor, error) {
if err != nil {
return nil, err
}
- return &actor{logf: logf, ci: ci, isLocalSystem: connIsLocalSystem(ci)}, nil
+ var clientID ipnauth.ClientID
+ if pid := ci.Pid(); pid != 0 {
+ // Derive [ipnauth.ClientID] from the PID of the connected client process.
+ // TODO(nickkhyl): This is transient and will be re-worked as we
+ // progress on tailscale/corp#18342. At minimum, we should use a 2-tuple
+ // (PID + StartTime) or a 3-tuple (PID + StartTime + UID) to identify
+ // the client process. This helps prevent security issues where a
+ // terminated client process's PID could be reused by a different
+ // process. This is not currently an issue as we allow only one user to
+ // connect anyway.
+ // Additionally, we should consider caching authentication results since
+ // operations like retrieving a username by SID might require network
+ // connectivity on domain-joined devices and/or be slow.
+ clientID = ipnauth.ClientIDFrom(pid)
+ }
+ return &actor{logf: logf, ci: ci, clientID: clientID, isLocalSystem: connIsLocalSystem(ci)}, nil
}
// IsLocalSystem implements [ipnauth.Actor].
@@ -61,6 +77,11 @@ func (a *actor) pid() int {
return a.ci.Pid()
}
+// ClientID implements [ipnauth.Actor].
+func (a *actor) ClientID() (_ ipnauth.ClientID, ok bool) {
+ return a.clientID, a.clientID != ipnauth.NoClientID
+}
+
// Username implements [ipnauth.Actor].
func (a *actor) Username() (string, error) {
if a.ci == nil {
diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go
index 528304bab77d4..c14a4bdf285df 100644
--- a/ipn/localapi/localapi.go
+++ b/ipn/localapi/localapi.go
@@ -62,7 +62,8 @@ import (
"tailscale.com/util/osdiag"
"tailscale.com/util/progresstracking"
"tailscale.com/util/rands"
- "tailscale.com/util/testenv"
+ "tailscale.com/util/syspolicy/rsop"
+ "tailscale.com/util/syspolicy/setting"
"tailscale.com/version"
"tailscale.com/wgengine/magicsock"
)
@@ -77,6 +78,7 @@ var handler = map[string]localAPIHandler{
"cert/": (*Handler).serveCert,
"file-put/": (*Handler).serveFilePut,
"files/": (*Handler).serveFiles,
+ "policy/": (*Handler).servePolicy,
"profiles/": (*Handler).serveProfiles,
// The other /localapi/v0/NAME handlers are exact matches and contain only NAME
@@ -98,6 +100,7 @@ var handler = map[string]localAPIHandler{
"derpmap": (*Handler).serveDERPMap,
"dev-set-state-store": (*Handler).serveDevSetStateStore,
"dial": (*Handler).serveDial,
+ "disconnect-control": (*Handler).disconnectControl,
"dns-osconfig": (*Handler).serveDNSOSConfig,
"dns-query": (*Handler).serveDNSQuery,
"drive/fileserver-address": (*Handler).serveDriveServerAddr,
@@ -560,6 +563,7 @@ func (h *Handler) serveLogTap(w http.ResponseWriter, r *http.Request) {
}
func (h *Handler) serveMetrics(w http.ResponseWriter, r *http.Request) {
+ metricDebugMetricsCalls.Add(1)
// Require write access out of paranoia that the metrics
// might contain something sensitive.
if !h.PermitWrite {
@@ -570,15 +574,10 @@ func (h *Handler) serveMetrics(w http.ResponseWriter, r *http.Request) {
clientmetric.WritePrometheusExpositionFormat(w)
}
-// TODO(kradalby): Remove this once we have landed on a final set of
-// metrics to export to clients and consider the metrics stable.
-var debugUsermetricsEndpoint = envknob.RegisterBool("TS_DEBUG_USER_METRICS")
-
+// serveUserMetrics returns user-facing metrics in Prometheus text
+// exposition format.
func (h *Handler) serveUserMetrics(w http.ResponseWriter, r *http.Request) {
- if !testenv.InTest() && !debugUsermetricsEndpoint() {
- http.Error(w, "usermetrics debug flag not enabled", http.StatusForbidden)
- return
- }
+ metricUserMetricsCalls.Add(1)
h.b.UserMetricsRegistry().Handler(w, r)
}
@@ -635,6 +634,13 @@ func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) {
}
case "pick-new-derp":
err = h.b.DebugPickNewDERP()
+ case "force-prefer-derp":
+ var n int
+ err = json.NewDecoder(r.Body).Decode(&n)
+ if err != nil {
+ break
+ }
+ h.b.DebugForcePreferDERP(n)
case "":
err = fmt.Errorf("missing parameter 'action'")
default:
@@ -956,6 +962,22 @@ func (h *Handler) servePprof(w http.ResponseWriter, r *http.Request) {
servePprofFunc(w, r)
}
+// disconnectControl is the handler for local API /disconnect-control endpoint that shuts down control client, so that
+// node no longer communicates with control. Doing this makes control consider this node inactive. This can be used
+// before shutting down a replica of HA subnet router or app connector deployments to ensure that control tells the
+// peers to switch over to another replica whilst still maintaining the existing peer connections.
+func (h *Handler) disconnectControl(w http.ResponseWriter, r *http.Request) {
+ if !h.PermitWrite {
+ http.Error(w, "access denied", http.StatusForbidden)
+ return
+ }
+ if r.Method != httpm.POST {
+ http.Error(w, "use POST", http.StatusMethodNotAllowed)
+ return
+ }
+ h.b.DisconnectControl()
+}
+
func (h *Handler) reloadConfig(w http.ResponseWriter, r *http.Request) {
if !h.PermitWrite {
http.Error(w, "access denied", http.StatusForbidden)
@@ -1231,7 +1253,7 @@ func (h *Handler) serveWatchIPNBus(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
ctx := r.Context()
enc := json.NewEncoder(w)
- h.b.WatchNotifications(ctx, mask, f.Flush, func(roNotify *ipn.Notify) (keepGoing bool) {
+ h.b.WatchNotificationsAs(ctx, h.Actor, mask, f.Flush, func(roNotify *ipn.Notify) (keepGoing bool) {
err := enc.Encode(roNotify)
if err != nil {
h.logf("json.Encode: %v", err)
@@ -1251,7 +1273,7 @@ func (h *Handler) serveLoginInteractive(w http.ResponseWriter, r *http.Request)
http.Error(w, "want POST", http.StatusBadRequest)
return
}
- h.b.StartLoginInteractive(r.Context())
+ h.b.StartLoginInteractiveAs(r.Context(), h.Actor)
w.WriteHeader(http.StatusNoContent)
return
}
@@ -1339,6 +1361,53 @@ func (h *Handler) servePrefs(w http.ResponseWriter, r *http.Request) {
e.Encode(prefs)
}
+func (h *Handler) servePolicy(w http.ResponseWriter, r *http.Request) {
+ if !h.PermitRead {
+ http.Error(w, "policy access denied", http.StatusForbidden)
+ return
+ }
+
+ suffix, ok := strings.CutPrefix(r.URL.EscapedPath(), "/localapi/v0/policy/")
+ if !ok {
+ http.Error(w, "misconfigured", http.StatusInternalServerError)
+ return
+ }
+
+ var scope setting.PolicyScope
+ if suffix == "" {
+ scope = setting.DefaultScope()
+ } else if err := scope.UnmarshalText([]byte(suffix)); err != nil {
+ http.Error(w, fmt.Sprintf("%q is not a valid scope", suffix), http.StatusBadRequest)
+ return
+ }
+
+ policy, err := rsop.PolicyFor(scope)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var effectivePolicy *setting.Snapshot
+ switch r.Method {
+ case "GET":
+ effectivePolicy = policy.Get()
+ case "POST":
+ effectivePolicy, err = policy.Reload()
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ default:
+ http.Error(w, "unsupported method", http.StatusMethodNotAllowed)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ e := json.NewEncoder(w)
+ e.SetIndent("", "\t")
+ e.Encode(effectivePolicy)
+}
+
type resJSON struct {
Error string `json:",omitempty"`
}
@@ -2912,7 +2981,9 @@ var (
metricInvalidRequests = clientmetric.NewCounter("localapi_invalid_requests")
// User-visible LocalAPI endpoints.
- metricFilePutCalls = clientmetric.NewCounter("localapi_file_put")
+ metricFilePutCalls = clientmetric.NewCounter("localapi_file_put")
+ metricDebugMetricsCalls = clientmetric.NewCounter("localapi_debugmetric_requests")
+ metricUserMetricsCalls = clientmetric.NewCounter("localapi_usermetric_requests")
)
// serveSuggestExitNode serves a POST endpoint for returning a suggested exit node.
diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go
index fa54a1e756a7e..145910830e80f 100644
--- a/ipn/localapi/localapi_test.go
+++ b/ipn/localapi/localapi_test.go
@@ -39,23 +39,6 @@ import (
"tailscale.com/wgengine"
)
-var _ ipnauth.Actor = (*testActor)(nil)
-
-type testActor struct {
- uid ipn.WindowsUserID
- name string
- isLocalSystem bool
- isLocalAdmin bool
-}
-
-func (u *testActor) UserID() ipn.WindowsUserID { return u.uid }
-
-func (u *testActor) Username() (string, error) { return u.name, nil }
-
-func (u *testActor) IsLocalSystem() bool { return u.isLocalSystem }
-
-func (u *testActor) IsLocalAdmin(operatorUID string) bool { return u.isLocalAdmin }
-
func TestValidHost(t *testing.T) {
tests := []struct {
host string
@@ -207,7 +190,7 @@ func TestWhoIsArgTypes(t *testing.T) {
func TestShouldDenyServeConfigForGOOSAndUserContext(t *testing.T) {
newHandler := func(connIsLocalAdmin bool) *Handler {
- return &Handler{Actor: &testActor{isLocalAdmin: connIsLocalAdmin}, b: newTestLocalBackend(t)}
+ return &Handler{Actor: &ipnauth.TestActor{LocalAdmin: connIsLocalAdmin}, b: newTestLocalBackend(t)}
}
tests := []struct {
name string
@@ -366,6 +349,7 @@ func newTestLocalBackend(t testing.TB) *ipnlocal.LocalBackend {
if err != nil {
t.Fatalf("NewLocalBackend: %v", err)
}
+ t.Cleanup(lb.Shutdown)
return lb
}
diff --git a/ipn/prefs.go b/ipn/prefs.go
index 5d61f0119cd23..f5406f3b732e0 100644
--- a/ipn/prefs.go
+++ b/ipn/prefs.go
@@ -179,6 +179,12 @@ type Prefs struct {
// node.
AdvertiseRoutes []netip.Prefix
+ // AdvertiseServices specifies the list of services that this
+ // node can serve as a destination for. Note that an advertised
+ // service must still go through the approval process from the
+ // control server.
+ AdvertiseServices []string
+
// NoSNAT specifies whether to source NAT traffic going to
// destinations in AdvertiseRoutes. The default is to apply source
// NAT, which makes the traffic appear to come from the router
@@ -319,6 +325,7 @@ type MaskedPrefs struct {
ForceDaemonSet bool `json:",omitempty"`
EggSet bool `json:",omitempty"`
AdvertiseRoutesSet bool `json:",omitempty"`
+ AdvertiseServicesSet bool `json:",omitempty"`
NoSNATSet bool `json:",omitempty"`
NoStatefulFilteringSet bool `json:",omitempty"`
NetfilterModeSet bool `json:",omitempty"`
@@ -527,6 +534,9 @@ func (p *Prefs) pretty(goos string) string {
if len(p.AdvertiseTags) > 0 {
fmt.Fprintf(&sb, "tags=%s ", strings.Join(p.AdvertiseTags, ","))
}
+ if len(p.AdvertiseServices) > 0 {
+ fmt.Fprintf(&sb, "services=%s ", strings.Join(p.AdvertiseServices, ","))
+ }
if goos == "linux" {
fmt.Fprintf(&sb, "nf=%v ", p.NetfilterMode)
}
@@ -598,6 +608,7 @@ func (p *Prefs) Equals(p2 *Prefs) bool {
p.ForceDaemon == p2.ForceDaemon &&
compareIPNets(p.AdvertiseRoutes, p2.AdvertiseRoutes) &&
compareStrings(p.AdvertiseTags, p2.AdvertiseTags) &&
+ compareStrings(p.AdvertiseServices, p2.AdvertiseServices) &&
p.Persist.Equals(p2.Persist) &&
p.ProfileName == p2.ProfileName &&
p.AutoUpdate.Equals(p2.AutoUpdate) &&
diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go
index dcb999ef56a64..31671c0f8e4ef 100644
--- a/ipn/prefs_test.go
+++ b/ipn/prefs_test.go
@@ -54,6 +54,7 @@ func TestPrefsEqual(t *testing.T) {
"ForceDaemon",
"Egg",
"AdvertiseRoutes",
+ "AdvertiseServices",
"NoSNAT",
"NoStatefulFiltering",
"NetfilterMode",
@@ -330,6 +331,16 @@ func TestPrefsEqual(t *testing.T) {
&Prefs{NetfilterKind: ""},
false,
},
+ {
+ &Prefs{AdvertiseServices: []string{"svc:tux", "svc:xenia"}},
+ &Prefs{AdvertiseServices: []string{"svc:tux", "svc:xenia"}},
+ true,
+ },
+ {
+ &Prefs{AdvertiseServices: []string{"svc:tux", "svc:xenia"}},
+ &Prefs{AdvertiseServices: []string{"svc:tux", "svc:amelie"}},
+ false,
+ },
}
for i, tt := range tests {
got := tt.a.Equals(tt.b)
diff --git a/ipn/serve.go b/ipn/serve.go
index 5c0a97ed3ffa9..49e0d9fa3d67a 100644
--- a/ipn/serve.go
+++ b/ipn/serve.go
@@ -24,6 +24,23 @@ func ServeConfigKey(profileID ProfileID) StateKey {
return StateKey("_serve/" + profileID)
}
+// ServiceConfig contains the config information for a single service.
+// it contains a bool to indicate if the service is in Tun mode (L3 forwarding).
+// If the service is not in Tun mode, the service is configured by the L4 forwarding
+// (TCP ports) and/or the L7 forwarding (http handlers) information.
+type ServiceConfig struct {
+ // TCP are the list of TCP port numbers that tailscaled should handle for
+ // the Tailscale IP addresses. (not subnet routers, etc)
+ TCP map[uint16]*TCPPortHandler `json:",omitempty"`
+
+ // Web maps from "$SNI_NAME:$PORT" to a set of HTTP handlers
+ // keyed by mount point ("/", "/foo", etc)
+ Web map[HostPort]*WebServerConfig `json:",omitempty"`
+
+ // Tun determines if the service should be using L3 forwarding (Tun mode).
+ Tun bool `json:",omitempty"`
+}
+
// ServeConfig is the JSON type stored in the StateStore for
// StateKey "_serve/$PROFILE_ID" as returned by ServeConfigKey.
type ServeConfig struct {
@@ -35,6 +52,10 @@ type ServeConfig struct {
// keyed by mount point ("/", "/foo", etc)
Web map[HostPort]*WebServerConfig `json:",omitempty"`
+ // Services maps from service name to a ServiceConfig. Which describes the
+ // L3, L4, and L7 forwarding information for the service.
+ Services map[string]*ServiceConfig `json:",omitempty"`
+
// AllowFunnel is the set of SNI:port values for which funnel
// traffic is allowed, from trusted ingress peers.
AllowFunnel map[HostPort]bool `json:",omitempty"`
diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go
index 00950bd3b2394..462e6d43425ff 100644
--- a/ipn/store/kubestore/store_kube.go
+++ b/ipn/store/kubestore/store_kube.go
@@ -7,27 +7,46 @@ package kubestore
import (
"context"
"fmt"
+ "log"
"net"
"os"
"strings"
"time"
"tailscale.com/ipn"
+ "tailscale.com/ipn/store/mem"
"tailscale.com/kube/kubeapi"
"tailscale.com/kube/kubeclient"
"tailscale.com/types/logger"
)
+const (
+ // timeout is the timeout for a single state update that includes calls to the API server to write or read a
+ // state Secret and emit an Event.
+ timeout = 30 * time.Second
+
+ reasonTailscaleStateUpdated = "TailscaledStateUpdated"
+ reasonTailscaleStateLoaded = "TailscaleStateLoaded"
+ reasonTailscaleStateUpdateFailed = "TailscaleStateUpdateFailed"
+ reasonTailscaleStateLoadFailed = "TailscaleStateLoadFailed"
+ eventTypeWarning = "Warning"
+ eventTypeNormal = "Normal"
+)
+
// Store is an ipn.StateStore that uses a Kubernetes Secret for persistence.
type Store struct {
client kubeclient.Client
canPatch bool
secretName string
+
+ // memory holds the latest tailscale state. Writes write state to a kube Secret and memory, Reads read from
+ // memory.
+ memory mem.Store
}
-// New returns a new Store that persists to the named secret.
+// New returns a new Store that persists to the named Secret.
func New(_ logger.Logf, secretName string) (*Store, error) {
- c, err := kubeclient.New()
+ c, err := kubeclient.New("tailscale-state-store")
if err != nil {
return nil, err
}
@@ -39,11 +58,16 @@ func New(_ logger.Logf, secretName string) (*Store, error) {
if err != nil {
return nil, err
}
- return &Store{
+ s := &Store{
client: c,
canPatch: canPatch,
secretName: secretName,
- }, nil
+ }
+ // Load latest state from kube Secret if it already exists.
+ if err := s.loadState(); err != nil && err != ipn.ErrStateNotExist {
+ return nil, fmt.Errorf("error loading state from kube Secret: %w", err)
+ }
+ return s, nil
}
func (s *Store) SetDialer(d func(ctx context.Context, network, address string) (net.Conn, error)) {
@@ -54,38 +78,27 @@ func (s *Store) String() string { return "kube.Store" }
// ReadState implements the StateStore interface.
func (s *Store) ReadState(id ipn.StateKey) ([]byte, error) {
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
-
- secret, err := s.client.GetSecret(ctx, s.secretName)
- if err != nil {
- if st, ok := err.(*kubeapi.Status); ok && st.Code == 404 {
- return nil, ipn.ErrStateNotExist
- }
- return nil, err
- }
- b, ok := secret.Data[sanitizeKey(id)]
- if !ok {
- return nil, ipn.ErrStateNotExist
- }
- return b, nil
-}
-
-func sanitizeKey(k ipn.StateKey) string {
- // The only valid characters in a Kubernetes secret key are alphanumeric, -,
- // _, and .
- return strings.Map(func(r rune) rune {
- if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '-' || r == '_' || r == '.' {
- return r
- }
- return '_'
- }, string(k))
+ return s.memory.ReadState(ipn.StateKey(sanitizeKey(id)))
}
// WriteState implements the StateStore interface.
-func (s *Store) WriteState(id ipn.StateKey, bs []byte) error {
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
+func (s *Store) WriteState(id ipn.StateKey, bs []byte) (err error) {
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer func() {
+ if err == nil {
+ s.memory.WriteState(ipn.StateKey(sanitizeKey(id)), bs)
+ }
+ if err != nil {
+ if err := s.client.Event(ctx, eventTypeWarning, reasonTailscaleStateUpdateFailed, err.Error()); err != nil {
+ log.Printf("kubestore: error creating tailscaled state update Event: %v", err)
+ }
+ } else {
+ if err := s.client.Event(ctx, eventTypeNormal, reasonTailscaleStateUpdated, "Successfully updated tailscaled state Secret"); err != nil {
+ log.Printf("kubestore: error creating tailscaled state Event: %v", err)
+ }
+ }
+ cancel()
+ }()
secret, err := s.client.GetSecret(ctx, s.secretName)
if err != nil {
@@ -114,7 +127,7 @@ func (s *Store) WriteState(id ipn.StateKey, bs []byte) error {
Value: map[string][]byte{sanitizeKey(id): bs},
},
}
- if err := s.client.JSONPatchSecret(ctx, s.secretName, m); err != nil {
+ if err := s.client.JSONPatchResource(ctx, s.secretName, kubeclient.TypeSecrets, m); err != nil {
return fmt.Errorf("error patching Secret %s with a /data field: %v", s.secretName, err)
}
return nil
@@ -126,8 +139,8 @@ func (s *Store) WriteState(id ipn.StateKey, bs []byte) error {
Value: bs,
},
}
- if err := s.client.JSONPatchSecret(ctx, s.secretName, m); err != nil {
- return fmt.Errorf("error patching Secret %s with /data/%s field", s.secretName, sanitizeKey(id))
+ if err := s.client.JSONPatchResource(ctx, s.secretName, kubeclient.TypeSecrets, m); err != nil {
+ return fmt.Errorf("error patching Secret %s with /data/%s field: %v", s.secretName, sanitizeKey(id), err)
}
return nil
}
@@ -137,3 +150,35 @@ func (s *Store) WriteState(id ipn.StateKey, bs []byte) error {
}
return err
}
+
+func (s *Store) loadState() (err error) {
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+
+ secret, err := s.client.GetSecret(ctx, s.secretName)
+ if err != nil {
+ if st, ok := err.(*kubeapi.Status); ok && st.Code == 404 {
+ return ipn.ErrStateNotExist
+ }
+ if err := s.client.Event(ctx, eventTypeWarning, reasonTailscaleStateLoadFailed, err.Error()); err != nil {
+ log.Printf("kubestore: error creating Event: %v", err)
+ }
+ return err
+ }
+ if err := s.client.Event(ctx, eventTypeNormal, reasonTailscaleStateLoaded, "Successfully loaded tailscaled state from Secret"); err != nil {
+ log.Printf("kubestore: error creating Event: %v", err)
+ }
+ s.memory.LoadFromMap(secret.Data)
+ return nil
+}
+
+func sanitizeKey(k ipn.StateKey) string {
+ // The only valid characters in a Kubernetes secret key are alphanumeric, -,
+ // _, and .
+ return strings.Map(func(r rune) rune {
+ if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '-' || r == '_' || r == '.' {
+ return r
+ }
+ return '_'
+ }, string(k))
+}
diff --git a/ipn/store/mem/store_mem.go b/ipn/store/mem/store_mem.go
index f3a308ae5dc4f..6f474ce993b43 100644
--- a/ipn/store/mem/store_mem.go
+++ b/ipn/store/mem/store_mem.go
@@ -9,8 +9,10 @@ import (
"encoding/json"
"sync"
+ xmaps "golang.org/x/exp/maps"
"tailscale.com/ipn"
"tailscale.com/types/logger"
+ "tailscale.com/util/mak"
)
// New returns a new Store.
@@ -28,6 +30,7 @@ type Store struct {
func (s *Store) String() string { return "mem.Store" }
// ReadState implements the StateStore interface.
+// It returns ipn.ErrStateNotExist if the state does not exist.
func (s *Store) ReadState(id ipn.StateKey) ([]byte, error) {
s.mu.Lock()
defer s.mu.Unlock()
@@ -39,6 +42,7 @@ func (s *Store) ReadState(id ipn.StateKey) ([]byte, error) {
}
// WriteState implements the StateStore interface.
+// It never returns an error.
func (s *Store) WriteState(id ipn.StateKey, bs []byte) error {
s.mu.Lock()
defer s.mu.Unlock()
@@ -49,6 +53,19 @@ func (s *Store) WriteState(id ipn.StateKey, bs []byte) error {
return nil
}
+// LoadFromMap loads the in-memory cache from the provided map.
+// Any existing content is cleared, and the provided map is
+// copied into the cache.
+func (s *Store) LoadFromMap(m map[string][]byte) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ xmaps.Clear(s.cache)
+ for k, v := range m {
+ mak.Set(&s.cache, ipn.StateKey(k), v)
+ }
+ return
+}
+
// LoadFromJSON attempts to unmarshal json content into the
// in-memory cache.
func (s *Store) LoadFromJSON(data []byte) error {
diff --git a/k8s-operator/api.md b/k8s-operator/api.md
index e8a6e248a2934..08e1284fe82e7 100644
--- a/k8s-operator/api.md
+++ b/k8s-operator/api.md
@@ -21,6 +21,22 @@
+#### AppConnector
+
+
+
+AppConnector defines a Tailscale app connector node configured via Connector.
+
+
+
+_Appears in:_
+- [ConnectorSpec](#connectorspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `routes` _[Routes](#routes)_ | Routes are optional preconfigured routes for the domains routed via the app connector.
If not set, routes for the domains will be discovered dynamically.
If set, the app connector will immediately be able to route traffic using the preconfigured routes, but may
also dynamically discover other routes.
https://tailscale.com/kb/1332/apps-best-practices#preconfiguration | | Format: cidr
MinItems: 1
Type: string
|
+
+
#### Connector
@@ -86,8 +102,9 @@ _Appears in:_
| `tags` _[Tags](#tags)_ | Tags that the Tailscale node will be tagged with.
Defaults to [tag:k8s].
To autoapprove the subnet routes or exit node defined by a Connector,
you can configure Tailscale ACLs to give these tags the necessary
permissions.
See https://tailscale.com/kb/1337/acl-syntax#autoapprovers.
If you specify custom tags here, you must also make the operator an owner of these tags.
See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
Tags cannot be changed once a Connector node has been created.
Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
Type: string
|
| `hostname` _[Hostname](#hostname)_ | Hostname is the tailnet hostname that should be assigned to the
Connector node. If unset, hostname defaults to name>-connector. Hostname can contain lower case letters, numbers and
dashes, it must not start or end with a dash and must be between 2
and 63 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$`
Type: string
|
| `proxyClass` _string_ | ProxyClass is the name of the ProxyClass custom resource that
contains configuration options that should be applied to the
resources created for this Connector. If unset, the operator will
create resources with the default configuration. | | |
-| `subnetRouter` _[SubnetRouter](#subnetrouter)_ | SubnetRouter defines subnet routes that the Connector node should
expose to tailnet. If unset, none are exposed.
https://tailscale.com/kb/1019/subnets/ | | |
-| `exitNode` _boolean_ | ExitNode defines whether the Connector node should act as a
Tailscale exit node. Defaults to false.
https://tailscale.com/kb/1103/exit-nodes | | |
+| `subnetRouter` _[SubnetRouter](#subnetrouter)_ | SubnetRouter defines subnet routes that the Connector device should
expose to tailnet as a Tailscale subnet router.
https://tailscale.com/kb/1019/subnets/
If this field is unset, the device does not get configured as a Tailscale subnet router.
This field is mutually exclusive with the appConnector field. | | |
+| `appConnector` _[AppConnector](#appconnector)_ | AppConnector defines whether the Connector device should act as a Tailscale app connector. A Connector that is
configured as an app connector cannot be a subnet router or an exit node. If this field is unset, the
Connector does not act as an app connector.
Note that you will need to manually configure the permissions and the domains for the app connector via the
Admin panel.
Note also that the main tested and supported use case of this config option is to deploy an app connector on
Kubernetes to access SaaS applications available on the public internet. Using the app connector to expose
cluster workloads or other internal workloads to tailnet might work, but this is not a use case that we have
tested or optimised for.
If you are using the app connector to access SaaS applications because you need a predictable egress IP that
can be whitelisted, it is also your responsibility to ensure that cluster traffic from the connector flows
via that predictable IP, for example by enforcing that cluster egress traffic is routed via an egress NAT
device with a static IP address.
https://tailscale.com/kb/1281/app-connectors | | |
+| `exitNode` _boolean_ | ExitNode defines whether the Connector device should act as a Tailscale exit node. Defaults to false.
This field is mutually exclusive with the appConnector field.
https://tailscale.com/kb/1103/exit-nodes | | |
#### ConnectorStatus
@@ -106,6 +123,7 @@ _Appears in:_
| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the Connector.
Known condition types are `ConnectorReady`. | | |
| `subnetRoutes` _string_ | SubnetRoutes are the routes currently exposed to tailnet via this
Connector instance. | | |
| `isExitNode` _boolean_ | IsExitNode is set to true if the Connector acts as an exit node. | | |
+| `isAppConnector` _boolean_ | IsAppConnector is set to true if the Connector acts as an app connector. | | |
| `tailnetIPs` _string array_ | TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6)
assigned to the Connector node. | | |
| `hostname` _string_ | Hostname is the fully qualified domain name of the Connector node.
If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
node. | | |
@@ -127,7 +145,8 @@ _Appears in:_
| `image` _string_ | Container image name. By default images are pulled from
docker.io/tailscale/tailscale, but the official images are also
available at ghcr.io/tailscale/tailscale. Specifying image name here
will override any proxy image values specified via the Kubernetes
operator's Helm chart values or PROXY_IMAGE env var in the operator
Deployment.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | |
| `imagePullPolicy` _[PullPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#pullpolicy-v1-core)_ | Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | Enum: [Always Never IfNotPresent]
|
| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#resourcerequirements-v1-core)_ | Container resource requirements.
By default Tailscale Kubernetes operator does not apply any resource
requirements. The amount of resources required will depend on the
amount of resources the operator needs to parse, usage patterns and
cluster size.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources | | |
-| `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#securitycontext-v1-core)_ | Container security context.
Security context specified here will override the security context by the operator.
By default the operator:
- sets 'privileged: true' for the init container
- set NET_ADMIN capability for tailscale container for proxies that
are created for Services or Connector.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context | | |
+| `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#securitycontext-v1-core)_ | Container security context.
Security context specified here will override the security context set by the operator.
By default the operator sets the Tailscale container and the Tailscale init container to privileged
for proxies created for Tailscale ingress and egress Service, Connector and ProxyGroup.
You can reduce the permissions of the Tailscale container to cap NET_ADMIN by
installing device plugin in your cluster and configuring the proxies tun device to be created
by the device plugin, see https://github.com/tailscale/tailscale/issues/10814#issuecomment-2479977752
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context | | |
+| `debug` _[Debug](#debug)_ | Configuration for enabling extra debug information in the container.
Not recommended for production use. | | |
#### DNSConfig
@@ -230,6 +249,22 @@ _Appears in:_
| `nameserver` _[NameserverStatus](#nameserverstatus)_ | Nameserver describes the status of nameserver cluster resources. | | |
+#### Debug
+
+
+
+
+
+
+
+_Appears in:_
+- [Container](#container)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `enable` _boolean_ | Enable tailscaled's HTTP pprof endpoints at :9001/debug/pprof/
and internal debug metrics endpoint at :9001/debug/metrics, where
9001 is a container port named "debug". The endpoints and their responses
may change in backwards incompatible ways in the future, and should not
be considered stable.
In 1.78.x and 1.80.x, this setting will default to the value of
.spec.metrics.enable, and requests to the "metrics" port matching the
mux pattern /debug/ will be forwarded to the "debug" port. In 1.82.x,
this setting will default to false, and no requests will be proxied. | | |
+
+
#### Env
@@ -291,7 +326,8 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `enable` _boolean_ | Setting enable to true will make the proxy serve Tailscale metrics
at :9001/debug/metrics.
Defaults to false. | | |
+| `enable` _boolean_ | Setting enable to true will make the proxy serve Tailscale metrics
at :9002/metrics.
A metrics Service named -metrics will also be created in the operator's namespace and will
serve the metrics at :9002/metrics.
In 1.78.x and 1.80.x, this field also serves as the default value for
.spec.statefulSet.pod.tailscaleContainer.debug.enable. From 1.82.0, both
fields will independently default to false.
Defaults to false. | | |
+| `serviceMonitor` _[ServiceMonitor](#servicemonitor)_ | Enable to create a Prometheus ServiceMonitor for scraping the proxy's Tailscale metrics.
The ServiceMonitor will select the metrics Service that gets created when metrics are enabled.
The ingested metrics for each Service monitor will have labels to identify the proxy:
ts_proxy_type: ingress_service\|ingress_resource\|connector\|proxygroup
ts_proxy_parent_name: name of the parent resource (i.e. name of the Connector, Tailscale Ingress, Tailscale Service or ProxyGroup)
ts_proxy_parent_namespace: namespace of the parent resource (if the parent resource is not cluster scoped)
job: ts__[]_ | | |
#### Name
@@ -381,6 +417,7 @@ _Appears in:_
| `nodeName` _string_ | Proxy Pod's node name.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | |
| `nodeSelector` _object (keys:string, values:string)_ | Proxy Pod's node selector.
By default Tailscale Kubernetes operator does not apply any node
selector.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | |
| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#toleration-v1-core) array_ | Proxy Pod's tolerations.
By default Tailscale Kubernetes operator does not apply any
tolerations.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | |
+| `topologySpreadConstraints` _[TopologySpreadConstraint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#topologyspreadconstraint-v1-core) array_ | Proxy Pod's topology spread constraints.
By default Tailscale Kubernetes operator does not apply any topology spread constraints.
https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ | | |
#### ProxyClass
@@ -745,6 +782,7 @@ _Validation:_
- Type: string
_Appears in:_
+- [AppConnector](#appconnector)
- [SubnetRouter](#subnetrouter)
@@ -799,6 +837,22 @@ _Appears in:_
| `name` _string_ | The name of a Kubernetes Secret in the operator's namespace that contains
credentials for writing to the configured bucket. Each key-value pair
from the secret's data will be mounted as an environment variable. It
should include keys for AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY if
using a static access key. | | |
+#### ServiceMonitor
+
+
+
+
+
+
+
+_Appears in:_
+- [Metrics](#metrics)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `enable` _boolean_ | If Enable is set to true, a Prometheus ServiceMonitor will be created. Enable can only be set to true if metrics are enabled. | | |
+
+
#### StatefulSet
diff --git a/k8s-operator/apis/v1alpha1/register.go b/k8s-operator/apis/v1alpha1/register.go
index 70b411d120994..0880ac975732e 100644
--- a/k8s-operator/apis/v1alpha1/register.go
+++ b/k8s-operator/apis/v1alpha1/register.go
@@ -10,6 +10,7 @@ import (
"tailscale.com/k8s-operator/apis"
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -39,12 +40,18 @@ func init() {
localSchemeBuilder.Register(addKnownTypes)
GlobalScheme = runtime.NewScheme()
+ // Add core types
if err := scheme.AddToScheme(GlobalScheme); err != nil {
panic(fmt.Sprintf("failed to add k8s.io scheme: %s", err))
}
+ // Add tailscale.com types
if err := AddToScheme(GlobalScheme); err != nil {
panic(fmt.Sprintf("failed to add tailscale.com scheme: %s", err))
}
+ // Add apiextensions types (CustomResourceDefinitions/CustomResourceDefinitionLists)
+ if err := apiextensionsv1.AddToScheme(GlobalScheme); err != nil {
+ panic(fmt.Sprintf("failed to add apiextensions.k8s.io scheme: %s", err))
+ }
}
// Adds the list of known types to api.Scheme.
diff --git a/k8s-operator/apis/v1alpha1/types_connector.go b/k8s-operator/apis/v1alpha1/types_connector.go
index 27afd0838a388..0222584859bd6 100644
--- a/k8s-operator/apis/v1alpha1/types_connector.go
+++ b/k8s-operator/apis/v1alpha1/types_connector.go
@@ -22,6 +22,7 @@ var ConnectorKind = "Connector"
// +kubebuilder:resource:scope=Cluster,shortName=cn
// +kubebuilder:printcolumn:name="SubnetRoutes",type="string",JSONPath=`.status.subnetRoutes`,description="CIDR ranges exposed to tailnet by a subnet router defined via this Connector instance."
// +kubebuilder:printcolumn:name="IsExitNode",type="string",JSONPath=`.status.isExitNode`,description="Whether this Connector instance defines an exit node."
+// +kubebuilder:printcolumn:name="IsAppConnector",type="string",JSONPath=`.status.isAppConnector`,description="Whether this Connector instance is an app connector."
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "ConnectorReady")].reason`,description="Status of the deployed Connector resources."
// Connector defines a Tailscale node that will be deployed in the cluster. The
@@ -55,7 +56,8 @@ type ConnectorList struct {
}
// ConnectorSpec describes a Tailscale node to be deployed in the cluster.
-// +kubebuilder:validation:XValidation:rule="has(self.subnetRouter) || self.exitNode == true",message="A Connector needs to be either an exit node or a subnet router, or both."
+// +kubebuilder:validation:XValidation:rule="has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true) || has(self.appConnector)",message="A Connector needs to have at least one of exit node, subnet router or app connector configured."
+// +kubebuilder:validation:XValidation:rule="!((has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true)) && has(self.appConnector))",message="The appConnector field is mutually exclusive with exitNode and subnetRouter fields."
type ConnectorSpec struct {
// Tags that the Tailscale node will be tagged with.
// Defaults to [tag:k8s].
@@ -82,13 +84,31 @@ type ConnectorSpec struct {
// create resources with the default configuration.
// +optional
ProxyClass string `json:"proxyClass,omitempty"`
- // SubnetRouter defines subnet routes that the Connector node should
- // expose to tailnet. If unset, none are exposed.
+ // SubnetRouter defines subnet routes that the Connector device should
+ // expose to tailnet as a Tailscale subnet router.
// https://tailscale.com/kb/1019/subnets/
+ // If this field is unset, the device does not get configured as a Tailscale subnet router.
+ // This field is mutually exclusive with the appConnector field.
// +optional
- SubnetRouter *SubnetRouter `json:"subnetRouter"`
- // ExitNode defines whether the Connector node should act as a
- // Tailscale exit node. Defaults to false.
+ SubnetRouter *SubnetRouter `json:"subnetRouter,omitempty"`
+ // AppConnector defines whether the Connector device should act as a Tailscale app connector. A Connector that is
+ // configured as an app connector cannot be a subnet router or an exit node. If this field is unset, the
+ // Connector does not act as an app connector.
+ // Note that you will need to manually configure the permissions and the domains for the app connector via the
+ // Admin panel.
+ // Note also that the main tested and supported use case of this config option is to deploy an app connector on
+ // Kubernetes to access SaaS applications available on the public internet. Using the app connector to expose
+ // cluster workloads or other internal workloads to tailnet might work, but this is not a use case that we have
+ // tested or optimised for.
+ // If you are using the app connector to access SaaS applications because you need a predictable egress IP that
+ // can be whitelisted, it is also your responsibility to ensure that cluster traffic from the connector flows
+ // via that predictable IP, for example by enforcing that cluster egress traffic is routed via an egress NAT
+ // device with a static IP address.
+ // https://tailscale.com/kb/1281/app-connectors
+ // +optional
+ AppConnector *AppConnector `json:"appConnector,omitempty"`
+ // ExitNode defines whether the Connector device should act as a Tailscale exit node. Defaults to false.
+ // This field is mutually exclusive with the appConnector field.
// https://tailscale.com/kb/1103/exit-nodes
// +optional
ExitNode bool `json:"exitNode"`
@@ -104,6 +124,17 @@ type SubnetRouter struct {
AdvertiseRoutes Routes `json:"advertiseRoutes"`
}
+// AppConnector defines a Tailscale app connector node configured via Connector.
+type AppConnector struct {
+ // Routes are optional preconfigured routes for the domains routed via the app connector.
+ // If not set, routes for the domains will be discovered dynamically.
+ // If set, the app connector will immediately be able to route traffic using the preconfigured routes, but may
+ // also dynamically discover other routes.
+ // https://tailscale.com/kb/1332/apps-best-practices#preconfiguration
+ // +optional
+ Routes Routes `json:"routes"`
+}
+
type Tags []Tag
func (tags Tags) Stringify() []string {
@@ -156,6 +187,9 @@ type ConnectorStatus struct {
// IsExitNode is set to true if the Connector acts as an exit node.
// +optional
IsExitNode bool `json:"isExitNode"`
+ // IsAppConnector is set to true if the Connector acts as an app connector.
+ // +optional
+ IsAppConnector bool `json:"isAppConnector"`
// TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6)
// assigned to the Connector node.
// +optional
diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go
index 7f415bc340bd7..ef9a071d02bbe 100644
--- a/k8s-operator/apis/v1alpha1/types_proxyclass.go
+++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go
@@ -154,14 +154,40 @@ type Pod struct {
// https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling
// +optional
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+ // Proxy Pod's topology spread constraints.
+ // By default Tailscale Kubernetes operator does not apply any topology spread constraints.
+ // https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
// +optional
+ TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
}
+// +kubebuilder:validation:XValidation:rule="!(has(self.serviceMonitor) && self.serviceMonitor.enable && !self.enable)",message="ServiceMonitor can only be enabled if metrics are enabled"
type Metrics struct {
// Setting enable to true will make the proxy serve Tailscale metrics
- // at :9001/debug/metrics.
+ // at :9002/metrics.
+ // A metrics Service named -metrics will also be created in the operator's namespace and will
+ // serve the metrics at :9002/metrics.
+ //
+ // In 1.78.x and 1.80.x, this field also serves as the default value for
+ // .spec.statefulSet.pod.tailscaleContainer.debug.enable. From 1.82.0, both
+ // fields will independently default to false.
+ //
// Defaults to false.
Enable bool `json:"enable"`
+ // Enable to create a Prometheus ServiceMonitor for scraping the proxy's Tailscale metrics.
+ // The ServiceMonitor will select the metrics Service that gets created when metrics are enabled.
+ // The ingested metrics for each Service monitor will have labels to identify the proxy:
+ // ts_proxy_type: ingress_service|ingress_resource|connector|proxygroup
+	// ts_proxy_parent_name: name of the parent resource (i.e. name of the Connector, Tailscale Ingress, Tailscale Service or ProxyGroup)
+ // ts_proxy_parent_namespace: namespace of the parent resource (if the parent resource is not cluster scoped)
+ // job: ts__[]_
+ // +optional
+ ServiceMonitor *ServiceMonitor `json:"serviceMonitor"`
+}
+
+type ServiceMonitor struct {
+ // If Enable is set to true, a Prometheus ServiceMonitor will be created. Enable can only be set to true if metrics are enabled.
+ Enable bool `json:"enable"`
}
type Container struct {
@@ -197,14 +223,35 @@ type Container struct {
// +optional
Resources corev1.ResourceRequirements `json:"resources,omitempty"`
// Container security context.
- // Security context specified here will override the security context by the operator.
- // By default the operator:
- // - sets 'privileged: true' for the init container
- // - set NET_ADMIN capability for tailscale container for proxies that
- // are created for Services or Connector.
+ // Security context specified here will override the security context set by the operator.
+ // By default the operator sets the Tailscale container and the Tailscale init container to privileged
+ // for proxies created for Tailscale ingress and egress Service, Connector and ProxyGroup.
+ // You can reduce the permissions of the Tailscale container to cap NET_ADMIN by
+	// installing a device plugin in your cluster and configuring the proxies' tun device to be created
+ // by the device plugin, see https://github.com/tailscale/tailscale/issues/10814#issuecomment-2479977752
// https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context
// +optional
SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"`
+ // Configuration for enabling extra debug information in the container.
+ // Not recommended for production use.
+ // +optional
+ Debug *Debug `json:"debug,omitempty"`
+}
+
+type Debug struct {
+ // Enable tailscaled's HTTP pprof endpoints at :9001/debug/pprof/
+ // and internal debug metrics endpoint at :9001/debug/metrics, where
+ // 9001 is a container port named "debug". The endpoints and their responses
+ // may change in backwards incompatible ways in the future, and should not
+ // be considered stable.
+ //
+ // In 1.78.x and 1.80.x, this setting will default to the value of
+ // .spec.metrics.enable, and requests to the "metrics" port matching the
+ // mux pattern /debug/ will be forwarded to the "debug" port. In 1.82.x,
+ // this setting will default to false, and no requests will be proxied.
+ //
+ // +optional
+ Enable bool `json:"enable"`
}
type Env struct {
diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go
index ba4ff40e46dd5..29c71cb90f309 100644
--- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go
+++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go
@@ -13,6 +13,26 @@ import (
"k8s.io/apimachinery/pkg/runtime"
)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AppConnector) DeepCopyInto(out *AppConnector) {
+ *out = *in
+ if in.Routes != nil {
+ in, out := &in.Routes, &out.Routes
+ *out = make(Routes, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppConnector.
+func (in *AppConnector) DeepCopy() *AppConnector {
+ if in == nil {
+ return nil
+ }
+ out := new(AppConnector)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Connector) DeepCopyInto(out *Connector) {
*out = *in
@@ -85,6 +105,11 @@ func (in *ConnectorSpec) DeepCopyInto(out *ConnectorSpec) {
*out = new(SubnetRouter)
(*in).DeepCopyInto(*out)
}
+ if in.AppConnector != nil {
+ in, out := &in.AppConnector, &out.AppConnector
+ *out = new(AppConnector)
+ (*in).DeepCopyInto(*out)
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorSpec.
@@ -138,6 +163,11 @@ func (in *Container) DeepCopyInto(out *Container) {
*out = new(corev1.SecurityContext)
(*in).DeepCopyInto(*out)
}
+ if in.Debug != nil {
+ in, out := &in.Debug, &out.Debug
+ *out = new(Debug)
+ **out = **in
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container.
@@ -256,6 +286,21 @@ func (in *DNSConfigStatus) DeepCopy() *DNSConfigStatus {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Debug) DeepCopyInto(out *Debug) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Debug.
+func (in *Debug) DeepCopy() *Debug {
+ if in == nil {
+ return nil
+ }
+ out := new(Debug)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Env) DeepCopyInto(out *Env) {
*out = *in
@@ -274,6 +319,11 @@ func (in *Env) DeepCopy() *Env {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Metrics) DeepCopyInto(out *Metrics) {
*out = *in
+ if in.ServiceMonitor != nil {
+ in, out := &in.ServiceMonitor, &out.ServiceMonitor
+ *out = new(ServiceMonitor)
+ **out = **in
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metrics.
@@ -392,6 +442,13 @@ func (in *Pod) DeepCopyInto(out *Pod) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.TopologySpreadConstraints != nil {
+ in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints
+ *out = make([]corev1.TopologySpreadConstraint, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod.
@@ -474,7 +531,7 @@ func (in *ProxyClassSpec) DeepCopyInto(out *ProxyClassSpec) {
if in.Metrics != nil {
in, out := &in.Metrics, &out.Metrics
*out = new(Metrics)
- **out = **in
+ (*in).DeepCopyInto(*out)
}
if in.TailscaleConfig != nil {
in, out := &in.TailscaleConfig, &out.TailscaleConfig
@@ -939,6 +996,21 @@ func (in *S3Secret) DeepCopy() *S3Secret {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceMonitor) DeepCopyInto(out *ServiceMonitor) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMonitor.
+func (in *ServiceMonitor) DeepCopy() *ServiceMonitor {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceMonitor)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSet) DeepCopyInto(out *StatefulSet) {
*out = *in
diff --git a/k8s-operator/sessionrecording/hijacker.go b/k8s-operator/sessionrecording/hijacker.go
index f8ef951d415f0..43aa14e613887 100644
--- a/k8s-operator/sessionrecording/hijacker.go
+++ b/k8s-operator/sessionrecording/hijacker.go
@@ -102,7 +102,7 @@ type Hijacker struct {
// connection succeeds. In case of success, returns a list with a single
// successful recording attempt and an error channel. If the connection errors
// after having been established, an error is sent down the channel.
-type RecorderDialFn func(context.Context, []netip.AddrPort, func(context.Context, string, string) (net.Conn, error)) (io.WriteCloser, []*tailcfg.SSHRecordingAttempt, <-chan error, error)
+type RecorderDialFn func(context.Context, []netip.AddrPort, sessionrecording.DialFunc) (io.WriteCloser, []*tailcfg.SSHRecordingAttempt, <-chan error, error)
// Hijack hijacks a 'kubectl exec' session and configures for the session
// contents to be sent to a recorder.
diff --git a/k8s-operator/sessionrecording/hijacker_test.go b/k8s-operator/sessionrecording/hijacker_test.go
index 440d9c94294c9..e166ce63b3c85 100644
--- a/k8s-operator/sessionrecording/hijacker_test.go
+++ b/k8s-operator/sessionrecording/hijacker_test.go
@@ -10,7 +10,6 @@ import (
"errors"
"fmt"
"io"
- "net"
"net/http"
"net/netip"
"net/url"
@@ -20,6 +19,7 @@ import (
"go.uber.org/zap"
"tailscale.com/client/tailscale/apitype"
"tailscale.com/k8s-operator/sessionrecording/fakes"
+ "tailscale.com/sessionrecording"
"tailscale.com/tailcfg"
"tailscale.com/tsnet"
"tailscale.com/tstest"
@@ -80,7 +80,7 @@ func Test_Hijacker(t *testing.T) {
h := &Hijacker{
connectToRecorder: func(context.Context,
[]netip.AddrPort,
- func(context.Context, string, string) (net.Conn, error),
+ sessionrecording.DialFunc,
) (wc io.WriteCloser, rec []*tailcfg.SSHRecordingAttempt, _ <-chan error, err error) {
if tt.failRecorderConnect {
err = errors.New("test")
diff --git a/k8s-operator/utils.go b/k8s-operator/utils.go
index a1f225fe601c8..420d7e49c7ec2 100644
--- a/k8s-operator/utils.go
+++ b/k8s-operator/utils.go
@@ -32,9 +32,6 @@ type Records struct {
// TailscaledConfigFileName returns a tailscaled config file name in
// format expected by containerboot for the given CapVer.
func TailscaledConfigFileName(cap tailcfg.CapabilityVersion) string {
- if cap < 95 {
- return "tailscaled"
- }
return fmt.Sprintf("cap-%v.hujson", cap)
}
diff --git a/kube/kubeapi/api.go b/kube/kubeapi/api.go
index 0e42437a69a2a..a2ae8cc79f20d 100644
--- a/kube/kubeapi/api.go
+++ b/kube/kubeapi/api.go
@@ -7,7 +7,9 @@
// dependency size for those consumers when adding anything new here.
package kubeapi
-import "time"
+import (
+ "time"
+)
// Note: The API types are copied from k8s.io/api{,machinery} to not introduce a
// module dependency on the Kubernetes API as it pulls in many more dependencies.
@@ -151,6 +153,57 @@ type Secret struct {
Data map[string][]byte `json:"data,omitempty"`
}
+// Event contains a subset of fields from corev1.Event.
+// https://github.com/kubernetes/api/blob/6cc44b8953ae704d6d9ec2adf32e7ae19199ea9f/core/v1/types.go#L7034
+// It is copied here to avoid having to import kube libraries.
+type Event struct {
+ TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata"`
+ Message string `json:"message,omitempty"`
+ Reason string `json:"reason,omitempty"`
+ Source EventSource `json:"source,omitempty"` // who is emitting this Event
+ Type string `json:"type,omitempty"` // Normal or Warning
+ // InvolvedObject is the subject of the Event. `kubectl describe` will, for most object types, display any
+ // currently present cluster Events matching the object (but you probably want to set UID for this to work).
+ InvolvedObject ObjectReference `json:"involvedObject"`
+ Count int32 `json:"count,omitempty"` // how many times Event was observed
+ FirstTimestamp time.Time `json:"firstTimestamp,omitempty"`
+ LastTimestamp time.Time `json:"lastTimestamp,omitempty"`
+}
+
+// EventSource includes a subset of fields from corev1.EventSource.
+// https://github.com/kubernetes/api/blob/6cc44b8953ae704d6d9ec2adf32e7ae19199ea9f/core/v1/types.go#L7007
+// It is copied here to avoid having to import kube libraries.
+type EventSource struct {
+ // Component is the name of the component that is emitting the Event.
+ Component string `json:"component,omitempty"`
+}
+
+// ObjectReference contains a subset of fields from corev1.ObjectReference.
+// https://github.com/kubernetes/api/blob/6cc44b8953ae704d6d9ec2adf32e7ae19199ea9f/core/v1/types.go#L6902
+// It is copied here to avoid having to import kube libraries.
+type ObjectReference struct {
+ // Kind of the referent.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ Kind string `json:"kind,omitempty"`
+ // Namespace of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ // +optional
+ Namespace string `json:"namespace,omitempty"`
+ // Name of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ // +optional
+ Name string `json:"name,omitempty"`
+ // UID of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ // +optional
+ UID string `json:"uid,omitempty"`
+ // API version of the referent.
+ // +optional
+ APIVersion string `json:"apiVersion,omitempty"`
+}
+
// Status is a return value for calls that don't return other objects.
type Status struct {
TypeMeta `json:",inline"`
@@ -186,6 +239,6 @@ type Status struct {
Code int `json:"code,omitempty"`
}
-func (s *Status) Error() string {
+func (s Status) Error() string {
return s.Message
}
diff --git a/kube/kubeclient/client.go b/kube/kubeclient/client.go
index e8ddec75d1584..d4309448df030 100644
--- a/kube/kubeclient/client.go
+++ b/kube/kubeclient/client.go
@@ -23,16 +23,21 @@ import (
"net/url"
"os"
"path/filepath"
+ "strings"
"sync"
"time"
"tailscale.com/kube/kubeapi"
+ "tailscale.com/tstime"
"tailscale.com/util/multierr"
)
const (
saPath = "/var/run/secrets/kubernetes.io/serviceaccount"
defaultURL = "https://kubernetes.default.svc"
+
+ TypeSecrets = "secrets"
+ typeEvents = "events"
)
// rootPathForTests is set by tests to override the root path to the
@@ -57,8 +62,13 @@ type Client interface {
GetSecret(context.Context, string) (*kubeapi.Secret, error)
UpdateSecret(context.Context, *kubeapi.Secret) error
CreateSecret(context.Context, *kubeapi.Secret) error
+	// Event attempts to ensure an event with the specified options is associated with the Pod in which we are
+ // currently running. This is best effort - if the client is not able to create events, this operation will be a
+ // no-op. If there is already an Event with the given reason for the current Pod, it will get updated (only
+ // count and timestamp are expected to change), else a new event will be created.
+ Event(_ context.Context, typ, reason, msg string) error
StrategicMergePatchSecret(context.Context, string, *kubeapi.Secret, string) error
- JSONPatchSecret(context.Context, string, []JSONPatch) error
+ JSONPatchResource(_ context.Context, resourceName string, resourceType string, patches []JSONPatch) error
CheckSecretPermissions(context.Context, string) (bool, bool, error)
SetDialer(dialer func(context.Context, string, string) (net.Conn, error))
SetURL(string)
@@ -66,15 +76,24 @@ type Client interface {
type client struct {
mu sync.Mutex
+ name string
url string
- ns string
+ podName string
+ podUID string
+ ns string // Pod namespace
client *http.Client
token string
tokenExpiry time.Time
+ cl tstime.Clock
+ // hasEventsPerms is true if client can emit Events for the Pod in which it runs. If it is set to false any
+	// calls to Event() will be a no-op.
+ hasEventsPerms bool
+	// kubeAPIRequest sends a request to the kube API server. It can be set to a fake in tests.
+ kubeAPIRequest kubeAPIRequestFunc
}
// New returns a new client
-func New() (Client, error) {
+func New(name string) (Client, error) {
ns, err := readFile("namespace")
if err != nil {
return nil, err
@@ -87,9 +106,11 @@ func New() (Client, error) {
if ok := cp.AppendCertsFromPEM(caCert); !ok {
return nil, fmt.Errorf("kube: error in creating root cert pool")
}
- return &client{
- url: defaultURL,
- ns: string(ns),
+ c := &client{
+ url: defaultURL,
+ ns: string(ns),
+ name: name,
+ cl: tstime.DefaultClock{},
client: &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
@@ -97,7 +118,10 @@ func New() (Client, error) {
},
},
},
- }, nil
+ }
+ c.kubeAPIRequest = newKubeAPIRequest(c)
+ c.setEventPerms()
+ return c, nil
}
// SetURL sets the URL to use for the Kubernetes API.
@@ -115,14 +139,14 @@ func (c *client) SetDialer(dialer func(ctx context.Context, network, addr string
func (c *client) expireToken() {
c.mu.Lock()
defer c.mu.Unlock()
- c.tokenExpiry = time.Now()
+ c.tokenExpiry = c.cl.Now()
}
func (c *client) getOrRenewToken() (string, error) {
c.mu.Lock()
defer c.mu.Unlock()
tk, te := c.token, c.tokenExpiry
- if time.Now().Before(te) {
+ if c.cl.Now().Before(te) {
return tk, nil
}
@@ -131,17 +155,10 @@ func (c *client) getOrRenewToken() (string, error) {
return "", err
}
c.token = string(tkb)
- c.tokenExpiry = time.Now().Add(30 * time.Minute)
+ c.tokenExpiry = c.cl.Now().Add(30 * time.Minute)
return c.token, nil
}
-func (c *client) secretURL(name string) string {
- if name == "" {
- return fmt.Sprintf("%s/api/v1/namespaces/%s/secrets", c.url, c.ns)
- }
- return fmt.Sprintf("%s/api/v1/namespaces/%s/secrets/%s", c.url, c.ns, name)
-}
-
func getError(resp *http.Response) error {
if resp.StatusCode == 200 || resp.StatusCode == 201 {
// These are the only success codes returned by the Kubernetes API.
@@ -161,36 +178,41 @@ func setHeader(key, value string) func(*http.Request) {
}
}
-// doRequest performs an HTTP request to the Kubernetes API.
-// If in is not nil, it is expected to be a JSON-encodable object and will be
-// sent as the request body.
-// If out is not nil, it is expected to be a pointer to an object that can be
-// decoded from JSON.
-// If the request fails with a 401, the token is expired and a new one is
-// requested.
-func (c *client) doRequest(ctx context.Context, method, url string, in, out any, opts ...func(*http.Request)) error {
- req, err := c.newRequest(ctx, method, url, in)
- if err != nil {
- return err
- }
- for _, opt := range opts {
- opt(req)
- }
- resp, err := c.client.Do(req)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
- if err := getError(resp); err != nil {
- if st, ok := err.(*kubeapi.Status); ok && st.Code == 401 {
- c.expireToken()
+type kubeAPIRequestFunc func(ctx context.Context, method, url string, in, out any, opts ...func(*http.Request)) error
+
+// newKubeAPIRequest returns a function that can perform an HTTP request to the Kubernetes API.
+func newKubeAPIRequest(c *client) kubeAPIRequestFunc {
+ // If in is not nil, it is expected to be a JSON-encodable object and will be
+ // sent as the request body.
+ // If out is not nil, it is expected to be a pointer to an object that can be
+ // decoded from JSON.
+ // If the request fails with a 401, the token is expired and a new one is
+ // requested.
+ f := func(ctx context.Context, method, url string, in, out any, opts ...func(*http.Request)) error {
+ req, err := c.newRequest(ctx, method, url, in)
+ if err != nil {
+ return err
}
- return err
- }
- if out != nil {
- return json.NewDecoder(resp.Body).Decode(out)
+ for _, opt := range opts {
+ opt(req)
+ }
+ resp, err := c.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if err := getError(resp); err != nil {
+ if st, ok := err.(*kubeapi.Status); ok && st.Code == 401 {
+ c.expireToken()
+ }
+ return err
+ }
+ if out != nil {
+ return json.NewDecoder(resp.Body).Decode(out)
+ }
+ return nil
}
- return nil
+ return f
}
func (c *client) newRequest(ctx context.Context, method, url string, in any) (*http.Request, error) {
@@ -226,7 +248,7 @@ func (c *client) newRequest(ctx context.Context, method, url string, in any) (*h
// GetSecret fetches the secret from the Kubernetes API.
func (c *client) GetSecret(ctx context.Context, name string) (*kubeapi.Secret, error) {
s := &kubeapi.Secret{Data: make(map[string][]byte)}
- if err := c.doRequest(ctx, "GET", c.secretURL(name), nil, s); err != nil {
+ if err := c.kubeAPIRequest(ctx, "GET", c.resourceURL(name, TypeSecrets), nil, s); err != nil {
return nil, err
}
return s, nil
@@ -235,16 +257,16 @@ func (c *client) GetSecret(ctx context.Context, name string) (*kubeapi.Secret, e
// CreateSecret creates a secret in the Kubernetes API.
func (c *client) CreateSecret(ctx context.Context, s *kubeapi.Secret) error {
s.Namespace = c.ns
- return c.doRequest(ctx, "POST", c.secretURL(""), s, nil)
+ return c.kubeAPIRequest(ctx, "POST", c.resourceURL("", TypeSecrets), s, nil)
}
// UpdateSecret updates a secret in the Kubernetes API.
func (c *client) UpdateSecret(ctx context.Context, s *kubeapi.Secret) error {
- return c.doRequest(ctx, "PUT", c.secretURL(s.Name), s, nil)
+ return c.kubeAPIRequest(ctx, "PUT", c.resourceURL(s.Name, TypeSecrets), s, nil)
}
// JSONPatch is a JSON patch operation.
-// It currently (2023-03-02) only supports "add" and "remove" operations.
+// It currently (2024-11-15) only supports "add", "remove" and "replace" operations.
//
// https://tools.ietf.org/html/rfc6902
type JSONPatch struct {
@@ -253,22 +275,22 @@ type JSONPatch struct {
Value any `json:"value,omitempty"`
}
-// JSONPatchSecret updates a secret in the Kubernetes API using a JSON patch.
-// It currently (2023-03-02) only supports "add" and "remove" operations.
-func (c *client) JSONPatchSecret(ctx context.Context, name string, patch []JSONPatch) error {
- for _, p := range patch {
+// JSONPatchResource updates a resource in the Kubernetes API using a JSON patch.
+// It currently (2024-11-15) only supports "add", "remove" and "replace" operations.
+func (c *client) JSONPatchResource(ctx context.Context, name, typ string, patches []JSONPatch) error {
+ for _, p := range patches {
if p.Op != "remove" && p.Op != "add" && p.Op != "replace" {
return fmt.Errorf("unsupported JSON patch operation: %q", p.Op)
}
}
- return c.doRequest(ctx, "PATCH", c.secretURL(name), patch, nil, setHeader("Content-Type", "application/json-patch+json"))
+ return c.kubeAPIRequest(ctx, "PATCH", c.resourceURL(name, typ), patches, nil, setHeader("Content-Type", "application/json-patch+json"))
}
// StrategicMergePatchSecret updates a secret in the Kubernetes API using a
// strategic merge patch.
// If a fieldManager is provided, it will be used to track the patch.
func (c *client) StrategicMergePatchSecret(ctx context.Context, name string, s *kubeapi.Secret, fieldManager string) error {
- surl := c.secretURL(name)
+ surl := c.resourceURL(name, TypeSecrets)
if fieldManager != "" {
uv := url.Values{
"fieldManager": {fieldManager},
@@ -277,7 +299,66 @@ func (c *client) StrategicMergePatchSecret(ctx context.Context, name string, s *
}
s.Namespace = c.ns
s.Name = name
- return c.doRequest(ctx, "PATCH", surl, s, nil, setHeader("Content-Type", "application/strategic-merge-patch+json"))
+ return c.kubeAPIRequest(ctx, "PATCH", surl, s, nil, setHeader("Content-Type", "application/strategic-merge-patch+json"))
+}
+
+// Event tries to ensure an Event associated with the Pod in which we are running. It is best effort - the event will be
+// created if the kube client on startup was able to determine the name and UID of this Pod from POD_NAME,POD_UID env
+// vars and if the permissions check for event creation succeeded. Events are keyed on reason - if an Event for the
+// current Pod with that reason already exists, its count and last timestamp will be updated, else a new Event will be
+// created.
+func (c *client) Event(ctx context.Context, typ, reason, msg string) error {
+ if !c.hasEventsPerms {
+ return nil
+ }
+ name := c.nameForEvent(reason)
+ ev, err := c.getEvent(ctx, name)
+ now := c.cl.Now()
+ if err != nil {
+ if !IsNotFoundErr(err) {
+ return err
+ }
+ // Event not found - create it
+ ev := kubeapi.Event{
+ ObjectMeta: kubeapi.ObjectMeta{
+ Name: name,
+ Namespace: c.ns,
+ },
+ Type: typ,
+ Reason: reason,
+ Message: msg,
+ Source: kubeapi.EventSource{
+ Component: c.name,
+ },
+ InvolvedObject: kubeapi.ObjectReference{
+ Name: c.podName,
+ Namespace: c.ns,
+ UID: c.podUID,
+ Kind: "Pod",
+ APIVersion: "v1",
+ },
+
+ FirstTimestamp: now,
+ LastTimestamp: now,
+ Count: 1,
+ }
+ return c.kubeAPIRequest(ctx, "POST", c.resourceURL("", typeEvents), &ev, nil)
+ }
+ // If the Event already exists, we patch its count and last timestamp. This ensures that when users run 'kubectl
+ // describe pod...', they see the event just once (but with a message of how many times it has appeared over
+ // last timestamp - first timestamp period of time).
+ count := ev.Count + 1
+ countPatch := JSONPatch{
+ Op: "replace",
+ Value: count,
+ Path: "/count",
+ }
+ tsPatch := JSONPatch{
+ Op: "replace",
+ Value: now,
+ Path: "/lastTimestamp",
+ }
+ return c.JSONPatchResource(ctx, name, typeEvents, []JSONPatch{countPatch, tsPatch})
}
// CheckSecretPermissions checks the secret access permissions of the current
@@ -293,7 +374,7 @@ func (c *client) StrategicMergePatchSecret(ctx context.Context, name string, s *
func (c *client) CheckSecretPermissions(ctx context.Context, secretName string) (canPatch, canCreate bool, err error) {
var errs []error
for _, verb := range []string{"get", "update"} {
- ok, err := c.checkPermission(ctx, verb, secretName)
+ ok, err := c.checkPermission(ctx, verb, TypeSecrets, secretName)
if err != nil {
log.Printf("error checking %s permission on secret %s: %v", verb, secretName, err)
} else if !ok {
@@ -303,12 +384,12 @@ func (c *client) CheckSecretPermissions(ctx context.Context, secretName string)
if len(errs) > 0 {
return false, false, multierr.New(errs...)
}
- canPatch, err = c.checkPermission(ctx, "patch", secretName)
+ canPatch, err = c.checkPermission(ctx, "patch", TypeSecrets, secretName)
if err != nil {
log.Printf("error checking patch permission on secret %s: %v", secretName, err)
return false, false, nil
}
- canCreate, err = c.checkPermission(ctx, "create", secretName)
+ canCreate, err = c.checkPermission(ctx, "create", TypeSecrets, secretName)
if err != nil {
log.Printf("error checking create permission on secret %s: %v", secretName, err)
return false, false, nil
@@ -316,19 +397,64 @@ func (c *client) CheckSecretPermissions(ctx context.Context, secretName string)
return canPatch, canCreate, nil
}
-// checkPermission reports whether the current pod has permission to use the
-// given verb (e.g. get, update, patch, create) on secretName.
-func (c *client) checkPermission(ctx context.Context, verb, secretName string) (bool, error) {
+func IsNotFoundErr(err error) bool {
+ if st, ok := err.(*kubeapi.Status); ok && st.Code == 404 {
+ return true
+ }
+ return false
+}
+
+// setEventPerms checks whether this client will be able to write tailscaled Events to its Pod and updates the state
+// accordingly. If it determines that the client can not write Events, any subsequent calls to client.Event will be a
+// no-op.
+func (c *client) setEventPerms() {
+ name := os.Getenv("POD_NAME")
+ uid := os.Getenv("POD_UID")
+ hasPerms := false
+ defer func() {
+ c.podName = name
+ c.podUID = uid
+ c.hasEventsPerms = hasPerms
+ if !hasPerms {
+ log.Printf(`kubeclient: this client is not able to write tailscaled Events to the Pod in which it is running.
+ To help with future debugging you can make it able to write Events by giving it get,create,patch permissions for Events in the Pod namespace
+ and setting POD_NAME, POD_UID env vars for the Pod.`)
+ }
+ }()
+ if name == "" || uid == "" {
+ return
+ }
+ for _, verb := range []string{"get", "create", "patch"} {
+ can, err := c.checkPermission(context.Background(), verb, typeEvents, "")
+ if err != nil {
+ log.Printf("kubeclient: error checking Events permissions: %v", err)
+ return
+ }
+ if !can {
+ return
+ }
+ }
+ hasPerms = true
+ return
+}
+
+// checkPermission reports whether the current pod has permission to use the given verb (e.g. get, update, patch,
+// create) on the given resource type. If name is not an empty string, the check will be for the resource with
+// the given name only.
+func (c *client) checkPermission(ctx context.Context, verb, typ, name string) (bool, error) {
+ ra := map[string]any{
+ "namespace": c.ns,
+ "verb": verb,
+ "resource": typ,
+ }
+ if name != "" {
+ ra["name"] = name
+ }
sar := map[string]any{
"apiVersion": "authorization.k8s.io/v1",
"kind": "SelfSubjectAccessReview",
"spec": map[string]any{
- "resourceAttributes": map[string]any{
- "namespace": c.ns,
- "verb": verb,
- "resource": "secrets",
- "name": secretName,
- },
+ "resourceAttributes": ra,
},
}
var res struct {
@@ -337,15 +463,32 @@ func (c *client) checkPermission(ctx context.Context, verb, secretName string) (
} `json:"status"`
}
url := c.url + "/apis/authorization.k8s.io/v1/selfsubjectaccessreviews"
- if err := c.doRequest(ctx, "POST", url, sar, &res); err != nil {
+ if err := c.kubeAPIRequest(ctx, "POST", url, sar, &res); err != nil {
return false, err
}
return res.Status.Allowed, nil
}
-func IsNotFoundErr(err error) bool {
- if st, ok := err.(*kubeapi.Status); ok && st.Code == 404 {
- return true
+// resourceURL returns a URL that can be used to interact with the given resource type and, if name is not empty string,
+// the named resource of that type.
+// Note that this only works for core/v1 resource types.
+func (c *client) resourceURL(name, typ string) string {
+ if name == "" {
+ return fmt.Sprintf("%s/api/v1/namespaces/%s/%s", c.url, c.ns, typ)
}
- return false
+ return fmt.Sprintf("%s/api/v1/namespaces/%s/%s/%s", c.url, c.ns, typ, name)
+}
+
+// nameForEvent returns a name for the Event that uniquely identifies Event with that reason for the current Pod.
+func (c *client) nameForEvent(reason string) string {
+ return fmt.Sprintf("%s.%s.%s", c.podName, c.podUID, strings.ToLower(reason))
+}
+
+// getEvent fetches the event from the Kubernetes API.
+func (c *client) getEvent(ctx context.Context, name string) (*kubeapi.Event, error) {
+ e := &kubeapi.Event{}
+ if err := c.kubeAPIRequest(ctx, "GET", c.resourceURL(name, typeEvents), nil, e); err != nil {
+ return nil, err
+ }
+ return e, nil
}
diff --git a/kube/kubeclient/client_test.go b/kube/kubeclient/client_test.go
new file mode 100644
index 0000000000000..31878befe4106
--- /dev/null
+++ b/kube/kubeclient/client_test.go
@@ -0,0 +1,151 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package kubeclient
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "tailscale.com/kube/kubeapi"
+ "tailscale.com/tstest"
+)
+
+func Test_client_Event(t *testing.T) {
+ cl := &tstest.Clock{}
+ tests := []struct {
+ name string
+ typ string
+ reason string
+ msg string
+ argSets []args
+ wantErr bool
+ }{
+ {
+ name: "new_event_gets_created",
+ typ: "Normal",
+ reason: "TestReason",
+ msg: "TestMessage",
+ argSets: []args{
+ { // request to GET event returns not found
+ wantsMethod: "GET",
+ wantsURL: "test-apiserver/api/v1/namespaces/test-ns/events/test-pod.test-uid.testreason",
+ setErr: &kubeapi.Status{Code: 404},
+ },
+ { // sends POST request to create event
+ wantsMethod: "POST",
+ wantsURL: "test-apiserver/api/v1/namespaces/test-ns/events",
+ wantsIn: &kubeapi.Event{
+ ObjectMeta: kubeapi.ObjectMeta{
+ Name: "test-pod.test-uid.testreason",
+ Namespace: "test-ns",
+ },
+ Type: "Normal",
+ Reason: "TestReason",
+ Message: "TestMessage",
+ Source: kubeapi.EventSource{
+ Component: "test-client",
+ },
+ InvolvedObject: kubeapi.ObjectReference{
+ Name: "test-pod",
+ UID: "test-uid",
+ Namespace: "test-ns",
+ APIVersion: "v1",
+ Kind: "Pod",
+ },
+ FirstTimestamp: cl.Now(),
+ LastTimestamp: cl.Now(),
+ Count: 1,
+ },
+ },
+ },
+ },
+ {
+ name: "existing_event_gets_patched",
+ typ: "Warning",
+ reason: "TestReason",
+ msg: "TestMsg",
+ argSets: []args{
+ { // request to GET event does not error - this is enough to assume that event exists
+ wantsMethod: "GET",
+ wantsURL: "test-apiserver/api/v1/namespaces/test-ns/events/test-pod.test-uid.testreason",
+ setOut: []byte(`{"count":2}`),
+ },
+ { // sends PATCH request to update the event
+ wantsMethod: "PATCH",
+ wantsURL: "test-apiserver/api/v1/namespaces/test-ns/events/test-pod.test-uid.testreason",
+ wantsIn: []JSONPatch{
+ {Op: "replace", Path: "/count", Value: int32(3)},
+ {Op: "replace", Path: "/lastTimestamp", Value: cl.Now()},
+ },
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := &client{
+ cl: cl,
+ name: "test-client",
+ podName: "test-pod",
+ podUID: "test-uid",
+ url: "test-apiserver",
+ ns: "test-ns",
+ kubeAPIRequest: fakeKubeAPIRequest(t, tt.argSets),
+ hasEventsPerms: true,
+ }
+ if err := c.Event(context.Background(), tt.typ, tt.reason, tt.msg); (err != nil) != tt.wantErr {
+ t.Errorf("client.Event() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+// args is a set of values for testing a single call to client.kubeAPIRequest.
+type args struct {
+ // wantsMethod is the expected value of 'method' arg.
+ wantsMethod string
+ // wantsURL is the expected value of 'url' arg.
+ wantsURL string
+ // wantsIn is the expected value of 'in' arg.
+ wantsIn any
+ // setOut can be set to a byte slice representing valid JSON. If set 'out' arg will get set to the unmarshalled
+ // JSON object.
+ setOut []byte
+ // setErr is the error that kubeAPIRequest will return.
+ setErr error
+}
+
+// fakeKubeAPIRequest can be used to test that a series of calls to client.kubeAPIRequest gets called with expected
+// values and to set these calls to return preconfigured values. 'argSets' should be set to a slice of expected
+// arguments and should-be return values of a series of kubeAPIRequest calls.
+func fakeKubeAPIRequest(t *testing.T, argSets []args) kubeAPIRequestFunc {
+ count := 0
+ f := func(ctx context.Context, gotMethod, gotUrl string, gotIn, gotOut any, opts ...func(*http.Request)) error {
+ t.Helper()
+ if count >= len(argSets) {
+ t.Fatalf("unexpected call to client.kubeAPIRequest, expected %d calls, but got a %dth call", len(argSets), count+1)
+ }
+ a := argSets[count]
+ if gotMethod != a.wantsMethod {
+ t.Errorf("[%d] got method %q, wants method %q", count, gotMethod, a.wantsMethod)
+ }
+ if gotUrl != a.wantsURL {
+ t.Errorf("[%d] got URL %q, wants URL %q", count, gotUrl, a.wantsURL)
+ }
+ if d := cmp.Diff(gotIn, a.wantsIn); d != "" {
+ t.Errorf("[%d] unexpected payload (-want + got):\n%s", count, d)
+ }
+ if len(a.setOut) != 0 {
+ if err := json.Unmarshal(a.setOut, gotOut); err != nil {
+ t.Fatalf("[%d] error unmarshalling output: %v", count, err)
+ }
+ }
+ count++
+ return a.setErr
+ }
+ return f
+}
diff --git a/kube/kubeclient/fake_client.go b/kube/kubeclient/fake_client.go
index 3cef3d27ee0df..5716ca31b2f4c 100644
--- a/kube/kubeclient/fake_client.go
+++ b/kube/kubeclient/fake_client.go
@@ -29,7 +29,11 @@ func (fc *FakeClient) SetDialer(dialer func(ctx context.Context, network, addr s
func (fc *FakeClient) StrategicMergePatchSecret(context.Context, string, *kubeapi.Secret, string) error {
return nil
}
-func (fc *FakeClient) JSONPatchSecret(context.Context, string, []JSONPatch) error {
+func (fc *FakeClient) Event(context.Context, string, string, string) error {
+ return nil
+}
+
+func (fc *FakeClient) JSONPatchResource(context.Context, string, string, []JSONPatch) error {
return nil
}
func (fc *FakeClient) UpdateSecret(context.Context, *kubeapi.Secret) error { return nil }
diff --git a/kube/kubetypes/metrics.go b/kube/kubetypes/metrics.go
deleted file mode 100644
index b183f1f6f79f7..0000000000000
--- a/kube/kubetypes/metrics.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) Tailscale Inc & AUTHORS
-// SPDX-License-Identifier: BSD-3-Clause
-
-package kubetypes
-
-const (
- // Hostinfo App values for the Tailscale Kubernetes Operator components.
- AppOperator = "k8s-operator"
- AppAPIServerProxy = "k8s-operator-proxy"
- AppIngressProxy = "k8s-operator-ingress-proxy"
- AppIngressResource = "k8s-operator-ingress-resource"
- AppEgressProxy = "k8s-operator-egress-proxy"
- AppConnector = "k8s-operator-connector-resource"
-
- // Clientmetrics for Tailscale Kubernetes Operator components
- MetricIngressProxyCount = "k8s_ingress_proxies" // L3
- MetricIngressResourceCount = "k8s_ingress_resources" // L7
- MetricEgressProxyCount = "k8s_egress_proxies"
- MetricConnectorResourceCount = "k8s_connector_resources"
- MetricConnectorWithSubnetRouterCount = "k8s_connector_subnetrouter_resources"
- MetricConnectorWithExitNodeCount = "k8s_connector_exitnode_resources"
- MetricNameserverCount = "k8s_nameserver_resources"
- MetricRecorderCount = "k8s_recorder_resources"
- MetricEgressServiceCount = "k8s_egress_service_resources"
- MetricProxyGroupCount = "k8s_proxygroup_resources"
-)
diff --git a/kube/kubetypes/types.go b/kube/kubetypes/types.go
new file mode 100644
index 0000000000000..3c97d8c7da2c5
--- /dev/null
+++ b/kube/kubetypes/types.go
@@ -0,0 +1,45 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package kubetypes
+
+const (
+ // Hostinfo App values for the Tailscale Kubernetes Operator components.
+ AppOperator = "k8s-operator"
+ AppAPIServerProxy = "k8s-operator-proxy"
+ AppIngressProxy = "k8s-operator-ingress-proxy"
+ AppIngressResource = "k8s-operator-ingress-resource"
+ AppEgressProxy = "k8s-operator-egress-proxy"
+ AppConnector = "k8s-operator-connector-resource"
+ AppProxyGroupEgress = "k8s-operator-proxygroup-egress"
+ AppProxyGroupIngress = "k8s-operator-proxygroup-ingress"
+
+ // Clientmetrics for Tailscale Kubernetes Operator components
+ MetricIngressProxyCount = "k8s_ingress_proxies" // L3
+ MetricIngressResourceCount = "k8s_ingress_resources" // L7
+ MetricEgressProxyCount = "k8s_egress_proxies"
+ MetricConnectorResourceCount = "k8s_connector_resources"
+ MetricConnectorWithSubnetRouterCount = "k8s_connector_subnetrouter_resources"
+ MetricConnectorWithExitNodeCount = "k8s_connector_exitnode_resources"
+ MetricConnectorWithAppConnectorCount = "k8s_connector_appconnector_resources"
+ MetricNameserverCount = "k8s_nameserver_resources"
+ MetricRecorderCount = "k8s_recorder_resources"
+ MetricEgressServiceCount = "k8s_egress_service_resources"
+ MetricProxyGroupEgressCount = "k8s_proxygroup_egress_resources"
+ MetricProxyGroupIngressCount = "k8s_proxygroup_ingress_resources"
+
+ // Keys that containerboot writes to the state file that can be used to determine its state. These are fields set
+ // in the Tailscale state Secret and are mostly used by the Tailscale Kubernetes operator to determine
+ // the state of this tailscale device.
+ KeyDeviceID string = "device_id" // node stable ID of the device
+ KeyDeviceFQDN string = "device_fqdn" // device's tailnet hostname
+ KeyDeviceIPs string = "device_ips" // device's tailnet IPs
+ KeyPodUID string = "pod_uid" // Pod UID
+ // KeyCapVer contains Tailscale capability version of this proxy instance.
+ KeyCapVer string = "tailscale_capver"
+ // KeyHTTPSEndpoint is a name of a field that can be set to the value of any HTTPS endpoint currently exposed by
+ // this device to the tailnet. This is used by the Kubernetes operator Ingress proxy to communicate to the operator
+ // that cluster workloads behind the Ingress can now be accessed via the given DNS name over HTTPS.
+ KeyHTTPSEndpoint string = "https_endpoint"
+ ValueNoHTTPS string = "no-https"
+)
diff --git a/licenses/android.md b/licenses/android.md
index ef53117e8ceb7..94aeb3fc0615f 100644
--- a/licenses/android.md
+++ b/licenses/android.md
@@ -36,7 +36,6 @@ Client][]. See also the dependencies in the [Tailscale CLI][].
- [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE))
- [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE))
- [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE))
- - [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE))
- [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE))
- [github.com/illarion/gonotify/v2](https://pkg.go.dev/github.com/illarion/gonotify/v2) ([MIT](https://github.com/illarion/gonotify/blob/v2.0.3/LICENSE))
- [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/8c70d406f6d2/LICENSE))
@@ -57,7 +56,6 @@ Client][]. See also the dependencies in the [Tailscale CLI][].
- [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) ([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE))
- [github.com/tailscale/golang-x-crypto](https://pkg.go.dev/github.com/tailscale/golang-x-crypto) ([BSD-3-Clause](https://github.com/tailscale/golang-x-crypto/blob/3fde5e568aa4/LICENSE))
- [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE))
- - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE))
- [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE))
- [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/b535050b2aa4/LICENSE))
- [github.com/tailscale/tailscale-android/libtailscale](https://pkg.go.dev/github.com/tailscale/tailscale-android/libtailscale) ([BSD-3-Clause](https://github.com/tailscale/tailscale-android/blob/HEAD/LICENSE))
diff --git a/licenses/apple.md b/licenses/apple.md
index 4cb100c625942..aae006c95ede4 100644
--- a/licenses/apple.md
+++ b/licenses/apple.md
@@ -12,24 +12,23 @@ See also the dependencies in the [Tailscale CLI][].
- [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE))
- - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.30.4/LICENSE.txt))
+ - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.32.4/LICENSE.txt))
- [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.27.28/config/LICENSE.txt))
- [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.28/credentials/LICENSE.txt))
- [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.12/feature/ec2/imds/LICENSE.txt))
- - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.16/internal/configsources/LICENSE.txt))
- - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.16/internal/endpoints/v2/LICENSE.txt))
+ - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.23/internal/configsources/LICENSE.txt))
+ - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.23/internal/endpoints/v2/LICENSE.txt))
- [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.1/internal/ini/LICENSE.txt))
- - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.30.4/internal/sync/singleflight/LICENSE))
- - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.11.4/service/internal/accept-encoding/LICENSE.txt))
+ - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.32.4/internal/sync/singleflight/LICENSE))
+ - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.0/service/internal/accept-encoding/LICENSE.txt))
- [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.11.18/service/internal/presigned-url/LICENSE.txt))
- [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.45.0/service/ssm/LICENSE.txt))
- [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.22.5/service/sso/LICENSE.txt))
- [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.26.5/service/ssooidc/LICENSE.txt))
- [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.30.4/service/sts/LICENSE.txt))
- - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.20.4/LICENSE))
- - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.20.4/internal/sync/singleflight/LICENSE))
+ - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.0/LICENSE))
+ - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.0/internal/sync/singleflight/LICENSE))
- [github.com/bits-and-blooms/bitset](https://pkg.go.dev/github.com/bits-and-blooms/bitset) ([BSD-3-Clause](https://github.com/bits-and-blooms/bitset/blob/v1.13.0/LICENSE))
- - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt))
- [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE))
- [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md))
- [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE))
@@ -48,9 +47,9 @@ See also the dependencies in the [Tailscale CLI][].
- [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE))
- [github.com/josharian/native](https://pkg.go.dev/github.com/josharian/native) ([MIT](https://github.com/josharian/native/blob/5c7d0dd6ab86/license))
- [github.com/jsimonetti/rtnetlink](https://pkg.go.dev/github.com/jsimonetti/rtnetlink) ([MIT](https://github.com/jsimonetti/rtnetlink/blob/v1.4.1/LICENSE.md))
- - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.8/LICENSE))
- - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.8/internal/snapref/LICENSE))
- - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.8/zstd/internal/xxhash/LICENSE.txt))
+ - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.11/LICENSE))
+ - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.11/internal/snapref/LICENSE))
+ - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt))
- [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE))
- [github.com/mdlayher/genetlink](https://pkg.go.dev/github.com/mdlayher/genetlink) ([MIT](https://github.com/mdlayher/genetlink/blob/v1.3.2/LICENSE.md))
- [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/v1.7.2/LICENSE.md))
@@ -63,7 +62,6 @@ See also the dependencies in the [Tailscale CLI][].
- [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) ([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE))
- [github.com/tailscale/golang-x-crypto](https://pkg.go.dev/github.com/tailscale/golang-x-crypto) ([BSD-3-Clause](https://github.com/tailscale/golang-x-crypto/blob/3fde5e568aa4/LICENSE))
- [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE))
- - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE))
- [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE))
- [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/b535050b2aa4/LICENSE))
- [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/799c1978fafc/LICENSE))
@@ -74,13 +72,13 @@ See also the dependencies in the [Tailscale CLI][].
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE))
- [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE))
- [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE))
- - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.25.0:LICENSE))
- - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/fe59bbe5:LICENSE))
- - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.27.0:LICENSE))
- - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.7.0:LICENSE))
- - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.22.0:LICENSE))
- - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.22.0:LICENSE))
- - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.16.0:LICENSE))
+ - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.28.0:LICENSE))
+ - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/fc45aab8:LICENSE))
+ - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.30.0:LICENSE))
+ - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.9.0:LICENSE))
+ - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.27.0:LICENSE))
+ - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.25.0:LICENSE))
+ - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.20.0:LICENSE))
- [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.5.0:LICENSE))
- [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/64c016c92987/LICENSE))
- [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE))
diff --git a/licenses/tailscale.md b/licenses/tailscale.md
index 544aa91cecab1..8f05acedcf93f 100644
--- a/licenses/tailscale.md
+++ b/licenses/tailscale.md
@@ -58,9 +58,9 @@ Some packages may only be included on certain architectures or operating systems
- [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE))
- [github.com/josharian/native](https://pkg.go.dev/github.com/josharian/native) ([MIT](https://github.com/josharian/native/blob/5c7d0dd6ab86/license))
- [github.com/kballard/go-shellquote](https://pkg.go.dev/github.com/kballard/go-shellquote) ([MIT](https://github.com/kballard/go-shellquote/blob/95032a82bc51/LICENSE))
- - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.4/LICENSE))
- - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.4/internal/snapref/LICENSE))
- - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.4/zstd/internal/xxhash/LICENSE.txt))
+ - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.11/LICENSE))
+ - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.11/internal/snapref/LICENSE))
+ - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt))
- [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE))
- [github.com/kr/fs](https://pkg.go.dev/github.com/kr/fs) ([BSD-3-Clause](https://github.com/kr/fs/blob/v0.1.0/LICENSE))
- [github.com/mattn/go-colorable](https://pkg.go.dev/github.com/mattn/go-colorable) ([MIT](https://github.com/mattn/go-colorable/blob/v0.1.13/LICENSE))
@@ -80,12 +80,11 @@ Some packages may only be included on certain architectures or operating systems
- [github.com/tailscale/certstore](https://pkg.go.dev/github.com/tailscale/certstore) ([MIT](https://github.com/tailscale/certstore/blob/d3fa0460f47e/LICENSE.md))
- [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE))
- [github.com/tailscale/golang-x-crypto](https://pkg.go.dev/github.com/tailscale/golang-x-crypto) ([BSD-3-Clause](https://github.com/tailscale/golang-x-crypto/blob/3fde5e568aa4/LICENSE))
- - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE))
- [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE))
- [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/b535050b2aa4/LICENSE))
- [github.com/tailscale/web-client-prebuilt](https://pkg.go.dev/github.com/tailscale/web-client-prebuilt) ([BSD-3-Clause](https://github.com/tailscale/web-client-prebuilt/blob/5db17b287bf1/LICENSE))
- [github.com/tailscale/wf](https://pkg.go.dev/github.com/tailscale/wf) ([BSD-3-Clause](https://github.com/tailscale/wf/blob/6fbb0a674ee6/LICENSE))
- - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/799c1978fafc/LICENSE))
+ - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/4e883d38c8d3/LICENSE))
- [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE))
- [github.com/tcnksm/go-httpstat](https://pkg.go.dev/github.com/tcnksm/go-httpstat) ([MIT](https://github.com/tcnksm/go-httpstat/blob/v0.2.0/LICENSE))
- [github.com/toqueteos/webbrowser](https://pkg.go.dev/github.com/toqueteos/webbrowser) ([MIT](https://github.com/toqueteos/webbrowser/blob/v1.2.0/LICENSE.md))
@@ -99,8 +98,8 @@ Some packages may only be included on certain architectures or operating systems
- [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/1b970713:LICENSE))
- [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.27.0:LICENSE))
- [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.16.0:LICENSE))
- - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.7.0:LICENSE))
- - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.22.0:LICENSE))
+ - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.9.0:LICENSE))
+ - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.27.0:LICENSE))
- [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.22.0:LICENSE))
- [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.16.0:LICENSE))
- [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.5.0:LICENSE))
diff --git a/licenses/windows.md b/licenses/windows.md
index e7f7f6f13ca08..4cb35e8de2785 100644
--- a/licenses/windows.md
+++ b/licenses/windows.md
@@ -13,22 +13,22 @@ Windows][]. See also the dependencies in the [Tailscale CLI][].
- [github.com/alexbrainman/sspi](https://pkg.go.dev/github.com/alexbrainman/sspi) ([BSD-3-Clause](https://github.com/alexbrainman/sspi/blob/1a75b4708caa/LICENSE))
- [github.com/apenwarr/fixconsole](https://pkg.go.dev/github.com/apenwarr/fixconsole) ([Apache-2.0](https://github.com/apenwarr/fixconsole/blob/5a9f6489cc29/LICENSE))
- [github.com/apenwarr/w32](https://pkg.go.dev/github.com/apenwarr/w32) ([BSD-3-Clause](https://github.com/apenwarr/w32/blob/aa00fece76ab/LICENSE))
- - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.30.4/LICENSE.txt))
+ - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.32.4/LICENSE.txt))
- [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.27.28/config/LICENSE.txt))
- [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.28/credentials/LICENSE.txt))
- [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.12/feature/ec2/imds/LICENSE.txt))
- - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.16/internal/configsources/LICENSE.txt))
- - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.16/internal/endpoints/v2/LICENSE.txt))
+ - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.23/internal/configsources/LICENSE.txt))
+ - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.23/internal/endpoints/v2/LICENSE.txt))
- [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.1/internal/ini/LICENSE.txt))
- - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.30.4/internal/sync/singleflight/LICENSE))
- - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.11.4/service/internal/accept-encoding/LICENSE.txt))
+ - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.32.4/internal/sync/singleflight/LICENSE))
+ - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.0/service/internal/accept-encoding/LICENSE.txt))
- [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.11.18/service/internal/presigned-url/LICENSE.txt))
- [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.45.0/service/ssm/LICENSE.txt))
- [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.22.5/service/sso/LICENSE.txt))
- [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.26.5/service/ssooidc/LICENSE.txt))
- [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.30.4/service/sts/LICENSE.txt))
- - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.20.4/LICENSE))
- - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.20.4/internal/sync/singleflight/LICENSE))
+ - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.0/LICENSE))
+ - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.0/internal/sync/singleflight/LICENSE))
- [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE))
- [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/b75a8a7d7eb0/LICENSE))
- [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE))
@@ -44,9 +44,9 @@ Windows][]. See also the dependencies in the [Tailscale CLI][].
- [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE))
- [github.com/josharian/native](https://pkg.go.dev/github.com/josharian/native) ([MIT](https://github.com/josharian/native/blob/5c7d0dd6ab86/license))
- [github.com/jsimonetti/rtnetlink](https://pkg.go.dev/github.com/jsimonetti/rtnetlink) ([MIT](https://github.com/jsimonetti/rtnetlink/blob/v1.4.1/LICENSE.md))
- - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.8/LICENSE))
- - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.8/internal/snapref/LICENSE))
- - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.8/zstd/internal/xxhash/LICENSE.txt))
+ - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.11/LICENSE))
+ - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.11/internal/snapref/LICENSE))
+ - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt))
- [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/v1.7.2/LICENSE.md))
- [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md))
- [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE))
@@ -57,23 +57,23 @@ Windows][]. See also the dependencies in the [Tailscale CLI][].
- [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE))
- [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE))
- [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE))
- - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/52804fd3056a/LICENSE))
- - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/6580b55d49ca/LICENSE))
+ - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/8865133fd3ef/LICENSE))
+ - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/28f7e73c7afb/LICENSE))
- [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE))
- [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE))
- [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE))
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE))
- [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE))
- [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE))
- - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.25.0:LICENSE))
- - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/fe59bbe5:LICENSE))
+ - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.28.0:LICENSE))
+ - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/fc45aab8:LICENSE))
- [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.18.0:LICENSE))
- [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.19.0:LICENSE))
- - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.27.0:LICENSE))
- - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.7.0:LICENSE))
- - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.22.0:LICENSE))
- - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.22.0:LICENSE))
- - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.16.0:LICENSE))
+ - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.30.0:LICENSE))
+ - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.9.0:LICENSE))
+ - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.27.0:LICENSE))
+ - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.25.0:LICENSE))
+ - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.20.0:LICENSE))
- [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2))
- [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3))
- [gopkg.in/Knetic/govaluate.v3](https://pkg.go.dev/gopkg.in/Knetic/govaluate.v3) ([MIT](https://github.com/Knetic/govaluate/blob/v3.0.0/LICENSE))
diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go
index 0d2af77f2d703..d657c4e9352f3 100644
--- a/logpolicy/logpolicy.go
+++ b/logpolicy/logpolicy.go
@@ -230,6 +230,9 @@ func LogsDir(logf logger.Logf) string {
logf("logpolicy: using $STATE_DIRECTORY, %q", systemdStateDir)
return systemdStateDir
}
+ case "js":
+ logf("logpolicy: no logs directory in the browser")
+ return ""
}
// Default to e.g. /var/lib/tailscale or /var/db/tailscale on Unix.
diff --git a/logtail/logtail.go b/logtail/logtail.go
index 9df164273d74c..13e8e85fd40f7 100644
--- a/logtail/logtail.go
+++ b/logtail/logtail.go
@@ -213,6 +213,7 @@ type Logger struct {
procSequence uint64
flushTimer tstime.TimerController // used when flushDelay is >0
writeBuf [bufferSize]byte // owned by Write for reuse
+ bytesBuf bytes.Buffer // owned by appendTextOrJSONLocked for reuse
jsonDec jsontext.Decoder // owned by appendTextOrJSONLocked for reuse
shutdownStartMu sync.Mutex // guards the closing of shutdownStart
@@ -725,9 +726,16 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte {
// whether it contains the reserved "logtail" name at the top-level.
var logtailKeyOffset, logtailValOffset, logtailValLength int
validJSON := func() bool {
- // TODO(dsnet): Avoid allocation of bytes.Buffer struct.
+ // The jsontext.NewDecoder API operates on an io.Reader, for which
+ // bytes.Buffer provides a means to convert a []byte into an io.Reader.
+ // However, bytes.NewBuffer normally allocates unless
+ // we immediately shallow copy it into a pre-allocated Buffer struct.
+ // See https://go.dev/issue/67004.
+ l.bytesBuf = *bytes.NewBuffer(src)
+ defer func() { l.bytesBuf = bytes.Buffer{} }() // avoid pinning src
+
dec := &l.jsonDec
- dec.Reset(bytes.NewBuffer(src))
+ dec.Reset(&l.bytesBuf)
if tok, err := dec.ReadToken(); tok.Kind() != '{' || err != nil {
return false
}
diff --git a/net/captivedetection/captivedetection.go b/net/captivedetection/captivedetection.go
index c6e8bca3a19a2..7d598d853349d 100644
--- a/net/captivedetection/captivedetection.go
+++ b/net/captivedetection/captivedetection.go
@@ -136,26 +136,31 @@ func interfaceNameDoesNotNeedCaptiveDetection(ifName string, goos string) bool {
func (d *Detector) detectOnInterface(ctx context.Context, ifIndex int, endpoints []Endpoint) bool {
defer d.httpClient.CloseIdleConnections()
- d.logf("[v2] %d available captive portal detection endpoints: %v", len(endpoints), endpoints)
+ use := min(len(endpoints), 5)
+ endpoints = endpoints[:use]
+ d.logf("[v2] %d available captive portal detection endpoints; trying %v", len(endpoints), use)
// We try to detect the captive portal more quickly by making requests to multiple endpoints concurrently.
var wg sync.WaitGroup
resultCh := make(chan bool, len(endpoints))
- for i, e := range endpoints {
- if i >= 5 {
- // Try a maximum of 5 endpoints, break out (returning false) if we run of attempts.
- break
- }
+ // Once any goroutine detects a captive portal, we shut down the others.
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ for _, e := range endpoints {
wg.Add(1)
go func(endpoint Endpoint) {
defer wg.Done()
found, err := d.verifyCaptivePortalEndpoint(ctx, endpoint, ifIndex)
if err != nil {
- d.logf("[v1] checkCaptivePortalEndpoint failed with endpoint %v: %v", endpoint, err)
+ if ctx.Err() == nil {
+ d.logf("[v1] checkCaptivePortalEndpoint failed with endpoint %v: %v", endpoint, err)
+ }
return
}
if found {
+ cancel() // one match is good enough
resultCh <- true
}
}(e)
diff --git a/net/captivedetection/captivedetection_test.go b/net/captivedetection/captivedetection_test.go
index e74273afd922e..29a197d31f263 100644
--- a/net/captivedetection/captivedetection_test.go
+++ b/net/captivedetection/captivedetection_test.go
@@ -7,10 +7,12 @@ import (
"context"
"runtime"
"sync"
+ "sync/atomic"
"testing"
- "tailscale.com/cmd/testwrapper/flakytest"
"tailscale.com/net/netmon"
+ "tailscale.com/syncs"
+ "tailscale.com/tstest/nettest"
)
func TestAvailableEndpointsAlwaysAtLeastTwo(t *testing.T) {
@@ -36,25 +38,46 @@ func TestDetectCaptivePortalReturnsFalse(t *testing.T) {
}
}
-func TestAllEndpointsAreUpAndReturnExpectedResponse(t *testing.T) {
- flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/13019")
+func TestEndpointsAreUpAndReturnExpectedResponse(t *testing.T) {
+ nettest.SkipIfNoNetwork(t)
+
d := NewDetector(t.Logf)
endpoints := availableEndpoints(nil, 0, t.Logf, runtime.GOOS)
+ t.Logf("testing %d endpoints", len(endpoints))
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ var good atomic.Bool
var wg sync.WaitGroup
+ sem := syncs.NewSemaphore(5)
for _, e := range endpoints {
wg.Add(1)
go func(endpoint Endpoint) {
defer wg.Done()
- found, err := d.verifyCaptivePortalEndpoint(context.Background(), endpoint, 0)
- if err != nil {
- t.Errorf("verifyCaptivePortalEndpoint failed with endpoint %v: %v", endpoint, err)
+
+ if !sem.AcquireContext(ctx) {
+ return
+ }
+ defer sem.Release()
+
+ found, err := d.verifyCaptivePortalEndpoint(ctx, endpoint, 0)
+ if err != nil && ctx.Err() == nil {
+ t.Logf("verifyCaptivePortalEndpoint failed with endpoint %v: %v", endpoint, err)
}
if found {
- t.Errorf("verifyCaptivePortalEndpoint with endpoint %v says we're behind a captive portal, but we aren't", endpoint)
+ t.Logf("verifyCaptivePortalEndpoint with endpoint %v says we're behind a captive portal, but we aren't", endpoint)
+ return
}
+ good.Store(true)
+ t.Logf("endpoint good: %v", endpoint)
+ cancel()
}(e)
}
wg.Wait()
+
+ if !good.Load() {
+ t.Errorf("no good endpoints found")
+ }
}
diff --git a/net/connstats/stats.go b/net/connstats/stats.go
index dbcd946b82d9a..4e6d8e109aaad 100644
--- a/net/connstats/stats.go
+++ b/net/connstats/stats.go
@@ -131,23 +131,23 @@ func (s *Statistics) updateVirtual(b []byte, receive bool) {
s.virtual[conn] = cnts
}
-// UpdateTxPhysical updates the counters for a transmitted wireguard packet
+// UpdateTxPhysical updates the counters for zero or more transmitted wireguard packets.
// The src is always a Tailscale IP address, representing some remote peer.
// The dst is a remote IP address and port that corresponds
// with some physical peer backing the Tailscale IP address.
-func (s *Statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, n int) {
- s.updatePhysical(src, dst, n, false)
+func (s *Statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) {
+ s.updatePhysical(src, dst, packets, bytes, false)
}
-// UpdateRxPhysical updates the counters for a received wireguard packet.
+// UpdateRxPhysical updates the counters for zero or more received wireguard packets.
// The src is always a Tailscale IP address, representing some remote peer.
// The dst is a remote IP address and port that corresponds
// with some physical peer backing the Tailscale IP address.
-func (s *Statistics) UpdateRxPhysical(src netip.Addr, dst netip.AddrPort, n int) {
- s.updatePhysical(src, dst, n, true)
+func (s *Statistics) UpdateRxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) {
+ s.updatePhysical(src, dst, packets, bytes, true)
}
-func (s *Statistics) updatePhysical(src netip.Addr, dst netip.AddrPort, n int, receive bool) {
+func (s *Statistics) updatePhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int, receive bool) {
conn := netlogtype.Connection{Src: netip.AddrPortFrom(src, 0), Dst: dst}
s.mu.Lock()
@@ -157,11 +157,11 @@ func (s *Statistics) updatePhysical(src netip.Addr, dst netip.AddrPort, n int, r
return
}
if receive {
- cnts.RxPackets++
- cnts.RxBytes += uint64(n)
+ cnts.RxPackets += uint64(packets)
+ cnts.RxBytes += uint64(bytes)
} else {
- cnts.TxPackets++
- cnts.TxBytes += uint64(n)
+ cnts.TxPackets += uint64(packets)
+ cnts.TxBytes += uint64(bytes)
}
s.physical[conn] = cnts
}
diff --git a/net/dns/manager.go b/net/dns/manager.go
index 51a0fa12cba63..13cb2d84e1930 100644
--- a/net/dns/manager.go
+++ b/net/dns/manager.go
@@ -8,6 +8,7 @@ import (
"context"
"encoding/binary"
"errors"
+ "fmt"
"io"
"net"
"net/netip"
@@ -156,11 +157,11 @@ func (m *Manager) setLocked(cfg Config) error {
return err
}
if err := m.os.SetDNS(ocfg); err != nil {
- m.health.SetDNSOSHealth(err)
+ m.health.SetUnhealthy(osConfigurationSetWarnable, health.Args{health.ArgError: err.Error()})
return err
}
- m.health.SetDNSOSHealth(nil)
+ m.health.SetHealthy(osConfigurationSetWarnable)
m.config = &cfg
return nil
@@ -217,6 +218,26 @@ func compileHostEntries(cfg Config) (hosts []*HostEntry) {
return hosts
}
+var osConfigurationReadWarnable = health.Register(&health.Warnable{
+ Code: "dns-read-os-config-failed",
+ Title: "Failed to read system DNS configuration",
+ Text: func(args health.Args) string {
+ return fmt.Sprintf("Tailscale failed to fetch the DNS configuration of your device: %v", args[health.ArgError])
+ },
+ Severity: health.SeverityLow,
+ DependsOn: []*health.Warnable{health.NetworkStatusWarnable},
+})
+
+var osConfigurationSetWarnable = health.Register(&health.Warnable{
+ Code: "dns-set-os-config-failed",
+ Title: "Failed to set system DNS configuration",
+ Text: func(args health.Args) string {
+ return fmt.Sprintf("Tailscale failed to set the DNS configuration of your device: %v", args[health.ArgError])
+ },
+ Severity: health.SeverityMedium,
+ DependsOn: []*health.Warnable{health.NetworkStatusWarnable},
+})
+
// compileConfig converts cfg into a quad-100 resolver configuration
// and an OS-level configuration.
func (m *Manager) compileConfig(cfg Config) (rcfg resolver.Config, ocfg OSConfig, err error) {
@@ -320,9 +341,10 @@ func (m *Manager) compileConfig(cfg Config) (rcfg resolver.Config, ocfg OSConfig
// This is currently (2022-10-13) expected on certain iOS and macOS
// builds.
} else {
- m.health.SetDNSOSHealth(err)
+ m.health.SetUnhealthy(osConfigurationReadWarnable, health.Args{health.ArgError: err.Error()})
return resolver.Config{}, OSConfig{}, err
}
+ m.health.SetHealthy(osConfigurationReadWarnable)
}
if baseCfg == nil {
diff --git a/net/dns/resolvd.go b/net/dns/resolvd.go
index 9b067eb07b178..ad1a99c111997 100644
--- a/net/dns/resolvd.go
+++ b/net/dns/resolvd.go
@@ -57,6 +57,7 @@ func (m *resolvdManager) SetDNS(config OSConfig) error {
if len(newSearch) > 1 {
newResolvConf = append(newResolvConf, []byte(strings.Join(newSearch, " "))...)
+ newResolvConf = append(newResolvConf, '\n')
}
err = m.fs.WriteFile(resolvConf, newResolvConf, 0644)
@@ -123,6 +124,6 @@ func (m resolvdManager) readResolvConf() (config OSConfig, err error) {
}
func removeSearchLines(orig []byte) []byte {
- re := regexp.MustCompile(`(?m)^search\s+.+$`)
+ re := regexp.MustCompile(`(?ms)^search\s+.+$`)
return re.ReplaceAll(orig, []byte(""))
}
diff --git a/net/dns/resolved.go b/net/dns/resolved.go
index d82d3fc31d80a..1a7c8604101db 100644
--- a/net/dns/resolved.go
+++ b/net/dns/resolved.go
@@ -163,9 +163,9 @@ func (m *resolvedManager) run(ctx context.Context) {
}
conn.Signal(signals)
- // Reset backoff and SetNSOSHealth after successful on reconnect.
+ // Reset backoff and set osConfigurationSetWarnable to healthy after a successful reconnect.
bo.BackOff(ctx, nil)
- m.health.SetDNSOSHealth(nil)
+ m.health.SetHealthy(osConfigurationSetWarnable)
return nil
}
@@ -243,9 +243,12 @@ func (m *resolvedManager) run(ctx context.Context) {
// Set health while holding the lock, because this will
// graciously serialize the resync's health outcome with a
// concurrent SetDNS call.
- m.health.SetDNSOSHealth(err)
+
if err != nil {
m.logf("failed to configure systemd-resolved: %v", err)
+ m.health.SetUnhealthy(osConfigurationSetWarnable, health.Args{health.ArgError: err.Error()})
+ } else {
+ m.health.SetHealthy(osConfigurationSetWarnable)
}
}
}
diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go
index e341186ecf45e..f3e592d4f1993 100644
--- a/net/dns/resolver/forwarder_test.go
+++ b/net/dns/resolver/forwarder_test.go
@@ -27,6 +27,7 @@ import (
"tailscale.com/health"
"tailscale.com/net/netmon"
"tailscale.com/net/tsdial"
+ "tailscale.com/tstest"
"tailscale.com/types/dnstype"
)
@@ -276,6 +277,8 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on
tb.Fatal("cannot skip both UDP and TCP servers")
}
+ logf := tstest.WhileTestRunningLogger(tb)
+
tcpResponse := make([]byte, len(response)+2)
binary.BigEndian.PutUint16(tcpResponse, uint16(len(response)))
copy(tcpResponse[2:], response)
@@ -329,13 +332,13 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on
// Read the length header, then the buffer
var length uint16
if err := binary.Read(conn, binary.BigEndian, &length); err != nil {
- tb.Logf("error reading length header: %v", err)
+ logf("error reading length header: %v", err)
return
}
req := make([]byte, length)
n, err := io.ReadFull(conn, req)
if err != nil {
- tb.Logf("error reading query: %v", err)
+ logf("error reading query: %v", err)
return
}
req = req[:n]
@@ -343,7 +346,7 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on
// Write response
if _, err := conn.Write(tcpResponse); err != nil {
- tb.Logf("error writing response: %v", err)
+ logf("error writing response: %v", err)
return
}
}
@@ -367,7 +370,7 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on
handleUDP := func(addr netip.AddrPort, req []byte) {
onRequest(false, req)
if _, err := udpLn.WriteToUDPAddrPort(response, addr); err != nil {
- tb.Logf("error writing response: %v", err)
+ logf("error writing response: %v", err)
}
}
@@ -390,7 +393,7 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on
tb.Cleanup(func() {
tcpLn.Close()
udpLn.Close()
- tb.Logf("waiting for listeners to finish...")
+ logf("waiting for listeners to finish...")
wg.Wait()
})
return
@@ -450,7 +453,8 @@ func makeLargeResponse(tb testing.TB, domain string) (request, response []byte)
}
func runTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports ...uint16) ([]byte, error) {
- netMon, err := netmon.New(tb.Logf)
+ logf := tstest.WhileTestRunningLogger(tb)
+ netMon, err := netmon.New(logf)
if err != nil {
tb.Fatal(err)
}
@@ -458,7 +462,7 @@ func runTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports
var dialer tsdial.Dialer
dialer.SetNetMon(netMon)
- fwd := newForwarder(tb.Logf, netMon, nil, &dialer, new(health.Tracker), nil)
+ fwd := newForwarder(logf, netMon, nil, &dialer, new(health.Tracker), nil)
if modify != nil {
modify(fwd)
}
diff --git a/net/ipset/ipset.go b/net/ipset/ipset.go
index 622fd61d05c16..27c1e27ed4180 100644
--- a/net/ipset/ipset.go
+++ b/net/ipset/ipset.go
@@ -82,8 +82,8 @@ func NewContainsIPFunc(addrs views.Slice[netip.Prefix]) func(ip netip.Addr) bool
pathForTest("bart")
// Built a bart table.
t := &bart.Table[struct{}]{}
- for i := range addrs.Len() {
- t.Insert(addrs.At(i), struct{}{})
+ for _, p := range addrs.All() {
+ t.Insert(p, struct{}{})
}
return bartLookup(t)
}
@@ -99,8 +99,8 @@ func NewContainsIPFunc(addrs views.Slice[netip.Prefix]) func(ip netip.Addr) bool
// General case:
pathForTest("ip-map")
m := set.Set[netip.Addr]{}
- for i := range addrs.Len() {
- m.Add(addrs.At(i).Addr())
+ for _, p := range addrs.All() {
+ m.Add(p.Addr())
}
return ipInMap(m)
}
diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go
index 2149ac91a8841..7930f88f6dce6 100644
--- a/net/netcheck/netcheck.go
+++ b/net/netcheck/netcheck.go
@@ -85,13 +85,14 @@ const (
// Report contains the result of a single netcheck.
type Report struct {
- UDP bool // a UDP STUN round trip completed
- IPv6 bool // an IPv6 STUN round trip completed
- IPv4 bool // an IPv4 STUN round trip completed
- IPv6CanSend bool // an IPv6 packet was able to be sent
- IPv4CanSend bool // an IPv4 packet was able to be sent
- OSHasIPv6 bool // could bind a socket to ::1
- ICMPv4 bool // an ICMPv4 round trip completed
+ Now time.Time // the time the report was run
+ UDP bool // a UDP STUN round trip completed
+ IPv6 bool // an IPv6 STUN round trip completed
+ IPv4 bool // an IPv4 STUN round trip completed
+ IPv6CanSend bool // an IPv6 packet was able to be sent
+ IPv4CanSend bool // an IPv4 packet was able to be sent
+ OSHasIPv6 bool // could bind a socket to ::1
+ ICMPv4 bool // an ICMPv4 round trip completed
// MappingVariesByDestIP is whether STUN results depend which
// STUN server you're talking to (on IPv4).
@@ -235,6 +236,10 @@ type Client struct {
// If false, the default net.Resolver will be used, with no caching.
UseDNSCache bool
+ // ForcePreferredDERP, if non-zero, forces this DERP region to be preferred
+ // in all reports where the DERP is found to be reachable.
+ ForcePreferredDERP int
+
// For tests
testEnoughRegions int
testCaptivePortalDelay time.Duration
@@ -779,6 +784,12 @@ func (o *GetReportOpts) getLastDERPActivity(region int) time.Time {
return o.GetLastDERPActivity(region)
}
+func (c *Client) SetForcePreferredDERP(region int) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.ForcePreferredDERP = region
+}
+
// GetReport gets a report. The 'opts' argument is optional and can be nil.
// Callers are discouraged from passing a ctx with an arbitrary deadline as this
// may cause GetReport to return prematurely before all reporting methods have
@@ -1220,17 +1231,19 @@ func (c *Client) measureICMPLatency(ctx context.Context, reg *tailcfg.DERPRegion
// Try pinging the first node in the region
node := reg.Nodes[0]
- // Get the IPAddr by asking for the UDP address that we would use for
- // STUN and then using that IP.
- //
- // TODO(andrew-d): this is a bit ugly
- nodeAddr := c.nodeAddr(ctx, node, probeIPv4)
- if !nodeAddr.IsValid() {
+ if node.STUNPort < 0 {
+ // If STUN is disabled on a node, interpret that as meaning don't measure latency.
+ return 0, false, nil
+ }
+ const unusedPort = 0
+ stunAddrPort, ok := c.nodeAddrPort(ctx, node, unusedPort, probeIPv4)
+ if !ok {
return 0, false, fmt.Errorf("no address for node %v (v4-for-icmp)", node.Name)
}
+ ip := stunAddrPort.Addr()
addr := &net.IPAddr{
- IP: net.IP(nodeAddr.Addr().AsSlice()),
- Zone: nodeAddr.Addr().Zone(),
+ IP: net.IP(ip.AsSlice()),
+ Zone: ip.Zone(),
}
// Use the unique node.Name field as the packet data to reduce the
@@ -1274,6 +1287,9 @@ func (c *Client) logConciseReport(r *Report, dm *tailcfg.DERPMap) {
if r.CaptivePortal != "" {
fmt.Fprintf(w, " captiveportal=%v", r.CaptivePortal)
}
+ if c.ForcePreferredDERP != 0 {
+ fmt.Fprintf(w, " force=%v", c.ForcePreferredDERP)
+ }
fmt.Fprintf(w, " derp=%v", r.PreferredDERP)
if r.PreferredDERP != 0 {
fmt.Fprintf(w, " derpdist=")
@@ -1335,6 +1351,7 @@ func (c *Client) addReportHistoryAndSetPreferredDERP(rs *reportState, r *Report,
c.prev = map[time.Time]*Report{}
}
now := c.timeNow()
+ r.Now = now.UTC()
c.prev[now] = r
c.last = r
@@ -1431,6 +1448,21 @@ func (c *Client) addReportHistoryAndSetPreferredDERP(rs *reportState, r *Report,
// which undoes any region change we made above.
r.PreferredDERP = prevDERP
}
+ if c.ForcePreferredDERP != 0 {
+ // If the forced DERP region probed successfully, or has recent traffic,
+ // use it.
+ _, haveLatencySample := r.RegionLatency[c.ForcePreferredDERP]
+ var recentActivity bool
+ if lastHeard := rs.opts.getLastDERPActivity(c.ForcePreferredDERP); !lastHeard.IsZero() {
+ now := c.timeNow()
+ recentActivity = lastHeard.After(rs.start)
+ recentActivity = recentActivity || lastHeard.After(now.Add(-PreferredDERPFrameTime))
+ }
+
+ if haveLatencySample || recentActivity {
+ r.PreferredDERP = c.ForcePreferredDERP
+ }
+ }
}
func updateLatency(m map[int]time.Duration, regionID int, d time.Duration) {
@@ -1476,8 +1508,8 @@ func (rs *reportState) runProbe(ctx context.Context, dm *tailcfg.DERPMap, probe
return
}
- addr := c.nodeAddr(ctx, node, probe.proto)
- if !addr.IsValid() {
+ addr, ok := c.nodeAddrPort(ctx, node, node.STUNPort, probe.proto)
+ if !ok {
c.logf("netcheck.runProbe: named node %q has no %v address", probe.node, probe.proto)
return
}
@@ -1526,12 +1558,20 @@ func (rs *reportState) runProbe(ctx context.Context, dm *tailcfg.DERPMap, probe
c.vlogf("sent to %v", addr)
}
-// proto is 4 or 6
-// If it returns nil, the node is skipped.
-func (c *Client) nodeAddr(ctx context.Context, n *tailcfg.DERPNode, proto probeProto) (ap netip.AddrPort) {
- port := cmp.Or(n.STUNPort, 3478)
+// nodeAddrPort returns the IP:port to send STUN queries to for a given node.
+//
+// The provided port should be n.STUNPort, which may be negative to disable STUN.
+// If STUN is disabled for this node, it returns ok=false.
+// The port parameter is separate for the ICMP caller to provide a fake value.
+//
+// proto is [probeIPv4] or [probeIPv6].
+func (c *Client) nodeAddrPort(ctx context.Context, n *tailcfg.DERPNode, port int, proto probeProto) (_ netip.AddrPort, ok bool) {
+ var zero netip.AddrPort
if port < 0 || port > 1<<16-1 {
- return
+ return zero, false
+ }
+ if port == 0 {
+ port = 3478
}
if n.STUNTestIP != "" {
ip, err := netip.ParseAddr(n.STUNTestIP)
@@ -1544,7 +1584,7 @@ func (c *Client) nodeAddr(ctx context.Context, n *tailcfg.DERPNode, proto probeP
if proto == probeIPv6 && ip.Is4() {
return
}
- return netip.AddrPortFrom(ip, uint16(port))
+ return netip.AddrPortFrom(ip, uint16(port)), true
}
switch proto {
@@ -1552,20 +1592,20 @@ func (c *Client) nodeAddr(ctx context.Context, n *tailcfg.DERPNode, proto probeP
if n.IPv4 != "" {
ip, _ := netip.ParseAddr(n.IPv4)
if !ip.Is4() {
- return
+ return zero, false
}
- return netip.AddrPortFrom(ip, uint16(port))
+ return netip.AddrPortFrom(ip, uint16(port)), true
}
case probeIPv6:
if n.IPv6 != "" {
ip, _ := netip.ParseAddr(n.IPv6)
if !ip.Is6() {
- return
+ return zero, false
}
- return netip.AddrPortFrom(ip, uint16(port))
+ return netip.AddrPortFrom(ip, uint16(port)), true
}
default:
- return
+ return zero, false
}
// The default lookup function if we don't set UseDNSCache is to use net.DefaultResolver.
@@ -1607,13 +1647,13 @@ func (c *Client) nodeAddr(ctx context.Context, n *tailcfg.DERPNode, proto probeP
addrs, err := lookupIPAddr(ctx, n.HostName)
for _, a := range addrs {
if (a.Is4() && probeIsV4) || (a.Is6() && !probeIsV4) {
- return netip.AddrPortFrom(a, uint16(port))
+ return netip.AddrPortFrom(a, uint16(port)), true
}
}
if err != nil {
c.logf("netcheck: DNS lookup error for %q (node %q region %v): %v", n.HostName, n.Name, n.RegionID, err)
}
- return
+ return zero, false
}
func regionHasDERPNode(r *tailcfg.DERPRegion) bool {
diff --git a/net/netcheck/netcheck_test.go b/net/netcheck/netcheck_test.go
index b1256be10b059..88c19623d0f0a 100644
--- a/net/netcheck/netcheck_test.go
+++ b/net/netcheck/netcheck_test.go
@@ -28,6 +28,9 @@ func newTestClient(t testing.TB) *Client {
c := &Client{
NetMon: netmon.NewStatic(),
Logf: t.Logf,
+ TimeNow: func() time.Time {
+ return time.Unix(1729624521, 0)
+ },
}
return c
}
@@ -38,7 +41,7 @@ func TestBasic(t *testing.T) {
c := newTestClient(t)
- ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+ ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if err := c.Standalone(ctx, "127.0.0.1:0"); err != nil {
@@ -52,6 +55,9 @@ func TestBasic(t *testing.T) {
if !r.UDP {
t.Error("want UDP")
}
+ if r.Now.IsZero() {
+ t.Error("Now is zero")
+ }
if len(r.RegionLatency) != 1 {
t.Errorf("expected 1 key in DERPLatency; got %+v", r.RegionLatency)
}
@@ -117,7 +123,7 @@ func TestWorksWhenUDPBlocked(t *testing.T) {
c := newTestClient(t)
- ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
+ ctx, cancel := context.WithCancel(context.Background())
defer cancel()
r, err := c.GetReport(ctx, dm, nil)
@@ -130,6 +136,14 @@ func TestWorksWhenUDPBlocked(t *testing.T) {
want := newReport()
+ // The Now field can't be compared with reflect.DeepEqual; check using
+ // the Equal method and then overwrite it so that the comparison below
+ // succeeds.
+ if !r.Now.Equal(c.TimeNow()) {
+ t.Errorf("Now = %v; want %v", r.Now, c.TimeNow())
+ }
+ want.Now = r.Now
+
// The IPv4CanSend flag gets set differently across platforms.
// On Windows this test detects false, while on Linux detects true.
// That's not relevant to this test, so just accept what we're
@@ -187,6 +201,7 @@ func TestAddReportHistoryAndSetPreferredDERP(t *testing.T) {
steps []step
homeParams *tailcfg.DERPHomeParams
opts *GetReportOpts
+ forcedDERP int // if non-zero, force this DERP to be the preferred one
wantDERP int // want PreferredDERP on final step
wantPrevLen int // wanted len(c.prev)
}{
@@ -343,12 +358,74 @@ func TestAddReportHistoryAndSetPreferredDERP(t *testing.T) {
wantPrevLen: 3,
wantDERP: 2, // moved to d2 since d1 is gone
},
+ {
+ name: "preferred_derp_hysteresis_no_switch_pct",
+ steps: []step{
+ {0 * time.Second, report("d1", 34*time.Millisecond, "d2", 35*time.Millisecond)},
+ {1 * time.Second, report("d1", 34*time.Millisecond, "d2", 23*time.Millisecond)},
+ },
+ wantPrevLen: 2,
+ wantDERP: 1, // diff is 11ms, but d2 is greater than 2/3 of d1
+ },
+ {
+ name: "forced_two",
+ steps: []step{
+ {time.Second, report("d1", 2, "d2", 3)},
+ {2 * time.Second, report("d1", 4, "d2", 3)},
+ },
+ forcedDERP: 2,
+ wantPrevLen: 2,
+ wantDERP: 2,
+ },
+ {
+ name: "forced_two_unavailable",
+ steps: []step{
+ {time.Second, report("d1", 2, "d2", 1)},
+ {2 * time.Second, report("d1", 4)},
+ },
+ forcedDERP: 2,
+ wantPrevLen: 2,
+ wantDERP: 1,
+ },
+ {
+ name: "forced_two_no_probe_recent_activity",
+ steps: []step{
+ {time.Second, report("d1", 2)},
+ {2 * time.Second, report("d1", 4)},
+ },
+ opts: &GetReportOpts{
+ GetLastDERPActivity: mkLDAFunc(map[int]time.Time{
+ 1: startTime,
+ 2: startTime.Add(time.Second),
+ }),
+ },
+ forcedDERP: 2,
+ wantPrevLen: 2,
+ wantDERP: 2,
+ },
+ {
+ name: "forced_two_no_probe_no_recent_activity",
+ steps: []step{
+ {time.Second, report("d1", 2)},
+ {PreferredDERPFrameTime + time.Second, report("d1", 4)},
+ },
+ opts: &GetReportOpts{
+ GetLastDERPActivity: mkLDAFunc(map[int]time.Time{
+ 1: startTime,
+ 2: startTime,
+ }),
+ },
+ forcedDERP: 2,
+ wantPrevLen: 2,
+ wantDERP: 1,
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fakeTime := startTime
c := &Client{
- TimeNow: func() time.Time { return fakeTime },
+ TimeNow: func() time.Time { return fakeTime },
+ ForcePreferredDERP: tt.forcedDERP,
}
dm := &tailcfg.DERPMap{HomeParams: tt.homeParams}
rs := &reportState{
@@ -864,8 +941,8 @@ func TestNodeAddrResolve(t *testing.T) {
c.UseDNSCache = tt
t.Run("IPv4", func(t *testing.T) {
- ap := c.nodeAddr(ctx, dn, probeIPv4)
- if !ap.IsValid() {
+ ap, ok := c.nodeAddrPort(ctx, dn, dn.STUNPort, probeIPv4)
+ if !ok {
t.Fatal("expected valid AddrPort")
}
if !ap.Addr().Is4() {
@@ -879,8 +956,8 @@ func TestNodeAddrResolve(t *testing.T) {
t.Skipf("IPv6 may not work on this machine")
}
- ap := c.nodeAddr(ctx, dn, probeIPv6)
- if !ap.IsValid() {
+ ap, ok := c.nodeAddrPort(ctx, dn, dn.STUNPort, probeIPv6)
+ if !ok {
t.Fatal("expected valid AddrPort")
}
if !ap.Addr().Is6() {
@@ -889,8 +966,8 @@ func TestNodeAddrResolve(t *testing.T) {
t.Logf("got IPv6 addr: %v", ap)
})
t.Run("IPv6 Failure", func(t *testing.T) {
- ap := c.nodeAddr(ctx, dnV4Only, probeIPv6)
- if ap.IsValid() {
+ ap, ok := c.nodeAddrPort(ctx, dnV4Only, dn.STUNPort, probeIPv6)
+ if ok {
t.Fatalf("expected no addr but got: %v", ap)
}
t.Logf("correctly got invalid addr")
@@ -910,3 +987,30 @@ func TestReportTimeouts(t *testing.T) {
t.Errorf("ReportTimeout (%v) cannot be less than httpsProbeTimeout (%v)", ReportTimeout, httpsProbeTimeout)
}
}
+
+func TestNoUDPNilGetReportOpts(t *testing.T) {
+ blackhole, err := net.ListenPacket("udp4", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("failed to open blackhole STUN listener: %v", err)
+ }
+ defer blackhole.Close()
+
+ dm := stuntest.DERPMapOf(blackhole.LocalAddr().String())
+ for _, region := range dm.Regions {
+ for _, n := range region.Nodes {
+ n.STUNOnly = false // exercise ICMP & HTTPS probing
+ }
+ }
+
+ c := newTestClient(t)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ r, err := c.GetReport(ctx, dm, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if r.UDP {
+ t.Fatal("unexpected working UDP")
+ }
+}
diff --git a/net/netmon/interfaces_android.go b/net/netmon/interfaces_android.go
index a96423eb6bfeb..26104e879a393 100644
--- a/net/netmon/interfaces_android.go
+++ b/net/netmon/interfaces_android.go
@@ -5,7 +5,6 @@ package netmon
import (
"bytes"
- "errors"
"log"
"net/netip"
"os/exec"
@@ -15,7 +14,7 @@ import (
"golang.org/x/sys/unix"
"tailscale.com/net/netaddr"
"tailscale.com/syncs"
- "tailscale.com/util/lineread"
+ "tailscale.com/util/lineiter"
)
var (
@@ -34,11 +33,6 @@ func init() {
var procNetRouteErr atomic.Bool
-// errStopReading is a sentinel error value used internally by
-// lineread.File callers to stop reading. It doesn't escape to
-// callers/users.
-var errStopReading = errors.New("stop reading")
-
/*
Parse 10.0.0.1 out of:
@@ -54,44 +48,42 @@ func likelyHomeRouterIPAndroid() (ret netip.Addr, myIP netip.Addr, ok bool) {
}
lineNum := 0
var f []mem.RO
- err := lineread.File(procNetRoutePath, func(line []byte) error {
+ for lr := range lineiter.File(procNetRoutePath) {
+ line, err := lr.Value()
+ if err != nil {
+ procNetRouteErr.Store(true)
+ return likelyHomeRouterIP()
+ }
+
lineNum++
if lineNum == 1 {
// Skip header line.
- return nil
+ continue
}
if lineNum > maxProcNetRouteRead {
- return errStopReading
+ break
}
f = mem.AppendFields(f[:0], mem.B(line))
if len(f) < 4 {
- return nil
+ continue
}
gwHex, flagsHex := f[2], f[3]
flags, err := mem.ParseUint(flagsHex, 16, 16)
if err != nil {
- return nil // ignore error, skip line and keep going
+ continue // ignore error, skip line and keep going
}
if flags&(unix.RTF_UP|unix.RTF_GATEWAY) != unix.RTF_UP|unix.RTF_GATEWAY {
- return nil
+ continue
}
ipu32, err := mem.ParseUint(gwHex, 16, 32)
if err != nil {
- return nil // ignore error, skip line and keep going
+ continue // ignore error, skip line and keep going
}
ip := netaddr.IPv4(byte(ipu32), byte(ipu32>>8), byte(ipu32>>16), byte(ipu32>>24))
if ip.IsPrivate() {
ret = ip
- return errStopReading
+ break
}
- return nil
- })
- if errors.Is(err, errStopReading) {
- err = nil
- }
- if err != nil {
- procNetRouteErr.Store(true)
- return likelyHomeRouterIP()
}
if ret.IsValid() {
// Try to get the local IP of the interface associated with
@@ -144,23 +136,26 @@ func likelyHomeRouterIPHelper() (ret netip.Addr, _ netip.Addr, ok bool) {
return
}
// Search for line like "default via 10.0.2.2 dev radio0 table 1016 proto static mtu 1500 "
- lineread.Reader(out, func(line []byte) error {
+ for lr := range lineiter.Reader(out) {
+ line, err := lr.Value()
+ if err != nil {
+ break
+ }
const pfx = "default via "
if !mem.HasPrefix(mem.B(line), mem.S(pfx)) {
- return nil
+ continue
}
line = line[len(pfx):]
sp := bytes.IndexByte(line, ' ')
if sp == -1 {
- return nil
+ continue
}
ipb := line[:sp]
if ip, err := netip.ParseAddr(string(ipb)); err == nil && ip.Is4() {
ret = ip
log.Printf("interfaces: found Android default route %v", ip)
}
- return nil
- })
+ }
cmd.Process.Kill()
cmd.Wait()
return ret, netip.Addr{}, ret.IsValid()
diff --git a/net/netmon/interfaces_darwin_test.go b/net/netmon/interfaces_darwin_test.go
index d34040d60d31d..d756d13348bc3 100644
--- a/net/netmon/interfaces_darwin_test.go
+++ b/net/netmon/interfaces_darwin_test.go
@@ -4,14 +4,13 @@
package netmon
import (
- "errors"
"io"
"net/netip"
"os/exec"
"testing"
"go4.org/mem"
- "tailscale.com/util/lineread"
+ "tailscale.com/util/lineiter"
"tailscale.com/version"
)
@@ -73,31 +72,34 @@ func likelyHomeRouterIPDarwinExec() (ret netip.Addr, netif string, ok bool) {
defer io.Copy(io.Discard, stdout) // clear the pipe to prevent hangs
var f []mem.RO
- lineread.Reader(stdout, func(lineb []byte) error {
+ for lr := range lineiter.Reader(stdout) {
+ lineb, err := lr.Value()
+ if err != nil {
+ break
+ }
line := mem.B(lineb)
if !mem.Contains(line, mem.S("default")) {
- return nil
+ continue
}
f = mem.AppendFields(f[:0], line)
if len(f) < 4 || !f[0].EqualString("default") {
- return nil
+ continue
}
ipm, flagsm, netifm := f[1], f[2], f[3]
if !mem.Contains(flagsm, mem.S("G")) {
- return nil
+ continue
}
if mem.Contains(flagsm, mem.S("I")) {
- return nil
+ continue
}
ip, err := netip.ParseAddr(string(mem.Append(nil, ipm)))
if err == nil && ip.IsPrivate() {
ret = ip
netif = netifm.StringCopy()
// We've found what we're looking for.
- return errStopReadingNetstatTable
+ break
}
- return nil
- })
+ }
return ret, netif, ret.IsValid()
}
@@ -110,5 +112,3 @@ func TestFetchRoutingTable(t *testing.T) {
}
}
}
-
-var errStopReadingNetstatTable = errors.New("found private gateway")
diff --git a/net/netmon/interfaces_linux.go b/net/netmon/interfaces_linux.go
index 299f3101ea73b..d0fb15ababe9e 100644
--- a/net/netmon/interfaces_linux.go
+++ b/net/netmon/interfaces_linux.go
@@ -23,7 +23,7 @@ import (
"go4.org/mem"
"golang.org/x/sys/unix"
"tailscale.com/net/netaddr"
- "tailscale.com/util/lineread"
+ "tailscale.com/util/lineiter"
)
func init() {
@@ -32,11 +32,6 @@ func init() {
var procNetRouteErr atomic.Bool
-// errStopReading is a sentinel error value used internally by
-// lineread.File callers to stop reading. It doesn't escape to
-// callers/users.
-var errStopReading = errors.New("stop reading")
-
/*
Parse 10.0.0.1 out of:
@@ -52,44 +47,42 @@ func likelyHomeRouterIPLinux() (ret netip.Addr, myIP netip.Addr, ok bool) {
}
lineNum := 0
var f []mem.RO
- err := lineread.File(procNetRoutePath, func(line []byte) error {
+ for lr := range lineiter.File(procNetRoutePath) {
+ line, err := lr.Value()
+ if err != nil {
+ procNetRouteErr.Store(true)
+ log.Printf("interfaces: failed to read /proc/net/route: %v", err)
+ return ret, myIP, false
+ }
lineNum++
if lineNum == 1 {
// Skip header line.
- return nil
+ continue
}
if lineNum > maxProcNetRouteRead {
- return errStopReading
+ break
}
f = mem.AppendFields(f[:0], mem.B(line))
if len(f) < 4 {
- return nil
+ continue
}
gwHex, flagsHex := f[2], f[3]
flags, err := mem.ParseUint(flagsHex, 16, 16)
if err != nil {
- return nil // ignore error, skip line and keep going
+ continue // ignore error, skip line and keep going
}
if flags&(unix.RTF_UP|unix.RTF_GATEWAY) != unix.RTF_UP|unix.RTF_GATEWAY {
- return nil
+ continue
}
ipu32, err := mem.ParseUint(gwHex, 16, 32)
if err != nil {
- return nil // ignore error, skip line and keep going
+ continue // ignore error, skip line and keep going
}
ip := netaddr.IPv4(byte(ipu32), byte(ipu32>>8), byte(ipu32>>16), byte(ipu32>>24))
if ip.IsPrivate() {
ret = ip
- return errStopReading
+ break
}
- return nil
- })
- if errors.Is(err, errStopReading) {
- err = nil
- }
- if err != nil {
- procNetRouteErr.Store(true)
- log.Printf("interfaces: failed to read /proc/net/route: %v", err)
}
if ret.IsValid() {
// Try to get the local IP of the interface associated with
diff --git a/net/netmon/netmon_darwin.go b/net/netmon/netmon_darwin.go
index cc630112523fa..e89e2d04794e5 100644
--- a/net/netmon/netmon_darwin.go
+++ b/net/netmon/netmon_darwin.go
@@ -56,7 +56,18 @@ func (m *darwinRouteMon) Receive() (message, error) {
if err != nil {
return nil, err
}
- msgs, err := route.ParseRIB(route.RIBTypeRoute, m.buf[:n])
+ msgs, err := func() (msgs []route.Message, err error) {
+ defer func() {
+ // TODO(raggi,#14201): remove once we've got a fix from
+ // golang/go#70528.
+ msg := recover()
+ if msg != nil {
+ msgs = nil
+ err = fmt.Errorf("panic in route.ParseRIB: %s", msg)
+ }
+ }()
+ return route.ParseRIB(route.RIBTypeRoute, m.buf[:n])
+ }()
if err != nil {
if debugRouteMessages {
m.logf("read %d bytes (% 02x), failed to parse RIB: %v", n, m.buf[:n], err)
diff --git a/net/netmon/netmon_linux_test.go b/net/netmon/netmon_linux_test.go
index d09fac26aecee..75d7c646559f1 100644
--- a/net/netmon/netmon_linux_test.go
+++ b/net/netmon/netmon_linux_test.go
@@ -1,6 +1,8 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
+//go:build linux && !android
+
package netmon
import (
diff --git a/net/socks5/socks5.go b/net/socks5/socks5.go
index 0d651537fac9a..4a5befa1d2fef 100644
--- a/net/socks5/socks5.go
+++ b/net/socks5/socks5.go
@@ -81,6 +81,12 @@ const (
addrTypeNotSupported replyCode = 8
)
+// UDP conn default buffer size and read timeout.
+const (
+ bufferSize = 8 * 1024
+ readTimeout = 5 * time.Second
+)
+
// Server is a SOCKS5 proxy server.
type Server struct {
// Logf optionally specifies the logger to use.
@@ -143,7 +149,8 @@ type Conn struct {
clientConn net.Conn
request *request
- udpClientAddr net.Addr
+ udpClientAddr net.Addr
+ udpTargetConns map[socksAddr]net.Conn
}
// Run starts the new connection.
@@ -276,15 +283,6 @@ func (c *Conn) handleUDP() error {
}
defer clientUDPConn.Close()
- serverUDPConn, err := net.ListenPacket("udp", "[::]:0")
- if err != nil {
- res := errorResponse(generalFailure)
- buf, _ := res.marshal()
- c.clientConn.Write(buf)
- return err
- }
- defer serverUDPConn.Close()
-
bindAddr, bindPort, err := splitHostPort(clientUDPConn.LocalAddr().String())
if err != nil {
return err
@@ -305,25 +303,32 @@ func (c *Conn) handleUDP() error {
}
c.clientConn.Write(buf)
- return c.transferUDP(c.clientConn, clientUDPConn, serverUDPConn)
+ return c.transferUDP(c.clientConn, clientUDPConn)
}
-func (c *Conn) transferUDP(associatedTCP net.Conn, clientConn net.PacketConn, targetConn net.PacketConn) error {
+func (c *Conn) transferUDP(associatedTCP net.Conn, clientConn net.PacketConn) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- const bufferSize = 8 * 1024
- const readTimeout = 5 * time.Second
// client -> target
go func() {
defer cancel()
+
+ c.udpTargetConns = make(map[socksAddr]net.Conn)
+ // close all target udp connections when the client connection is closed
+ defer func() {
+ for _, conn := range c.udpTargetConns {
+ _ = conn.Close()
+ }
+ }()
+
buf := make([]byte, bufferSize)
for {
select {
case <-ctx.Done():
return
default:
- err := c.handleUDPRequest(clientConn, targetConn, buf, readTimeout)
+ err := c.handleUDPRequest(ctx, clientConn, buf)
if err != nil {
if isTimeout(err) {
continue
@@ -337,21 +342,44 @@ func (c *Conn) transferUDP(associatedTCP net.Conn, clientConn net.PacketConn, ta
}
}()
+ // A UDP association terminates when the TCP connection that the UDP
+ // ASSOCIATE request arrived on terminates. RFC1928
+ _, err := io.Copy(io.Discard, associatedTCP)
+ if err != nil {
+ err = fmt.Errorf("udp associated tcp conn: %w", err)
+ }
+ return err
+}
+
+func (c *Conn) getOrDialTargetConn(
+ ctx context.Context,
+ clientConn net.PacketConn,
+ targetAddr socksAddr,
+) (net.Conn, error) {
+ conn, exist := c.udpTargetConns[targetAddr]
+ if exist {
+ return conn, nil
+ }
+ conn, err := c.srv.dial(ctx, "udp", targetAddr.hostPort())
+ if err != nil {
+ return nil, err
+ }
+ c.udpTargetConns[targetAddr] = conn
+
// target -> client
go func() {
- defer cancel()
buf := make([]byte, bufferSize)
for {
select {
case <-ctx.Done():
return
default:
- err := c.handleUDPResponse(targetConn, clientConn, buf, readTimeout)
+ err := c.handleUDPResponse(clientConn, targetAddr, conn, buf)
if err != nil {
if isTimeout(err) {
continue
}
- if errors.Is(err, net.ErrClosed) {
+ if errors.Is(err, net.ErrClosed) || errors.Is(err, io.EOF) {
return
}
c.logf("udp transfer: handle udp response fail: %v", err)
@@ -360,20 +388,13 @@ func (c *Conn) transferUDP(associatedTCP net.Conn, clientConn net.PacketConn, ta
}
}()
- // A UDP association terminates when the TCP connection that the UDP
- // ASSOCIATE request arrived on terminates. RFC1928
- _, err := io.Copy(io.Discard, associatedTCP)
- if err != nil {
- err = fmt.Errorf("udp associated tcp conn: %w", err)
- }
- return err
+ return conn, nil
}
func (c *Conn) handleUDPRequest(
+ ctx context.Context,
clientConn net.PacketConn,
- targetConn net.PacketConn,
buf []byte,
- readTimeout time.Duration,
) error {
// add a deadline for the read to avoid blocking forever
_ = clientConn.SetReadDeadline(time.Now().Add(readTimeout))
@@ -386,38 +407,35 @@ func (c *Conn) handleUDPRequest(
if err != nil {
return fmt.Errorf("parse udp request: %w", err)
}
- targetAddr, err := net.ResolveUDPAddr("udp", req.addr.hostPort())
+
+ targetConn, err := c.getOrDialTargetConn(ctx, clientConn, req.addr)
if err != nil {
- c.logf("resolve target addr fail: %v", err)
+ return fmt.Errorf("dial target %s fail: %w", req.addr, err)
}
- nn, err := targetConn.WriteTo(data, targetAddr)
+ nn, err := targetConn.Write(data)
if err != nil {
- return fmt.Errorf("write to target %s fail: %w", targetAddr, err)
+ return fmt.Errorf("write to target %s fail: %w", req.addr, err)
}
if nn != len(data) {
- return fmt.Errorf("write to target %s fail: %w", targetAddr, io.ErrShortWrite)
+ return fmt.Errorf("write to target %s fail: %w", req.addr, io.ErrShortWrite)
}
return nil
}
func (c *Conn) handleUDPResponse(
- targetConn net.PacketConn,
clientConn net.PacketConn,
+ targetAddr socksAddr,
+ targetConn net.Conn,
buf []byte,
- readTimeout time.Duration,
) error {
// add a deadline for the read to avoid blocking forever
_ = targetConn.SetReadDeadline(time.Now().Add(readTimeout))
- n, addr, err := targetConn.ReadFrom(buf)
+ n, err := targetConn.Read(buf)
if err != nil {
return fmt.Errorf("read from target: %w", err)
}
- host, port, err := splitHostPort(addr.String())
- if err != nil {
- return fmt.Errorf("split host port: %w", err)
- }
- hdr := udpRequest{addr: socksAddr{addrType: getAddrType(host), addr: host, port: port}}
+ hdr := udpRequest{addr: targetAddr}
pkt, err := hdr.marshal()
if err != nil {
return fmt.Errorf("marshal udp request: %w", err)
@@ -627,10 +645,15 @@ func (s socksAddr) marshal() ([]byte, error) {
pkt = binary.BigEndian.AppendUint16(pkt, s.port)
return pkt, nil
}
+
func (s socksAddr) hostPort() string {
return net.JoinHostPort(s.addr, strconv.Itoa(int(s.port)))
}
+func (s socksAddr) String() string {
+ return s.hostPort()
+}
+
// response contains the contents of
// a response packet sent from the proxy
// to the client.
diff --git a/net/socks5/socks5_test.go b/net/socks5/socks5_test.go
index 11ea59d4b57d1..bc6fac79fdcf9 100644
--- a/net/socks5/socks5_test.go
+++ b/net/socks5/socks5_test.go
@@ -169,12 +169,25 @@ func TestReadPassword(t *testing.T) {
func TestUDP(t *testing.T) {
// backend UDP server which we'll use SOCKS5 to connect to
- listener, err := net.ListenPacket("udp", ":0")
- if err != nil {
- t.Fatal(err)
+ newUDPEchoServer := func() net.PacketConn {
+ listener, err := net.ListenPacket("udp", ":0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ go udpEchoServer(listener)
+ return listener
}
- backendServerPort := listener.LocalAddr().(*net.UDPAddr).Port
- go udpEchoServer(listener)
+
+ const echoServerNumber = 3
+ echoServerListener := make([]net.PacketConn, echoServerNumber)
+ for i := 0; i < echoServerNumber; i++ {
+ echoServerListener[i] = newUDPEchoServer()
+ }
+ defer func() {
+ for i := 0; i < echoServerNumber; i++ {
+ _ = echoServerListener[i].Close()
+ }
+ }()
// SOCKS5 server
socks5, err := net.Listen("tcp", ":0")
@@ -184,84 +197,93 @@ func TestUDP(t *testing.T) {
socks5Port := socks5.Addr().(*net.TCPAddr).Port
go socks5Server(socks5)
- // net/proxy don't support UDP, so we need to manually send the SOCKS5 UDP request
- conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%d", socks5Port))
- if err != nil {
- t.Fatal(err)
- }
- _, err = conn.Write([]byte{0x05, 0x01, 0x00}) // client hello with no auth
- if err != nil {
- t.Fatal(err)
- }
- buf := make([]byte, 1024)
- n, err := conn.Read(buf) // server hello
- if err != nil {
- t.Fatal(err)
- }
- if n != 2 || buf[0] != 0x05 || buf[1] != 0x00 {
- t.Fatalf("got: %q want: 0x05 0x00", buf[:n])
- }
+ // make a socks5 udpAssociate conn
+ newUdpAssociateConn := func() (socks5Conn net.Conn, socks5UDPAddr socksAddr) {
+ // net/proxy doesn't support UDP, so we need to manually send the SOCKS5 UDP request
+ conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%d", socks5Port))
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = conn.Write([]byte{socks5Version, 0x01, noAuthRequired}) // client hello with no auth
+ if err != nil {
+ t.Fatal(err)
+ }
+ buf := make([]byte, 1024)
+ n, err := conn.Read(buf) // server hello
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != 2 || buf[0] != socks5Version || buf[1] != noAuthRequired {
+ t.Fatalf("got: %q want: 0x05 0x00", buf[:n])
+ }
- targetAddr := socksAddr{
- addrType: domainName,
- addr: "localhost",
- port: uint16(backendServerPort),
- }
- targetAddrPkt, err := targetAddr.marshal()
- if err != nil {
- t.Fatal(err)
- }
- _, err = conn.Write(append([]byte{0x05, 0x03, 0x00}, targetAddrPkt...)) // client reqeust
- if err != nil {
- t.Fatal(err)
- }
+ targetAddr := socksAddr{addrType: ipv4, addr: "0.0.0.0", port: 0}
+ targetAddrPkt, err := targetAddr.marshal()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = conn.Write(append([]byte{socks5Version, byte(udpAssociate), 0x00}, targetAddrPkt...)) // client request
+ if err != nil {
+ t.Fatal(err)
+ }
- n, err = conn.Read(buf) // server response
- if err != nil {
- t.Fatal(err)
- }
- if n < 3 || !bytes.Equal(buf[:3], []byte{0x05, 0x00, 0x00}) {
- t.Fatalf("got: %q want: 0x05 0x00 0x00", buf[:n])
+ n, err = conn.Read(buf) // server response
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n < 3 || !bytes.Equal(buf[:3], []byte{socks5Version, 0x00, 0x00}) {
+ t.Fatalf("got: %q want: 0x05 0x00 0x00", buf[:n])
+ }
+ udpProxySocksAddr, err := parseSocksAddr(bytes.NewReader(buf[3:n]))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return conn, udpProxySocksAddr
}
- udpProxySocksAddr, err := parseSocksAddr(bytes.NewReader(buf[3:n]))
- if err != nil {
- t.Fatal(err)
+
+ conn, udpProxySocksAddr := newUdpAssociateConn()
+ defer conn.Close()
+
+ sendUDPAndWaitResponse := func(socks5UDPConn net.Conn, addr socksAddr, body []byte) (responseBody []byte) {
+ udpPayload, err := (&udpRequest{addr: addr}).marshal()
+ if err != nil {
+ t.Fatal(err)
+ }
+ udpPayload = append(udpPayload, body...)
+ _, err = socks5UDPConn.Write(udpPayload)
+ if err != nil {
+ t.Fatal(err)
+ }
+ buf := make([]byte, 1024)
+ n, err := socks5UDPConn.Read(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, responseBody, err = parseUDPRequest(buf[:n])
+ if err != nil {
+ t.Fatal(err)
+ }
+ return responseBody
}
udpProxyAddr, err := net.ResolveUDPAddr("udp", udpProxySocksAddr.hostPort())
if err != nil {
t.Fatal(err)
}
- udpConn, err := net.DialUDP("udp", nil, udpProxyAddr)
- if err != nil {
- t.Fatal(err)
- }
- udpPayload, err := (&udpRequest{addr: targetAddr}).marshal()
- if err != nil {
- t.Fatal(err)
- }
- udpPayload = append(udpPayload, []byte("Test")...)
- _, err = udpConn.Write(udpPayload) // send udp package
- if err != nil {
- t.Fatal(err)
- }
- n, _, err = udpConn.ReadFrom(buf)
- if err != nil {
- t.Fatal(err)
- }
- _, responseBody, err := parseUDPRequest(buf[:n]) // read udp response
- if err != nil {
- t.Fatal(err)
- }
- if string(responseBody) != "Test" {
- t.Fatalf("got: %q want: Test", responseBody)
- }
- err = udpConn.Close()
+ socks5UDPConn, err := net.DialUDP("udp", nil, udpProxyAddr)
if err != nil {
t.Fatal(err)
}
- err = conn.Close()
- if err != nil {
- t.Fatal(err)
+ defer socks5UDPConn.Close()
+
+ for i := 0; i < echoServerNumber; i++ {
+ port := echoServerListener[i].LocalAddr().(*net.UDPAddr).Port
+ addr := socksAddr{addrType: ipv4, addr: "127.0.0.1", port: uint16(port)}
+ requestBody := []byte(fmt.Sprintf("Test %d", i))
+ responseBody := sendUDPAndWaitResponse(socks5UDPConn, addr, requestBody)
+ if !bytes.Equal(requestBody, responseBody) {
+ t.Fatalf("got: %q want: %q", responseBody, requestBody)
+ }
}
}
diff --git a/net/tlsdial/blockblame/blockblame.go b/net/tlsdial/blockblame/blockblame.go
new file mode 100644
index 0000000000000..57dc7a6e6d885
--- /dev/null
+++ b/net/tlsdial/blockblame/blockblame.go
@@ -0,0 +1,104 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Package blockblame blames specific firewall manufacturers for blocking Tailscale,
+// by analyzing the SSL certificate presented when attempting to connect to a remote
+// server.
+package blockblame
+
+import (
+ "crypto/x509"
+ "strings"
+)
+
+// VerifyCertificate checks if the given certificate c is issued by a firewall manufacturer
+// that is known to block Tailscale connections. It returns true and the Manufacturer of
+// the equipment if it is, or false and nil if it is not.
+func VerifyCertificate(c *x509.Certificate) (m *Manufacturer, ok bool) {
+ for _, m := range Manufacturers {
+ if m.match != nil && m.match(c) {
+ return m, true
+ }
+ }
+ return nil, false
+}
+
+// Manufacturer represents a firewall manufacturer that may be blocking Tailscale.
+type Manufacturer struct {
+ // Name is the name of the firewall manufacturer to be
+ // mentioned in health warning messages, e.g. "Fortinet".
+ Name string
+ // match is a function that returns true if the given certificate looks like it might
+ // be issued by this manufacturer.
+ match matchFunc
+}
+
+var Manufacturers = []*Manufacturer{
+ {
+ Name: "Aruba Networks",
+ match: issuerContains("Aruba"),
+ },
+ {
+ Name: "Cisco",
+ match: issuerContains("Cisco"),
+ },
+ {
+ Name: "Fortinet",
+ match: matchAny(
+ issuerContains("Fortinet"),
+ certEmail("support@fortinet.com"),
+ ),
+ },
+ {
+ Name: "Huawei",
+ match: certEmail("mobile@huawei.com"),
+ },
+ {
+ Name: "Palo Alto Networks",
+ match: matchAny(
+ issuerContains("Palo Alto Networks"),
+ issuerContains("PAN-FW"),
+ ),
+ },
+ {
+ Name: "Sophos",
+ match: issuerContains("Sophos"),
+ },
+ {
+ Name: "Ubiquiti",
+ match: matchAny(
+ issuerContains("UniFi"),
+ issuerContains("Ubiquiti"),
+ ),
+ },
+}
+
+type matchFunc func(*x509.Certificate) bool
+
+func issuerContains(s string) matchFunc {
+ return func(c *x509.Certificate) bool {
+ return strings.Contains(strings.ToLower(c.Issuer.String()), strings.ToLower(s))
+ }
+}
+
+func certEmail(v string) matchFunc {
+ return func(c *x509.Certificate) bool {
+ for _, email := range c.EmailAddresses {
+ if strings.Contains(strings.ToLower(email), strings.ToLower(v)) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func matchAny(fs ...matchFunc) matchFunc {
+ return func(c *x509.Certificate) bool {
+ for _, f := range fs {
+ if f(c) {
+ return true
+ }
+ }
+ return false
+ }
+}
diff --git a/net/tlsdial/blockblame/blockblame_test.go b/net/tlsdial/blockblame/blockblame_test.go
new file mode 100644
index 0000000000000..6d3592c60a3de
--- /dev/null
+++ b/net/tlsdial/blockblame/blockblame_test.go
@@ -0,0 +1,54 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package blockblame
+
+import (
+ "crypto/x509"
+ "encoding/pem"
+ "testing"
+)
+
+const controlplaneDotTailscaleDotComPEM = `
+-----BEGIN CERTIFICATE-----
+MIIDkzCCAxqgAwIBAgISA2GOahsftpp59yuHClbDuoduMAoGCCqGSM49BAMDMDIx
+CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQDEwJF
+NjAeFw0yNDEwMTIxNjE2NDVaFw0yNTAxMTAxNjE2NDRaMCUxIzAhBgNVBAMTGmNv
+bnRyb2xwbGFuZS50YWlsc2NhbGUuY29tMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcD
+QgAExfraDUc1t185zuGtZlnPDtEJJSDBqvHN4vQcXSzSTPSAdDYHcA8fL5woU2Kg
+jK/2C0wm/rYy2Rre/ulhkS4wB6OCAhswggIXMA4GA1UdDwEB/wQEAwIHgDAdBgNV
+HSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4E
+FgQUpArnpDj8Yh6NTgMOZjDPx0TuLmcwHwYDVR0jBBgwFoAUkydGmAOpUWiOmNbE
+QkjbI79YlNIwVQYIKwYBBQUHAQEESTBHMCEGCCsGAQUFBzABhhVodHRwOi8vZTYu
+by5sZW5jci5vcmcwIgYIKwYBBQUHMAKGFmh0dHA6Ly9lNi5pLmxlbmNyLm9yZy8w
+JQYDVR0RBB4wHIIaY29udHJvbHBsYW5lLnRhaWxzY2FsZS5jb20wEwYDVR0gBAww
+CjAIBgZngQwBAgEwggEDBgorBgEEAdZ5AgQCBIH0BIHxAO8AdgDgkrP8DB3I52g2
+H95huZZNClJ4GYpy1nLEsE2lbW9UBAAAAZKBujCyAAAEAwBHMEUCIQDHMgUaL4H9
+ZJa090ZOpBeEVu3+t+EF4HlHI1NqAai6uQIgeY/lLfjAXfcVgxBHHR4zjd0SzhaP
+TREHXzwxzN/8blkAdQDPEVbu1S58r/OHW9lpLpvpGnFnSrAX7KwB0lt3zsw7CAAA
+AZKBujh8AAAEAwBGMEQCICQwhMk45t9aiFjfwOC/y6+hDbszqSCpIv63kFElweUy
+AiAqTdkqmbqUVpnav5JdWkNERVAIlY4jqrThLsCLZYbNszAKBggqhkjOPQQDAwNn
+ADBkAjALyfgAt1XQp1uSfxy4GapR5OsmjEMBRVq6IgsPBlCRBfmf0Q3/a6mF0pjb
+Sj4oa+cCMEhZk4DmBTIdZY9zjuh8s7bXNfKxUQS0pEhALtXqyFr+D5dF7JcQo9+s
+Z98JY7/PCA==
+-----END CERTIFICATE-----`
+
+func TestVerifyCertificateOurControlPlane(t *testing.T) {
+ p, _ := pem.Decode([]byte(controlplaneDotTailscaleDotComPEM))
+ if p == nil {
+ t.Fatalf("failed to extract certificate bytes for controlplane.tailscale.com")
+ return
+ }
+ cert, err := x509.ParseCertificate(p.Bytes)
+ if err != nil {
+ t.Fatalf("failed to parse certificate: %v", err)
+ return
+ }
+ m, found := VerifyCertificate(cert)
+ if found {
+ t.Fatalf("expected to not get a result for the controlplane.tailscale.com certificate")
+ }
+ if m != nil {
+ t.Fatalf("expected nil manufacturer for controlplane.tailscale.com certificate")
+ }
+}
diff --git a/net/tlsdial/tlsdial.go b/net/tlsdial/tlsdial.go
index a49e7f0f730ee..7e847a8b6a656 100644
--- a/net/tlsdial/tlsdial.go
+++ b/net/tlsdial/tlsdial.go
@@ -27,6 +27,7 @@ import (
"tailscale.com/envknob"
"tailscale.com/health"
"tailscale.com/hostinfo"
+ "tailscale.com/net/tlsdial/blockblame"
)
var counterFallbackOK int32 // atomic
@@ -44,6 +45,16 @@ var debug = envknob.RegisterBool("TS_DEBUG_TLS_DIAL")
// Headscale, etc.
var tlsdialWarningPrinted sync.Map // map[string]bool
+var mitmBlockWarnable = health.Register(&health.Warnable{
+ Code: "blockblame-mitm-detected",
+ Title: "Network may be blocking Tailscale",
+ Text: func(args health.Args) string {
+ return fmt.Sprintf("Network equipment from %q may be blocking Tailscale traffic on this network. Connect to another network, or contact your network administrator for assistance.", args["manufacturer"])
+ },
+ Severity: health.SeverityMedium,
+ ImpactsConnectivity: true,
+})
+
// Config returns a tls.Config for connecting to a server.
// If base is non-nil, it's cloned as the base config before
// being configured and returned.
@@ -86,12 +97,29 @@ func Config(host string, ht *health.Tracker, base *tls.Config) *tls.Config {
// Perform some health checks on this certificate before we do
// any verification.
+ var cert *x509.Certificate
var selfSignedIssuer string
- if certs := cs.PeerCertificates; len(certs) > 0 && certIsSelfSigned(certs[0]) {
- selfSignedIssuer = certs[0].Issuer.String()
+ if certs := cs.PeerCertificates; len(certs) > 0 {
+ cert = certs[0]
+ if certIsSelfSigned(cert) {
+ selfSignedIssuer = cert.Issuer.String()
+ }
}
if ht != nil {
defer func() {
+ if retErr != nil && cert != nil {
+ // Is it a MITM SSL certificate from a well-known network appliance manufacturer?
+ // Show a dedicated warning.
+ m, ok := blockblame.VerifyCertificate(cert)
+ if ok {
+ log.Printf("tlsdial: server cert for %q looks like %q equipment (could be blocking Tailscale)", host, m.Name)
+ ht.SetUnhealthy(mitmBlockWarnable, health.Args{"manufacturer": m.Name})
+ } else {
+ ht.SetHealthy(mitmBlockWarnable)
+ }
+ } else {
+ ht.SetHealthy(mitmBlockWarnable)
+ }
if retErr != nil && selfSignedIssuer != "" {
// Self-signed certs are never valid.
//
diff --git a/net/tsaddr/tsaddr.go b/net/tsaddr/tsaddr.go
index b951899e22b22..5a8376c4b088b 100644
--- a/net/tsaddr/tsaddr.go
+++ b/net/tsaddr/tsaddr.go
@@ -82,15 +82,21 @@ const (
TailscaleServiceIPv6String = "fd7a:115c:a1e0::53"
)
-// IsTailscaleIP reports whether ip is an IP address in a range that
+// IsTailscaleIP reports whether IP is an IP address in a range that
// Tailscale assigns from.
func IsTailscaleIP(ip netip.Addr) bool {
if ip.Is4() {
- return CGNATRange().Contains(ip) && !PrefixesContainsIP(CGNatOverrideRange(), ip)
+ return IsTailscaleIPv4(ip)
}
return TailscaleULARange().Contains(ip)
}
+// IsTailscaleIPv4 reports whether an IPv4 IP is an IP address that
+// Tailscale assigns from.
+func IsTailscaleIPv4(ip netip.Addr) bool {
+ return CGNATRange().Contains(ip) && !PrefixesContainsIP(CGNatOverrideRange(), ip)
+}
+
// TailscaleULARange returns the IPv6 Unique Local Address range that
// is the superset range that Tailscale assigns out of.
func TailscaleULARange() netip.Prefix {
@@ -209,8 +215,7 @@ func PrefixIs6(p netip.Prefix) bool { return p.Addr().Is6() }
// IPv6 /0 route.
func ContainsExitRoutes(rr views.Slice[netip.Prefix]) bool {
var v4, v6 bool
- for i := range rr.Len() {
- r := rr.At(i)
+ for _, r := range rr.All() {
if r == allIPv4 {
v4 = true
} else if r == allIPv6 {
@@ -223,8 +228,8 @@ func ContainsExitRoutes(rr views.Slice[netip.Prefix]) bool {
// ContainsExitRoute reports whether rr contains at least one of IPv4 or
// IPv6 /0 (exit) routes.
func ContainsExitRoute(rr views.Slice[netip.Prefix]) bool {
- for i := range rr.Len() {
- if rr.At(i).Bits() == 0 {
+ for _, r := range rr.All() {
+ if r.Bits() == 0 {
return true
}
}
@@ -234,8 +239,8 @@ func ContainsExitRoute(rr views.Slice[netip.Prefix]) bool {
// ContainsNonExitSubnetRoutes reports whether v contains Subnet
// Routes other than ExitNode Routes.
func ContainsNonExitSubnetRoutes(rr views.Slice[netip.Prefix]) bool {
- for i := range rr.Len() {
- if rr.At(i).Bits() != 0 {
+ for _, r := range rr.All() {
+ if r.Bits() != 0 {
return true
}
}
diff --git a/net/tsaddr/tsaddr_test.go b/net/tsaddr/tsaddr_test.go
index 4aa2f8c60f5b3..9ac1ce3036299 100644
--- a/net/tsaddr/tsaddr_test.go
+++ b/net/tsaddr/tsaddr_test.go
@@ -222,3 +222,71 @@ func TestContainsExitRoute(t *testing.T) {
}
}
}
+
+func TestIsTailscaleIPv4(t *testing.T) {
+ tests := []struct {
+ in netip.Addr
+ want bool
+ }{
+ {
+ in: netip.MustParseAddr("100.67.19.57"),
+ want: true,
+ },
+ {
+ in: netip.MustParseAddr("10.10.10.10"),
+ want: false,
+ },
+ {
+
+ in: netip.MustParseAddr("fd7a:115c:a1e0:3f2b:7a1d:4e88:9c2b:7f01"),
+ want: false,
+ },
+ {
+ in: netip.MustParseAddr("bc9d:0aa0:1f0a:69ab:eb5c:28e0:5456:a518"),
+ want: false,
+ },
+ {
+ in: netip.MustParseAddr("100.115.92.157"),
+ want: false,
+ },
+ }
+ for _, tt := range tests {
+ if got := IsTailscaleIPv4(tt.in); got != tt.want {
+ t.Errorf("IsTailscaleIPv4(%v) = %v, want %v", tt.in, got, tt.want)
+ }
+ }
+}
+
+func TestIsTailscaleIP(t *testing.T) {
+ tests := []struct {
+ in netip.Addr
+ want bool
+ }{
+ {
+ in: netip.MustParseAddr("100.67.19.57"),
+ want: true,
+ },
+ {
+ in: netip.MustParseAddr("10.10.10.10"),
+ want: false,
+ },
+ {
+
+ in: netip.MustParseAddr("fd7a:115c:a1e0:3f2b:7a1d:4e88:9c2b:7f01"),
+ want: true,
+ },
+ {
+ in: netip.MustParseAddr("bc9d:0aa0:1f0a:69ab:eb5c:28e0:5456:a518"),
+ want: false,
+ },
+ {
+ in: netip.MustParseAddr("100.115.92.157"),
+ want: false,
+ },
+ }
+ for _, tt := range tests {
+ if got := IsTailscaleIP(tt.in); got != tt.want {
+ t.Errorf("IsTailscaleIP(%v) = %v, want %v", tt.in, got, tt.want)
+ }
+ }
+}
diff --git a/net/tsdial/dnsmap.go b/net/tsdial/dnsmap.go
index f5d13861bb65f..2ef1cb1f171c0 100644
--- a/net/tsdial/dnsmap.go
+++ b/net/tsdial/dnsmap.go
@@ -42,8 +42,8 @@ func dnsMapFromNetworkMap(nm *netmap.NetworkMap) dnsMap {
if dnsname.HasSuffix(nm.Name, suffix) {
ret[canonMapKey(dnsname.TrimSuffix(nm.Name, suffix))] = ip
}
- for i := range addrs.Len() {
- if addrs.At(i).Addr().Is4() {
+ for _, p := range addrs.All() {
+ if p.Addr().Is4() {
have4 = true
}
}
@@ -52,9 +52,8 @@ func dnsMapFromNetworkMap(nm *netmap.NetworkMap) dnsMap {
if p.Name() == "" {
continue
}
- for i := range p.Addresses().Len() {
- a := p.Addresses().At(i)
- ip := a.Addr()
+ for _, pfx := range p.Addresses().All() {
+ ip := pfx.Addr()
if ip.Is4() && !have4 {
continue
}
diff --git a/net/tshttpproxy/tshttpproxy_synology.go b/net/tshttpproxy/tshttpproxy_synology.go
index cda95764865d4..2e50d26d3a655 100644
--- a/net/tshttpproxy/tshttpproxy_synology.go
+++ b/net/tshttpproxy/tshttpproxy_synology.go
@@ -17,7 +17,7 @@ import (
"sync"
"time"
- "tailscale.com/util/lineread"
+ "tailscale.com/util/lineiter"
)
// These vars are overridden for tests.
@@ -76,21 +76,22 @@ func synologyProxiesFromConfig() (*url.URL, *url.URL, error) {
func parseSynologyConfig(r io.Reader) (*url.URL, *url.URL, error) {
cfg := map[string]string{}
- if err := lineread.Reader(r, func(line []byte) error {
+ for lr := range lineiter.Reader(r) {
+ line, err := lr.Value()
+ if err != nil {
+ return nil, nil, err
+ }
// accept and skip over empty lines
line = bytes.TrimSpace(line)
if len(line) == 0 {
- return nil
+ continue
}
key, value, ok := strings.Cut(string(line), "=")
if !ok {
- return fmt.Errorf("missing \"=\" in proxy.conf line: %q", line)
+ return nil, nil, fmt.Errorf("missing \"=\" in proxy.conf line: %q", line)
}
cfg[string(key)] = string(value)
- return nil
- }); err != nil {
- return nil, nil, err
}
if cfg["proxy_enabled"] != "yes" {
diff --git a/net/tstun/tap_linux.go b/net/tstun/tap_linux.go
index c721e6e2734b5..8a00a96927c4d 100644
--- a/net/tstun/tap_linux.go
+++ b/net/tstun/tap_linux.go
@@ -6,6 +6,7 @@
package tstun
import (
+ "bytes"
"fmt"
"net"
"net/netip"
@@ -20,10 +21,14 @@ import (
"gvisor.dev/gvisor/pkg/tcpip/checksum"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
+ "gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
"tailscale.com/net/netaddr"
"tailscale.com/net/packet"
+ "tailscale.com/net/tsaddr"
+ "tailscale.com/syncs"
"tailscale.com/types/ipproto"
+ "tailscale.com/types/logger"
"tailscale.com/util/multierr"
)
@@ -35,13 +40,13 @@ var ourMAC = net.HardwareAddr{0x30, 0x2D, 0x66, 0xEC, 0x7A, 0x93}
func init() { createTAP = createTAPLinux }
-func createTAPLinux(tapName, bridgeName string) (tun.Device, error) {
+func createTAPLinux(logf logger.Logf, tapName, bridgeName string) (tun.Device, error) {
fd, err := unix.Open("/dev/net/tun", unix.O_RDWR, 0)
if err != nil {
return nil, err
}
- dev, err := openDevice(fd, tapName, bridgeName)
+ dev, err := openDevice(logf, fd, tapName, bridgeName)
if err != nil {
unix.Close(fd)
return nil, err
@@ -50,7 +55,7 @@ func createTAPLinux(tapName, bridgeName string) (tun.Device, error) {
return dev, nil
}
-func openDevice(fd int, tapName, bridgeName string) (tun.Device, error) {
+func openDevice(logf logger.Logf, fd int, tapName, bridgeName string) (tun.Device, error) {
ifr, err := unix.NewIfreq(tapName)
if err != nil {
return nil, err
@@ -71,7 +76,7 @@ func openDevice(fd int, tapName, bridgeName string) (tun.Device, error) {
}
}
- return newTAPDevice(fd, tapName)
+ return newTAPDevice(logf, fd, tapName)
}
type etherType [2]byte
@@ -91,7 +96,7 @@ const (
// handleTAPFrame handles receiving a raw TAP ethernet frame and reports whether
// it's been handled (that is, whether it should NOT be passed to wireguard).
-func (t *Wrapper) handleTAPFrame(ethBuf []byte) bool {
+func (t *tapDevice) handleTAPFrame(ethBuf []byte) bool {
if len(ethBuf) < ethernetFrameSize {
// Corrupt. Ignore.
@@ -154,7 +159,7 @@ func (t *Wrapper) handleTAPFrame(ethBuf []byte) bool {
// If the client's asking about their own IP, tell them it's
// their own MAC. TODO(bradfitz): remove String allocs.
- if net.IP(req.ProtocolAddressTarget()).String() == theClientIP {
+ if net.IP(req.ProtocolAddressTarget()).String() == t.clientIPv4.Load() {
copy(res.HardwareAddressSender(), ethSrcMAC)
} else {
copy(res.HardwareAddressSender(), ourMAC[:])
@@ -164,8 +169,7 @@ func (t *Wrapper) handleTAPFrame(ethBuf []byte) bool {
copy(res.HardwareAddressTarget(), req.HardwareAddressSender())
copy(res.ProtocolAddressTarget(), req.ProtocolAddressSender())
- // TODO(raggi): reduce allocs!
- n, err := t.tdev.Write([][]byte{buf}, 0)
+ n, err := t.WriteEthernet(buf)
if tapDebug {
t.logf("tap: wrote ARP reply %v, %v", n, err)
}
@@ -175,14 +179,17 @@ func (t *Wrapper) handleTAPFrame(ethBuf []byte) bool {
}
}
-// TODO(bradfitz): remove these hard-coded values and move from a /24 to a /10 CGNAT as the range.
-const theClientIP = "100.70.145.3" // TODO: make dynamic from netmap
-const routerIP = "100.70.145.1" // must be in same netmask (currently hack at /24) as theClientIP
+var (
+ // routerIP is the IP address of the DHCP server.
+ routerIP = net.ParseIP(tsaddr.TailscaleServiceIPString)
+ // cgnatNetMask is the netmask of the 100.64.0.0/10 CGNAT range.
+ cgnatNetMask = net.IPMask(net.ParseIP("255.192.0.0").To4())
+)
// handleDHCPRequest handles receiving a raw TAP ethernet frame and reports whether
// it's been handled as a DHCP request. That is, it reports whether the frame should
// be ignored by the caller and not passed on.
-func (t *Wrapper) handleDHCPRequest(ethBuf []byte) bool {
+func (t *tapDevice) handleDHCPRequest(ethBuf []byte) bool {
const udpHeader = 8
if len(ethBuf) < ethernetFrameSize+ipv4HeaderLen+udpHeader {
if tapDebug {
@@ -207,7 +214,7 @@ func (t *Wrapper) handleDHCPRequest(ethBuf []byte) bool {
if p.IPProto != ipproto.UDP || p.Src.Port() != 68 || p.Dst.Port() != 67 {
// Not a DHCP request.
if tapDebug {
- t.logf("tap: DHCP wrong meta")
+ t.logf("tap: DHCP wrong meta: %+v", p)
}
return passOnPacket
}
@@ -225,17 +232,22 @@ func (t *Wrapper) handleDHCPRequest(ethBuf []byte) bool {
}
switch dp.MessageType() {
case dhcpv4.MessageTypeDiscover:
+ ips := t.clientIPv4.Load()
+ if ips == "" {
+ t.logf("tap: DHCP no client IP")
+ return consumePacket
+ }
offer, err := dhcpv4.New(
dhcpv4.WithReply(dp),
dhcpv4.WithMessageType(dhcpv4.MessageTypeOffer),
- dhcpv4.WithRouter(net.ParseIP(routerIP)), // the default route
- dhcpv4.WithDNS(net.ParseIP("100.100.100.100")),
- dhcpv4.WithServerIP(net.ParseIP("100.100.100.100")), // TODO: what is this?
- dhcpv4.WithOption(dhcpv4.OptServerIdentifier(net.ParseIP("100.100.100.100"))),
- dhcpv4.WithYourIP(net.ParseIP(theClientIP)),
+ dhcpv4.WithRouter(routerIP), // the default route
+ dhcpv4.WithDNS(routerIP),
+ dhcpv4.WithServerIP(routerIP), // TODO: what is this?
+ dhcpv4.WithOption(dhcpv4.OptServerIdentifier(routerIP)),
+ dhcpv4.WithYourIP(net.ParseIP(ips)),
dhcpv4.WithLeaseTime(3600), // hour works
//dhcpv4.WithHwAddr(ethSrcMAC),
- dhcpv4.WithNetmask(net.IPMask(net.ParseIP("255.255.255.0").To4())), // TODO: wrong
+ dhcpv4.WithNetmask(cgnatNetMask),
//dhcpv4.WithTransactionID(dp.TransactionID),
)
if err != nil {
@@ -250,22 +262,26 @@ func (t *Wrapper) handleDHCPRequest(ethBuf []byte) bool {
netip.AddrPortFrom(netaddr.IPv4(255, 255, 255, 255), 68), // dst
)
- // TODO(raggi): reduce allocs!
- n, err := t.tdev.Write([][]byte{pkt}, 0)
+ n, err := t.WriteEthernet(pkt)
if tapDebug {
t.logf("tap: wrote DHCP OFFER %v, %v", n, err)
}
case dhcpv4.MessageTypeRequest:
+ ips := t.clientIPv4.Load()
+ if ips == "" {
+ t.logf("tap: DHCP no client IP")
+ return consumePacket
+ }
ack, err := dhcpv4.New(
dhcpv4.WithReply(dp),
dhcpv4.WithMessageType(dhcpv4.MessageTypeAck),
- dhcpv4.WithDNS(net.ParseIP("100.100.100.100")),
- dhcpv4.WithRouter(net.ParseIP(routerIP)), // the default route
- dhcpv4.WithServerIP(net.ParseIP("100.100.100.100")), // TODO: what is this?
- dhcpv4.WithOption(dhcpv4.OptServerIdentifier(net.ParseIP("100.100.100.100"))),
- dhcpv4.WithYourIP(net.ParseIP(theClientIP)), // Hello world
- dhcpv4.WithLeaseTime(3600), // hour works
- dhcpv4.WithNetmask(net.IPMask(net.ParseIP("255.255.255.0").To4())),
+ dhcpv4.WithDNS(routerIP),
+ dhcpv4.WithRouter(routerIP), // the default route
+ dhcpv4.WithServerIP(routerIP), // TODO: what is this?
+ dhcpv4.WithOption(dhcpv4.OptServerIdentifier(routerIP)),
+ dhcpv4.WithYourIP(net.ParseIP(ips)), // Hello world
+ dhcpv4.WithLeaseTime(3600), // hour works
+ dhcpv4.WithNetmask(cgnatNetMask),
)
if err != nil {
t.logf("error building DHCP ack: %v", err)
@@ -278,8 +294,7 @@ func (t *Wrapper) handleDHCPRequest(ethBuf []byte) bool {
netip.AddrPortFrom(netaddr.IPv4(100, 100, 100, 100), 67), // src
netip.AddrPortFrom(netaddr.IPv4(255, 255, 255, 255), 68), // dst
)
- // TODO(raggi): reduce allocs!
- n, err := t.tdev.Write([][]byte{pkt}, 0)
+ n, err := t.WriteEthernet(pkt)
if tapDebug {
t.logf("tap: wrote DHCP ACK %v, %v", n, err)
}
@@ -291,6 +306,16 @@ func (t *Wrapper) handleDHCPRequest(ethBuf []byte) bool {
return consumePacket
}
+func writeEthernetFrame(buf []byte, srcMAC, dstMAC net.HardwareAddr, proto tcpip.NetworkProtocolNumber) {
+ // Ethernet header
+ eth := header.Ethernet(buf)
+ eth.Encode(&header.EthernetFields{
+ SrcAddr: tcpip.LinkAddress(srcMAC),
+ DstAddr: tcpip.LinkAddress(dstMAC),
+ Type: proto,
+ })
+}
+
func packLayer2UDP(payload []byte, srcMAC, dstMAC net.HardwareAddr, src, dst netip.AddrPort) []byte {
buf := make([]byte, header.EthernetMinimumSize+header.UDPMinimumSize+header.IPv4MinimumSize+len(payload))
payloadStart := len(buf) - len(payload)
@@ -300,12 +325,7 @@ func packLayer2UDP(payload []byte, srcMAC, dstMAC net.HardwareAddr, src, dst net
dstB := dst.Addr().As4()
dstIP := tcpip.AddrFromSlice(dstB[:])
// Ethernet header
- eth := header.Ethernet(buf)
- eth.Encode(&header.EthernetFields{
- SrcAddr: tcpip.LinkAddress(srcMAC),
- DstAddr: tcpip.LinkAddress(dstMAC),
- Type: ipv4.ProtocolNumber,
- })
+ writeEthernetFrame(buf, srcMAC, dstMAC, ipv4.ProtocolNumber)
// IP header
ipbuf := buf[header.EthernetMinimumSize:]
ip := header.IPv4(ipbuf)
@@ -342,17 +362,18 @@ func run(prog string, args ...string) error {
return nil
}
-func (t *Wrapper) destMAC() [6]byte {
+func (t *tapDevice) destMAC() [6]byte {
return t.destMACAtomic.Load()
}
-func newTAPDevice(fd int, tapName string) (tun.Device, error) {
+func newTAPDevice(logf logger.Logf, fd int, tapName string) (tun.Device, error) {
err := unix.SetNonblock(fd, true)
if err != nil {
return nil, err
}
file := os.NewFile(uintptr(fd), "/dev/tap")
d := &tapDevice{
+ logf: logf,
file: file,
events: make(chan tun.Event),
name: tapName,
@@ -360,20 +381,22 @@ func newTAPDevice(fd int, tapName string) (tun.Device, error) {
return d, nil
}
-var (
- _ setWrapperer = &tapDevice{}
-)
-
type tapDevice struct {
- file *os.File
- events chan tun.Event
- name string
- wrapper *Wrapper
- closeOnce sync.Once
+ file *os.File
+ logf func(format string, args ...any)
+ events chan tun.Event
+ name string
+ closeOnce sync.Once
+ clientIPv4 syncs.AtomicValue[string]
+
+ destMACAtomic syncs.AtomicValue[[6]byte]
}
-func (t *tapDevice) setWrapper(wrapper *Wrapper) {
- t.wrapper = wrapper
+var _ setIPer = (*tapDevice)(nil)
+
+func (t *tapDevice) SetIP(ipV4, ipV6TODO netip.Addr) error {
+ t.clientIPv4.Store(ipV4.String())
+ return nil
}
func (t *tapDevice) File() *os.File {
@@ -384,36 +407,63 @@ func (t *tapDevice) Name() (string, error) {
return t.name, nil
}
+// Read reads an IP packet from the TAP device. It strips the ethernet frame header.
func (t *tapDevice) Read(buffs [][]byte, sizes []int, offset int) (int, error) {
+ n, err := t.ReadEthernet(buffs, sizes, offset)
+ if err != nil || n == 0 {
+ return n, err
+ }
+ // Strip the ethernet frame header.
+ copy(buffs[0][offset:], buffs[0][offset+ethernetFrameSize:offset+sizes[0]])
+ sizes[0] -= ethernetFrameSize
+ return 1, nil
+}
+
+// ReadEthernet reads a raw ethernet frame from the TAP device.
+func (t *tapDevice) ReadEthernet(buffs [][]byte, sizes []int, offset int) (int, error) {
n, err := t.file.Read(buffs[0][offset:])
if err != nil {
return 0, err
}
+ if t.handleTAPFrame(buffs[0][offset : offset+n]) {
+ return 0, nil
+ }
sizes[0] = n
return 1, nil
}
+// WriteEthernet writes a raw ethernet frame to the TAP device.
+func (t *tapDevice) WriteEthernet(buf []byte) (int, error) {
+ return t.file.Write(buf)
+}
+
+// ethBufPool holds a pool of bytes.Buffers for use in [tapDevice.Write].
+var ethBufPool = syncs.Pool[*bytes.Buffer]{New: func() *bytes.Buffer { return new(bytes.Buffer) }}
+
+// Write writes a raw IP packet to the TAP device. It adds the ethernet frame header.
func (t *tapDevice) Write(buffs [][]byte, offset int) (int, error) {
errs := make([]error, 0)
wrote := 0
+ m := t.destMAC()
+ dstMac := net.HardwareAddr(m[:])
+ buf := ethBufPool.Get()
+ defer ethBufPool.Put(buf)
for _, buff := range buffs {
- if offset < ethernetFrameSize {
- errs = append(errs, fmt.Errorf("[unexpected] weird offset %d for TAP write", offset))
- return 0, multierr.New(errs...)
+ buf.Reset()
+ buf.Grow(header.EthernetMinimumSize + len(buff) - offset)
+
+ var ebuf [14]byte
+ switch buff[offset] >> 4 {
+ case 4:
+ writeEthernetFrame(ebuf[:], ourMAC, dstMac, ipv4.ProtocolNumber)
+ case 6:
+ writeEthernetFrame(ebuf[:], ourMAC, dstMac, ipv6.ProtocolNumber)
+ default:
+ continue
}
- eth := buff[offset-ethernetFrameSize:]
- dst := t.wrapper.destMAC()
- copy(eth[:6], dst[:])
- copy(eth[6:12], ourMAC[:])
- et := etherTypeIPv4
- if buff[offset]>>4 == 6 {
- et = etherTypeIPv6
- }
- eth[12], eth[13] = et[0], et[1]
- if tapDebug {
- t.wrapper.logf("tap: tapWrite off=%v % x", offset, buff)
- }
- _, err := t.file.Write(buff[offset-ethernetFrameSize:])
+ buf.Write(ebuf[:])
+ buf.Write(buff[offset:])
+ _, err := t.WriteEthernet(buf.Bytes())
if err != nil {
errs = append(errs, err)
} else {
@@ -428,8 +478,7 @@ func (t *tapDevice) MTU() (int, error) {
if err != nil {
return 0, err
}
- err = unix.IoctlIfreq(int(t.file.Fd()), unix.SIOCGIFMTU, ifr)
- if err != nil {
+ if err := unix.IoctlIfreq(int(t.file.Fd()), unix.SIOCGIFMTU, ifr); err != nil {
return 0, err
}
return int(ifr.Uint32()), nil
diff --git a/net/tstun/tap_unsupported.go b/net/tstun/tap_unsupported.go
deleted file mode 100644
index 6792b229f6b79..0000000000000
--- a/net/tstun/tap_unsupported.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright (c) Tailscale Inc & AUTHORS
-// SPDX-License-Identifier: BSD-3-Clause
-
-//go:build !linux || ts_omit_tap
-
-package tstun
-
-func (*Wrapper) handleTAPFrame([]byte) bool { panic("unreachable") }
diff --git a/net/tstun/tun.go b/net/tstun/tun.go
index 66e209d1acb5a..9f5d42ecc3269 100644
--- a/net/tstun/tun.go
+++ b/net/tstun/tun.go
@@ -18,7 +18,7 @@ import (
)
// createTAP is non-nil on Linux.
-var createTAP func(tapName, bridgeName string) (tun.Device, error)
+var createTAP func(logf logger.Logf, tapName, bridgeName string) (tun.Device, error)
// New returns a tun.Device for the requested device name, along with
// the OS-dependent name that was allocated to the device.
@@ -42,7 +42,7 @@ func New(logf logger.Logf, tunName string) (tun.Device, string, error) {
default:
return nil, "", errors.New("bogus tap argument")
}
- dev, err = createTAP(tapName, bridgeName)
+ dev, err = createTAP(logf, tapName, bridgeName)
} else {
dev, err = tun.CreateTUN(tunName, int(DefaultTUNMTU()))
}
diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go
index dcd43d5718ca8..deb8bc0944a37 100644
--- a/net/tstun/wrap.go
+++ b/net/tstun/wrap.go
@@ -109,9 +109,7 @@ type Wrapper struct {
lastActivityAtomic mono.Time // time of last send or receive
destIPActivity syncs.AtomicValue[map[netip.Addr]func()]
- //lint:ignore U1000 used in tap_linux.go
- destMACAtomic syncs.AtomicValue[[6]byte]
- discoKey syncs.AtomicValue[key.DiscoPublic]
+ discoKey syncs.AtomicValue[key.DiscoPublic]
// timeNow, if non-nil, will be used to obtain the current time.
timeNow func() time.Time
@@ -215,24 +213,14 @@ type Wrapper struct {
}
type metrics struct {
- inboundDroppedPacketsTotal *tsmetrics.MultiLabelMap[dropPacketLabel]
- outboundDroppedPacketsTotal *tsmetrics.MultiLabelMap[dropPacketLabel]
+ inboundDroppedPacketsTotal *tsmetrics.MultiLabelMap[usermetric.DropLabels]
+ outboundDroppedPacketsTotal *tsmetrics.MultiLabelMap[usermetric.DropLabels]
}
func registerMetrics(reg *usermetric.Registry) *metrics {
return &metrics{
- inboundDroppedPacketsTotal: usermetric.NewMultiLabelMapWithRegistry[dropPacketLabel](
- reg,
- "tailscaled_inbound_dropped_packets_total",
- "counter",
- "Counts the number of dropped packets received by the node from other peers",
- ),
- outboundDroppedPacketsTotal: usermetric.NewMultiLabelMapWithRegistry[dropPacketLabel](
- reg,
- "tailscaled_outbound_dropped_packets_total",
- "counter",
- "Counts the number of packets dropped while being sent to other peers",
- ),
+ inboundDroppedPacketsTotal: reg.DroppedPacketsInbound(),
+ outboundDroppedPacketsTotal: reg.DroppedPacketsOutbound(),
}
}
@@ -257,12 +245,6 @@ type tunVectorReadResult struct {
dataOffset int
}
-type setWrapperer interface {
- // setWrapper enables the underlying TUN/TAP to have access to the Wrapper.
- // It MUST be called only once during initialization, other usage is unsafe.
- setWrapper(*Wrapper)
-}
-
// Start unblocks any Wrapper.Read calls that have already started
// and makes the Wrapper functional.
//
@@ -313,10 +295,6 @@ func wrap(logf logger.Logf, tdev tun.Device, isTAP bool, m *usermetric.Registry)
w.bufferConsumed <- struct{}{}
w.noteActivity()
- if sw, ok := w.tdev.(setWrapperer); ok {
- sw.setWrapper(w)
- }
-
return w
}
@@ -459,12 +437,18 @@ const ethernetFrameSize = 14 // 2 six byte MACs, 2 bytes ethertype
func (t *Wrapper) pollVector() {
sizes := make([]int, len(t.vectorBuffer))
readOffset := PacketStartOffset
+ reader := t.tdev.Read
if t.isTAP {
- readOffset = PacketStartOffset - ethernetFrameSize
+ type tapReader interface {
+ ReadEthernet(buffs [][]byte, sizes []int, offset int) (int, error)
+ }
+ if r, ok := t.tdev.(tapReader); ok {
+ readOffset = PacketStartOffset - ethernetFrameSize
+ reader = r.ReadEthernet
+ }
}
for range t.bufferConsumed {
- DoRead:
for i := range t.vectorBuffer {
t.vectorBuffer[i] = t.vectorBuffer[i][:cap(t.vectorBuffer[i])]
}
@@ -474,7 +458,7 @@ func (t *Wrapper) pollVector() {
if t.isClosed() {
return
}
- n, err = t.tdev.Read(t.vectorBuffer[:], sizes, readOffset)
+ n, err = reader(t.vectorBuffer[:], sizes, readOffset)
if t.isTAP && tapDebug {
s := fmt.Sprintf("% x", t.vectorBuffer[0][:])
for strings.HasSuffix(s, " 00") {
@@ -486,21 +470,6 @@ func (t *Wrapper) pollVector() {
for i := range sizes[:n] {
t.vectorBuffer[i] = t.vectorBuffer[i][:readOffset+sizes[i]]
}
- if t.isTAP {
- if err == nil {
- ethernetFrame := t.vectorBuffer[0][readOffset:]
- if t.handleTAPFrame(ethernetFrame) {
- goto DoRead
- }
- }
- // Fall through. We got an IP packet.
- if sizes[0] >= ethernetFrameSize {
- t.vectorBuffer[0] = t.vectorBuffer[0][:readOffset+sizes[0]-ethernetFrameSize]
- }
- if tapDebug {
- t.logf("tap regular frame: %x", t.vectorBuffer[0][PacketStartOffset:PacketStartOffset+sizes[0]])
- }
- }
t.sendVectorOutbound(tunVectorReadResult{
data: t.vectorBuffer[:n],
dataOffset: PacketStartOffset,
@@ -823,10 +792,19 @@ func (pc *peerConfigTable) outboundPacketIsJailed(p *packet.Parsed) bool {
return c.jailed
}
+type setIPer interface {
+ // SetIP sets the IP addresses of the TAP device.
+ SetIP(ipV4, ipV6 netip.Addr) error
+}
+
// SetWGConfig is called when a new NetworkMap is received.
func (t *Wrapper) SetWGConfig(wcfg *wgcfg.Config) {
+ if t.isTAP {
+ if sip, ok := t.tdev.(setIPer); ok {
+ sip.SetIP(findV4(wcfg.Addresses), findV6(wcfg.Addresses))
+ }
+ }
cfg := peerConfigTableFromWGConfig(wcfg)
-
old := t.peerConfig.Swap(cfg)
if !reflect.DeepEqual(old, cfg) {
t.logf("peer config: %v", cfg)
@@ -898,9 +876,10 @@ func (t *Wrapper) filterPacketOutboundToWireGuard(p *packet.Parsed, pc *peerConf
if filt.RunOut(p, t.filterFlags) != filter.Accept {
metricPacketOutDropFilter.Add(1)
- t.metrics.outboundDroppedPacketsTotal.Add(dropPacketLabel{
- Reason: DropReasonACL,
- }, 1)
+ // TODO(#14280): increment a t.metrics.outboundDroppedPacketsTotal here
+ // once we figure out & document what labels to use for multicast,
+ // link-local-unicast, IP fragments, etc. But they're not
+ // usermetric.ReasonACL.
return filter.Drop, gro
}
@@ -1170,8 +1149,8 @@ func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook ca
if outcome != filter.Accept {
metricPacketInDropFilter.Add(1)
- t.metrics.inboundDroppedPacketsTotal.Add(dropPacketLabel{
- Reason: DropReasonACL,
+ t.metrics.inboundDroppedPacketsTotal.Add(usermetric.DropLabels{
+ Reason: usermetric.ReasonACL,
}, 1)
// Tell them, via TSMP, we're dropping them due to the ACL.
@@ -1251,8 +1230,8 @@ func (t *Wrapper) Write(buffs [][]byte, offset int) (int, error) {
t.noteActivity()
_, err := t.tdevWrite(buffs, offset)
if err != nil {
- t.metrics.inboundDroppedPacketsTotal.Add(dropPacketLabel{
- Reason: DropReasonError,
+ t.metrics.inboundDroppedPacketsTotal.Add(usermetric.DropLabels{
+ Reason: usermetric.ReasonError,
}, int64(len(buffs)))
}
return len(buffs), err
@@ -1494,20 +1473,6 @@ var (
metricPacketOutDropSelfDisco = clientmetric.NewCounter("tstun_out_to_wg_drop_self_disco")
)
-type DropReason string
-
-const (
- DropReasonACL DropReason = "acl"
- DropReasonError DropReason = "error"
-)
-
-type dropPacketLabel struct {
- // Reason indicates what we have done with the packet, and has the following values:
- // - acl (rejected packets because of ACL)
- // - error (rejected packets because of an error)
- Reason DropReason
-}
-
func (t *Wrapper) InstallCaptureHook(cb capture.Callback) {
t.captureHook.Store(cb)
}
diff --git a/net/tstun/wrap_test.go b/net/tstun/wrap_test.go
index 0ed0075b616ee..a3dfe7d86c914 100644
--- a/net/tstun/wrap_test.go
+++ b/net/tstun/wrap_test.go
@@ -441,19 +441,19 @@ func TestFilter(t *testing.T) {
}
var metricInboundDroppedPacketsACL, metricInboundDroppedPacketsErr, metricOutboundDroppedPacketsACL int64
- if m, ok := tun.metrics.inboundDroppedPacketsTotal.Get(dropPacketLabel{Reason: DropReasonACL}).(*expvar.Int); ok {
+ if m, ok := tun.metrics.inboundDroppedPacketsTotal.Get(usermetric.DropLabels{Reason: usermetric.ReasonACL}).(*expvar.Int); ok {
metricInboundDroppedPacketsACL = m.Value()
}
- if m, ok := tun.metrics.inboundDroppedPacketsTotal.Get(dropPacketLabel{Reason: DropReasonError}).(*expvar.Int); ok {
+ if m, ok := tun.metrics.inboundDroppedPacketsTotal.Get(usermetric.DropLabels{Reason: usermetric.ReasonError}).(*expvar.Int); ok {
metricInboundDroppedPacketsErr = m.Value()
}
- if m, ok := tun.metrics.outboundDroppedPacketsTotal.Get(dropPacketLabel{Reason: DropReasonACL}).(*expvar.Int); ok {
+ if m, ok := tun.metrics.outboundDroppedPacketsTotal.Get(usermetric.DropLabels{Reason: usermetric.ReasonACL}).(*expvar.Int); ok {
metricOutboundDroppedPacketsACL = m.Value()
}
assertMetricPackets(t, "inACL", 3, metricInboundDroppedPacketsACL)
assertMetricPackets(t, "inError", 0, metricInboundDroppedPacketsErr)
- assertMetricPackets(t, "outACL", 1, metricOutboundDroppedPacketsACL)
+ assertMetricPackets(t, "outACL", 0, metricOutboundDroppedPacketsACL)
}
func assertMetricPackets(t *testing.T, metricName string, want, got int64) {
diff --git a/prober/derp.go b/prober/derp.go
index 0dadbe8c2fe06..b1ebc590d4f98 100644
--- a/prober/derp.go
+++ b/prober/derp.go
@@ -45,6 +45,9 @@ type derpProber struct {
bwInterval time.Duration
bwProbeSize int64
+ // Optionally restrict probes to a single regionCode.
+ regionCode string
+
// Probe class for fetching & updating the DERP map.
ProbeMap ProbeClass
@@ -97,6 +100,14 @@ func WithTLSProbing(interval time.Duration) DERPOpt {
}
}
+// WithRegion restricts probing to the specified region identified by its code
+// (e.g. "lax"). This is case sensitive.
+func WithRegion(regionCode string) DERPOpt {
+ return func(d *derpProber) {
+ d.regionCode = regionCode
+ }
+}
+
// DERP creates a new derpProber.
//
// If derpMapURL is "local", the DERPMap is fetched via
@@ -135,6 +146,10 @@ func (d *derpProber) probeMapFn(ctx context.Context) error {
defer d.Unlock()
for _, region := range d.lastDERPMap.Regions {
+ if d.skipRegion(region) {
+ continue
+ }
+
for _, server := range region.Nodes {
labels := Labels{
"region": region.RegionCode,
@@ -316,6 +331,10 @@ func (d *derpProber) updateMap(ctx context.Context) error {
d.lastDERPMapAt = time.Now()
d.nodes = make(map[string]*tailcfg.DERPNode)
for _, reg := range d.lastDERPMap.Regions {
+ if d.skipRegion(reg) {
+ continue
+ }
+
for _, n := range reg.Nodes {
if existing, ok := d.nodes[n.Name]; ok {
return fmt.Errorf("derpmap has duplicate nodes: %+v and %+v", existing, n)
@@ -338,6 +357,10 @@ func (d *derpProber) ProbeUDP(ipaddr string, port int) ProbeClass {
}
}
+func (d *derpProber) skipRegion(region *tailcfg.DERPRegion) bool {
+ return d.regionCode != "" && region.RegionCode != d.regionCode
+}
+
func derpProbeUDP(ctx context.Context, ipStr string, port int) error {
pc, err := net.ListenPacket("udp", ":0")
if err != nil {
diff --git a/prober/derp_test.go b/prober/derp_test.go
index a34292a23b6f4..c084803e94f6a 100644
--- a/prober/derp_test.go
+++ b/prober/derp_test.go
@@ -44,6 +44,19 @@ func TestDerpProber(t *testing.T) {
},
},
},
+ 1: {
+ RegionID: 1,
+ RegionCode: "one",
+ Nodes: []*tailcfg.DERPNode{
+ {
+ Name: "n3",
+ RegionID: 0,
+ HostName: "derpn3.tailscale.test",
+ IPv4: "1.1.1.1",
+ IPv6: "::1",
+ },
+ },
+ },
},
}
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -68,6 +81,7 @@ func TestDerpProber(t *testing.T) {
meshProbeFn: func(_, _ string) ProbeClass { return FuncProbe(func(context.Context) error { return nil }) },
nodes: make(map[string]*tailcfg.DERPNode),
probes: make(map[string]*Probe),
+ regionCode: "zero",
}
if err := dp.probeMapFn(context.Background()); err != nil {
t.Errorf("unexpected probeMapFn() error: %s", err)
@@ -84,9 +98,9 @@ func TestDerpProber(t *testing.T) {
// Add one more node and check that probes got created.
dm.Regions[0].Nodes = append(dm.Regions[0].Nodes, &tailcfg.DERPNode{
- Name: "n3",
+ Name: "n4",
RegionID: 0,
- HostName: "derpn3.tailscale.test",
+ HostName: "derpn4.tailscale.test",
IPv4: "1.1.1.1",
IPv6: "::1",
})
@@ -113,6 +127,19 @@ func TestDerpProber(t *testing.T) {
if len(dp.probes) != 4 {
t.Errorf("unexpected probes: %+v", dp.probes)
}
+
+ // Stop filtering regions.
+ dp.regionCode = ""
+ if err := dp.probeMapFn(context.Background()); err != nil {
+ t.Errorf("unexpected probeMapFn() error: %s", err)
+ }
+ if len(dp.nodes) != 2 {
+ t.Errorf("unexpected nodes: %+v", dp.nodes)
+ }
+ // 6 regular probes + 2 mesh probes
+ if len(dp.probes) != 8 {
+ t.Errorf("unexpected probes: %+v", dp.probes)
+ }
}
func TestRunDerpProbeNodePair(t *testing.T) {
diff --git a/release/dist/synology/pkgs.go b/release/dist/synology/pkgs.go
index 7802470e167fe..ab89dbee3e19f 100644
--- a/release/dist/synology/pkgs.go
+++ b/release/dist/synology/pkgs.go
@@ -155,8 +155,22 @@ func (t *target) mkInfo(b *dist.Build, uncompressedSz int64) []byte {
f("os_min_ver", "6.0.1-7445")
f("os_max_ver", "7.0-40000")
case 7:
- f("os_min_ver", "7.0-40000")
- f("os_max_ver", "")
+ if t.packageCenter {
+ switch t.dsmMinorVersion {
+ case 0:
+ f("os_min_ver", "7.0-40000")
+ f("os_max_ver", "7.2-60000")
+ case 2:
+ f("os_min_ver", "7.2-60000")
+ default:
+ panic(fmt.Sprintf("unsupported DSM major.minor version %s", t.dsmVersionString()))
+ }
+ } else {
+ // We do not clamp the os_max_ver currently for non-package center builds as
+ // the binaries for 7.0 and 7.2 are identical.
+ f("os_min_ver", "7.0-40000")
+ f("os_max_ver", "")
+ }
default:
panic(fmt.Sprintf("unsupported DSM major version %d", t.dsmMajorVersion))
}
diff --git a/safeweb/http.go b/safeweb/http.go
index 14c61336ac311..983ff2fad8031 100644
--- a/safeweb/http.go
+++ b/safeweb/http.go
@@ -71,28 +71,78 @@ package safeweb
import (
"cmp"
+ "context"
crand "crypto/rand"
"fmt"
"log"
+ "maps"
"net"
"net/http"
"net/url"
"path"
+ "slices"
"strings"
"github.com/gorilla/csrf"
)
-// The default Content-Security-Policy header.
-var defaultCSP = strings.Join([]string{
- `default-src 'self'`, // origin is the only valid source for all content types
- `script-src 'self'`, // disallow inline javascript
- `frame-ancestors 'none'`, // disallow framing of the page
- `form-action 'self'`, // disallow form submissions to other origins
- `base-uri 'self'`, // disallow base URIs from other origins
- `block-all-mixed-content`, // disallow mixed content when serving over HTTPS
- `object-src 'self'`, // disallow embedding of resources from other origins
-}, "; ")
+// CSP is the value of a Content-Security-Policy header. Keys are CSP
+// directives (like "default-src") and values are source expressions (like
+// "'self'" or "https://tailscale.com"). A nil slice value is allowed for some
+// directives like "upgrade-insecure-requests" that don't expect a list of
+// source definitions.
+type CSP map[string][]string
+
+// DefaultCSP is the recommended CSP to use when not loading resources from
+// other domains and not embedding the current website. If you need to tweak
+// the CSP, it is recommended to extend DefaultCSP instead of writing your own
+// from scratch.
+func DefaultCSP() CSP {
+ return CSP{
+ "default-src": {"self"}, // origin is the only valid source for all content types
+ "frame-ancestors": {"none"}, // disallow framing of the page
+ "form-action": {"self"}, // disallow form submissions to other origins
+ "base-uri": {"self"}, // disallow base URIs from other origins
+ // TODO(awly): consider upgrade-insecure-requests in SecureContext
+ // instead, as this is deprecated.
+ "block-all-mixed-content": nil, // disallow mixed content when serving over HTTPS
+ }
+}
+
+// Set sets the values for a given directive. Empty values are allowed, if the
+// directive doesn't expect any (like "upgrade-insecure-requests").
+func (csp CSP) Set(directive string, values ...string) {
+ csp[directive] = values
+}
+
+// Add adds a source expression to an existing directive.
+func (csp CSP) Add(directive, value string) {
+ csp[directive] = append(csp[directive], value)
+}
+
+// Del deletes a directive and all its values.
+func (csp CSP) Del(directive string) {
+ delete(csp, directive)
+}
+
+func (csp CSP) String() string {
+ keys := slices.Collect(maps.Keys(csp))
+ slices.Sort(keys)
+ var s strings.Builder
+ for _, k := range keys {
+ s.WriteString(k)
+ for _, v := range csp[k] {
+ // Special values like 'self', 'none', 'unsafe-inline', etc., must
+ // be quoted. Do it implicitly as a convenience here.
+ if !strings.Contains(v, ".") && len(v) > 1 && v[0] != '\'' && v[len(v)-1] != '\'' {
+ v = "'" + v + "'"
+ }
+ s.WriteString(" " + v)
+ }
+ s.WriteString("; ")
+ }
+ return strings.TrimSpace(s.String())
+}
// The default Strict-Transport-Security header. This header tells the browser
// to exclusively use HTTPS for all requests to the origin for the next year.
@@ -130,6 +180,9 @@ type Config struct {
// startup.
CSRFSecret []byte
+ // CSP is the Content-Security-Policy header to return with BrowserMux
+ // responses.
+ CSP CSP
// CSPAllowInlineStyles specifies whether to include `style-src:
// unsafe-inline` in the Content-Security-Policy header to permit the use of
// inline CSS.
@@ -168,6 +221,10 @@ func (c *Config) setDefaults() error {
}
}
+ if c.CSP == nil {
+ c.CSP = DefaultCSP()
+ }
+
return nil
}
@@ -199,16 +256,20 @@ func NewServer(config Config) (*Server, error) {
if config.CookiesSameSiteLax {
sameSite = csrf.SameSiteLaxMode
}
+ if config.CSPAllowInlineStyles {
+ if _, ok := config.CSP["style-src"]; ok {
+ config.CSP.Add("style-src", "unsafe-inline")
+ } else {
+ config.CSP.Set("style-src", "self", "unsafe-inline")
+ }
+ }
s := &Server{
Config: config,
- csp: defaultCSP,
+ csp: config.CSP.String(),
// only set Secure flag on CSRF cookies if we are in a secure context
// as otherwise the browser will reject the cookie
csrfProtect: csrf.Protect(config.CSRFSecret, csrf.Secure(config.SecureContext), csrf.SameSite(sameSite)),
}
- if config.CSPAllowInlineStyles {
- s.csp = defaultCSP + `; style-src 'self' 'unsafe-inline'`
- }
s.h = cmp.Or(config.HTTPServer, &http.Server{})
if s.h.Handler != nil {
return nil, fmt.Errorf("use safeweb.Config.APIMux and safeweb.Config.BrowserMux instead of http.Server.Handler")
@@ -225,12 +286,27 @@ const (
browserHandler
)
+func (h handlerType) String() string {
+ switch h {
+ case browserHandler:
+ return "browser"
+ case apiHandler:
+ return "api"
+ default:
+ return "unknown"
+ }
+}
+
// checkHandlerType returns either apiHandler or browserHandler, depending on
// whether apiPattern or browserPattern is more specific (i.e. which pattern
// contains more pathname components). If they are equally specific, it returns
// unknownHandler.
func checkHandlerType(apiPattern, browserPattern string) handlerType {
- c := cmp.Compare(strings.Count(path.Clean(apiPattern), "/"), strings.Count(path.Clean(browserPattern), "/"))
+ apiPattern, browserPattern = path.Clean(apiPattern), path.Clean(browserPattern)
+ c := cmp.Compare(strings.Count(apiPattern, "/"), strings.Count(browserPattern, "/"))
+ if apiPattern == "/" || browserPattern == "/" {
+ c = cmp.Compare(len(apiPattern), len(browserPattern))
+ }
switch {
case c > 0:
return apiHandler
@@ -341,3 +417,7 @@ func (s *Server) ListenAndServe(addr string) error {
func (s *Server) Close() error {
return s.h.Close()
}
+
+// Shutdown gracefully shuts down the server without interrupting any active
+// connections. It has the same semantics as [http.Server.Shutdown].
+func (s *Server) Shutdown(ctx context.Context) error { return s.h.Shutdown(ctx) }
diff --git a/safeweb/http_test.go b/safeweb/http_test.go
index cec14b2b9bb8b..852ce326ba374 100644
--- a/safeweb/http_test.go
+++ b/safeweb/http_test.go
@@ -241,18 +241,26 @@ func TestCSRFProtection(t *testing.T) {
func TestContentSecurityPolicyHeader(t *testing.T) {
tests := []struct {
name string
+ csp CSP
apiRoute bool
- wantCSP bool
+ wantCSP string
}{
{
- name: "default routes get CSP headers",
- apiRoute: false,
- wantCSP: true,
+ name: "default CSP",
+ wantCSP: `base-uri 'self'; block-all-mixed-content; default-src 'self'; form-action 'self'; frame-ancestors 'none';`,
+ },
+ {
+ name: "custom CSP",
+ csp: CSP{
+ "default-src": {"'self'", "https://tailscale.com"},
+ "upgrade-insecure-requests": nil,
+ },
+ wantCSP: `default-src 'self' https://tailscale.com; upgrade-insecure-requests;`,
},
{
name: "`/api/*` routes do not get CSP headers",
apiRoute: true,
- wantCSP: false,
+ wantCSP: "",
},
}
@@ -265,9 +273,9 @@ func TestContentSecurityPolicyHeader(t *testing.T) {
var s *Server
var err error
if tt.apiRoute {
- s, err = NewServer(Config{APIMux: h})
+ s, err = NewServer(Config{APIMux: h, CSP: tt.csp})
} else {
- s, err = NewServer(Config{BrowserMux: h})
+ s, err = NewServer(Config{BrowserMux: h, CSP: tt.csp})
}
if err != nil {
t.Fatal(err)
@@ -279,8 +287,8 @@ func TestContentSecurityPolicyHeader(t *testing.T) {
s.h.Handler.ServeHTTP(w, req)
resp := w.Result()
- if (resp.Header.Get("Content-Security-Policy") == "") == tt.wantCSP {
- t.Fatalf("content security policy want: %v; got: %v", tt.wantCSP, resp.Header.Get("Content-Security-Policy"))
+ if got := resp.Header.Get("Content-Security-Policy"); got != tt.wantCSP {
+ t.Fatalf("content security policy want: %q; got: %q", tt.wantCSP, got)
}
})
}
@@ -397,7 +405,7 @@ func TestCSPAllowInlineStyles(t *testing.T) {
csp := resp.Header.Get("Content-Security-Policy")
allowsStyles := strings.Contains(csp, "style-src 'self' 'unsafe-inline'")
if allowsStyles != allow {
- t.Fatalf("CSP inline styles want: %v; got: %v", allow, allowsStyles)
+ t.Fatalf("CSP inline styles want: %v, got: %v in %q", allow, allowsStyles, csp)
}
})
}
@@ -527,13 +535,13 @@ func TestGetMoreSpecificPattern(t *testing.T) {
{
desc: "same prefix",
a: "/foo/bar/quux",
- b: "/foo/bar/",
+ b: "/foo/bar/", // path.Clean will strip the trailing slash.
want: apiHandler,
},
{
desc: "almost same prefix, but not a path component",
a: "/goat/sheep/cheese",
- b: "/goat/sheepcheese/",
+ b: "/goat/sheepcheese/", // path.Clean will strip the trailing slash.
want: apiHandler,
},
{
@@ -554,6 +562,12 @@ func TestGetMoreSpecificPattern(t *testing.T) {
b: "///////",
want: unknownHandler,
},
+ {
+ desc: "root-level",
+ a: "/latest",
+ b: "/", // path.Clean will NOT strip the trailing slash.
+ want: apiHandler,
+ },
} {
t.Run(tt.desc, func(t *testing.T) {
got := checkHandlerType(tt.a, tt.b)
diff --git a/scripts/installer.sh b/scripts/installer.sh
index 19911ee23c8a7..d2971978eebe7 100755
--- a/scripts/installer.sh
+++ b/scripts/installer.sh
@@ -224,7 +224,7 @@ main() {
VERSION="leap/15.4"
PACKAGETYPE="zypper"
;;
- arch|archarm|endeavouros|blendos|garuda)
+ arch|archarm|endeavouros|blendos|garuda|archcraft)
OS="arch"
VERSION="" # rolling release
PACKAGETYPE="pacman"
@@ -488,9 +488,41 @@ main() {
set +x
;;
dnf)
+ # DNF 5 has a different argument format; determine which one we have.
+ DNF_VERSION="3"
+ if dnf --version | grep -q '^dnf5 version'; then
+ DNF_VERSION="5"
+ fi
+
+ # The 'config-manager' plugin wasn't implemented when
+ # DNF5 was released; detect that and use the old
+ # version if necessary.
+ if [ "$DNF_VERSION" = "5" ]; then
+ set -x
+ $SUDO dnf install -y 'dnf-command(config-manager)' && DNF_HAVE_CONFIG_MANAGER=1 || DNF_HAVE_CONFIG_MANAGER=0
+ set +x
+
+ if [ "$DNF_HAVE_CONFIG_MANAGER" != "1" ]; then
+ if type dnf-3 >/dev/null; then
+ DNF_VERSION="3"
+ else
+ echo "dnf 5 detected, but 'dnf-command(config-manager)' not available and dnf-3 not found"
+ exit 1
+ fi
+ fi
+ fi
+
set -x
- $SUDO dnf install -y 'dnf-command(config-manager)'
- $SUDO dnf config-manager --add-repo "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION/tailscale.repo"
+ if [ "$DNF_VERSION" = "3" ]; then
+ $SUDO dnf install -y 'dnf-command(config-manager)'
+ $SUDO dnf config-manager --add-repo "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION/tailscale.repo"
+ elif [ "$DNF_VERSION" = "5" ]; then
+ # Already installed config-manager, above.
+ $SUDO dnf config-manager addrepo --from-repofile="https://pkgs.tailscale.com/$TRACK/$OS/$VERSION/tailscale.repo"
+ else
+ echo "unexpected: unknown dnf version $DNF_VERSION"
+ exit 1
+ fi
$SUDO dnf install -y tailscale
$SUDO systemctl enable --now tailscaled
set +x
diff --git a/sessionrecording/connect.go b/sessionrecording/connect.go
index db966ba2cdee2..94761393f885d 100644
--- a/sessionrecording/connect.go
+++ b/sessionrecording/connect.go
@@ -7,6 +7,8 @@ package sessionrecording
import (
"context"
+ "crypto/tls"
+ "encoding/json"
"errors"
"fmt"
"io"
@@ -14,12 +16,33 @@ import (
"net/http"
"net/http/httptrace"
"net/netip"
+ "sync/atomic"
"time"
+ "golang.org/x/net/http2"
"tailscale.com/tailcfg"
+ "tailscale.com/util/httpm"
"tailscale.com/util/multierr"
)
+const (
+ // Timeout for an individual DialFunc call for a single recorder address.
+ perDialAttemptTimeout = 5 * time.Second
+ // Timeout for the V2 API HEAD probe request (supportsV2).
+ http2ProbeTimeout = 10 * time.Second
+ // Maximum timeout for trying all available recorders, including V2 API
+ // probes and dial attempts.
+ allDialAttemptsTimeout = 30 * time.Second
+)
+
+// uploadAckWindow is the period of time to wait for an ackFrame from recorder
+// before terminating the connection. This is a variable to allow overriding it
+// in tests.
+var uploadAckWindow = 30 * time.Second
+
+// DialFunc is a function for dialing the recorder.
+type DialFunc func(ctx context.Context, network, host string) (net.Conn, error)
+
// ConnectToRecorder connects to the recorder at any of the provided addresses.
// It returns the first successful response, or a multierr if all attempts fail.
//
@@ -32,19 +55,15 @@ import (
// attempts are in order the recorder(s) was attempted. If successful a
// successful connection is made, the last attempt in the slice is the
// attempt for connected recorder.
-func ConnectToRecorder(ctx context.Context, recs []netip.AddrPort, dial func(context.Context, string, string) (net.Conn, error)) (io.WriteCloser, []*tailcfg.SSHRecordingAttempt, <-chan error, error) {
+func ConnectToRecorder(ctx context.Context, recs []netip.AddrPort, dial DialFunc) (io.WriteCloser, []*tailcfg.SSHRecordingAttempt, <-chan error, error) {
if len(recs) == 0 {
return nil, nil, nil, errors.New("no recorders configured")
}
// We use a special context for dialing the recorder, so that we can
// limit the time we spend dialing to 30 seconds and still have an
// unbounded context for the upload.
- dialCtx, dialCancel := context.WithTimeout(ctx, 30*time.Second)
+ dialCtx, dialCancel := context.WithTimeout(ctx, allDialAttemptsTimeout)
defer dialCancel()
- hc, err := SessionRecordingClientForDialer(dialCtx, dial)
- if err != nil {
- return nil, nil, nil, err
- }
var errs []error
var attempts []*tailcfg.SSHRecordingAttempt
@@ -54,74 +73,230 @@ func ConnectToRecorder(ctx context.Context, recs []netip.AddrPort, dial func(con
}
attempts = append(attempts, attempt)
- // We dial the recorder and wait for it to send a 100-continue
- // response before returning from this function. This ensures that
- // the recorder is ready to accept the recording.
-
- // got100 is closed when we receive the 100-continue response.
- got100 := make(chan struct{})
- ctx = httptrace.WithClientTrace(ctx, &httptrace.ClientTrace{
- Got100Continue: func() {
- close(got100)
- },
- })
-
- pr, pw := io.Pipe()
- req, err := http.NewRequestWithContext(ctx, "POST", fmt.Sprintf("http://%s:%d/record", ap.Addr(), ap.Port()), pr)
+ var pw io.WriteCloser
+ var errChan <-chan error
+ var err error
+ hc := clientHTTP2(dialCtx, dial)
+ // We need to probe V2 support using a separate HEAD request. Sending
+ // an HTTP/2 POST request to a HTTP/1 server will just "hang" until the
+ // request body is closed (instead of returning a 404 as one would
+ // expect). Sending a HEAD request without a body does not have that
+ // problem.
+ if supportsV2(ctx, hc, ap) {
+ pw, errChan, err = connectV2(ctx, hc, ap)
+ } else {
+ pw, errChan, err = connectV1(ctx, clientHTTP1(dialCtx, dial), ap)
+ }
if err != nil {
- err = fmt.Errorf("recording: error starting recording: %w", err)
+ err = fmt.Errorf("recording: error starting recording on %q: %w", ap, err)
attempt.FailureMessage = err.Error()
errs = append(errs, err)
continue
}
- // We set the Expect header to 100-continue, so that the recorder
- // will send a 100-continue response before it starts reading the
- // request body.
- req.Header.Set("Expect", "100-continue")
+ return pw, attempts, errChan, nil
+ }
+ return nil, attempts, nil, multierr.New(errs...)
+}
- // errChan is used to indicate the result of the request.
- errChan := make(chan error, 1)
- go func() {
- resp, err := hc.Do(req)
- if err != nil {
- errChan <- fmt.Errorf("recording: error starting recording: %w", err)
+// supportsV2 checks whether a recorder instance supports the /v2/record
+// endpoint.
+func supportsV2(ctx context.Context, hc *http.Client, ap netip.AddrPort) bool {
+ ctx, cancel := context.WithTimeout(ctx, http2ProbeTimeout)
+ defer cancel()
+ req, err := http.NewRequestWithContext(ctx, httpm.HEAD, fmt.Sprintf("http://%s/v2/record", ap), nil)
+ if err != nil {
+ return false
+ }
+ resp, err := hc.Do(req)
+ if err != nil {
+ return false
+ }
+ defer resp.Body.Close()
+ return resp.StatusCode == http.StatusOK && resp.ProtoMajor > 1
+}
+
+// connectV1 connects to the legacy /record endpoint on the recorder. It is
+// used for backwards-compatibility with older tsrecorder instances.
+//
+// On success, it returns a WriteCloser that can be used to upload the
+// recording, and a channel that will be sent an error (or nil) when the upload
+// fails or completes.
+func connectV1(ctx context.Context, hc *http.Client, ap netip.AddrPort) (io.WriteCloser, <-chan error, error) {
+ // We dial the recorder and wait for it to send a 100-continue
+ // response before returning from this function. This ensures that
+ // the recorder is ready to accept the recording.
+
+ // got100 is closed when we receive the 100-continue response.
+ got100 := make(chan struct{})
+ ctx = httptrace.WithClientTrace(ctx, &httptrace.ClientTrace{
+ Got100Continue: func() {
+ close(got100)
+ },
+ })
+
+ pr, pw := io.Pipe()
+ req, err := http.NewRequestWithContext(ctx, "POST", fmt.Sprintf("http://%s/record", ap), pr)
+ if err != nil {
+ return nil, nil, err
+ }
+ // We set the Expect header to 100-continue, so that the recorder
+ // will send a 100-continue response before it starts reading the
+ // request body.
+ req.Header.Set("Expect", "100-continue")
+
+ // errChan is used to indicate the result of the request.
+ errChan := make(chan error, 1)
+ go func() {
+ defer close(errChan)
+ resp, err := hc.Do(req)
+ if err != nil {
+ errChan <- err
+ return
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ errChan <- fmt.Errorf("recording: unexpected status: %v", resp.Status)
+ return
+ }
+ }()
+ select {
+ case <-got100:
+ return pw, errChan, nil
+ case err := <-errChan:
+ // If we get an error before we get the 100-continue response,
+ // we need to try another recorder.
+ if err == nil {
+ // If the error is nil, we got a 200 response, which
+ // is unexpected as we haven't sent any data yet.
+ err = errors.New("recording: unexpected EOF")
+ }
+ return nil, nil, err
+ }
+}
+
+// connectV2 connects to the /v2/record endpoint on the recorder over HTTP/2.
+// It explicitly tracks ack frames sent in the response and terminates the
+// connection if sent recording data is un-acked for uploadAckWindow.
+//
+// On success, it returns a WriteCloser that can be used to upload the
+// recording, and a channel that will be sent an error (or nil) when the upload
+// fails or completes.
+func connectV2(ctx context.Context, hc *http.Client, ap netip.AddrPort) (io.WriteCloser, <-chan error, error) {
+ pr, pw := io.Pipe()
+ upload := &readCounter{r: pr}
+ req, err := http.NewRequestWithContext(ctx, "POST", fmt.Sprintf("http://%s/v2/record", ap), upload)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // With HTTP/2, hc.Do will not block while the request body is being sent.
+ // It will return immediately and allow us to consume the response body at
+ // the same time.
+ resp, err := hc.Do(req)
+ if err != nil {
+ return nil, nil, err
+ }
+ if resp.StatusCode != http.StatusOK {
+ resp.Body.Close()
+ return nil, nil, fmt.Errorf("recording: unexpected status: %v", resp.Status)
+ }
+
+ errChan := make(chan error, 1)
+ acks := make(chan int64)
+ // Read acks from the response and send them to the acks channel.
+ go func() {
+ defer close(errChan)
+ defer close(acks)
+ defer resp.Body.Close()
+ defer pw.Close()
+ dec := json.NewDecoder(resp.Body)
+ for {
+ var frame v2ResponseFrame
+ if err := dec.Decode(&frame); err != nil {
+ if !errors.Is(err, io.EOF) {
+ errChan <- fmt.Errorf("recording: unexpected error receiving acks: %w", err)
+ }
return
}
- if resp.StatusCode != 200 {
- errChan <- fmt.Errorf("recording: unexpected status: %v", resp.Status)
+ if frame.Error != "" {
+ errChan <- fmt.Errorf("recording: received error from the recorder: %q", frame.Error)
return
}
- errChan <- nil
- }()
- select {
- case <-got100:
- case err := <-errChan:
- // If we get an error before we get the 100-continue response,
- // we need to try another recorder.
- if err == nil {
- // If the error is nil, we got a 200 response, which
- // is unexpected as we haven't sent any data yet.
- err = errors.New("recording: unexpected EOF")
+ select {
+ case acks <- frame.Ack:
+ case <-ctx.Done():
+ return
}
- attempt.FailureMessage = err.Error()
- errs = append(errs, err)
- continue // try the next recorder
}
- return pw, attempts, errChan, nil
- }
- return nil, attempts, nil, multierr.New(errs...)
+ }()
+ // Track acks from the acks channel.
+ go func() {
+ // Hack for tests: some tests modify uploadAckWindow and reset it when
+ // the test ends. This can race with t.Reset call below. Making a copy
+ // here is a lazy workaround to not wait for this goroutine to exit in
+ // the test cases.
+ uploadAckWindow := uploadAckWindow
+ // This timer fires if we didn't receive an ack for too long.
+ t := time.NewTimer(uploadAckWindow)
+ defer t.Stop()
+ for {
+ select {
+ case <-t.C:
+ // Close the pipe which terminates the connection and cleans up
+ // other goroutines. Note that tsrecorder will send us ack
+ // frames even if there is no new data to ack. This helps
+ // detect broken recorder connection if the session is idle.
+ pr.CloseWithError(errNoAcks)
+ resp.Body.Close()
+ return
+ case _, ok := <-acks:
+ if !ok {
+ // acks channel closed means that the goroutine reading them
+ // finished, which means that the request has ended.
+ return
+ }
+ // TODO(awly): limit how far behind the received acks can be. This
+ // should handle scenarios where a session suddenly dumps a lot of
+ // output.
+ t.Reset(uploadAckWindow)
+ case <-ctx.Done():
+ return
+ }
+ }
+ }()
+
+ return pw, errChan, nil
}
-// SessionRecordingClientForDialer returns an http.Client that uses a clone of
-// the provided Dialer's PeerTransport to dial connections. This is used to make
-// requests to the session recording server to upload session recordings. It
-// uses the provided dialCtx to dial connections, and limits a single dial to 5
-// seconds.
-func SessionRecordingClientForDialer(dialCtx context.Context, dial func(context.Context, string, string) (net.Conn, error)) (*http.Client, error) {
- tr := http.DefaultTransport.(*http.Transport).Clone()
+var errNoAcks = errors.New("did not receive ack frames from the recorder in 30s")
+
+type v2ResponseFrame struct {
+ // Ack is the number of bytes received from the client so far. The bytes
+ // are not guaranteed to be durably stored yet.
+ Ack int64 `json:"ack,omitempty"`
+ // Error is an error encountered while storing the recording. Error is only
+ // ever set as the last frame in the response.
+ Error string `json:"error,omitempty"`
+}
+// readCounter is an io.Reader that counts how many bytes were read.
+type readCounter struct {
+ r io.Reader
+ sent atomic.Int64
+}
+
+func (u *readCounter) Read(buf []byte) (int, error) {
+ n, err := u.r.Read(buf)
+ u.sent.Add(int64(n))
+ return n, err
+}
+
+// clientHTTP1 returns a classic http.Client with a per-dial context. It uses
+// dialCtx and adds a 5s timeout to it.
+func clientHTTP1(dialCtx context.Context, dial DialFunc) *http.Client {
+ tr := http.DefaultTransport.(*http.Transport).Clone()
tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
- perAttemptCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
+ perAttemptCtx, cancel := context.WithTimeout(ctx, perDialAttemptTimeout)
defer cancel()
go func() {
select {
@@ -132,7 +307,32 @@ func SessionRecordingClientForDialer(dialCtx context.Context, dial func(context.
}()
return dial(perAttemptCtx, network, addr)
}
+ return &http.Client{Transport: tr}
+}
+
+// clientHTTP2 is like clientHTTP1 but returns an http.Client suitable for h2c
+// requests (HTTP/2 over plaintext). Unfortunately the same client does not
+// work for HTTP/1 so we need to split these up.
+func clientHTTP2(dialCtx context.Context, dial DialFunc) *http.Client {
return &http.Client{
- Transport: tr,
- }, nil
+ Transport: &http2.Transport{
+ // Allow "http://" scheme in URLs.
+ AllowHTTP: true,
+ // Pretend like we're using TLS, but actually use the provided
+ // DialFunc underneath. This is necessary to convince the transport
+ // to actually dial.
+ DialTLSContext: func(ctx context.Context, network, addr string, _ *tls.Config) (net.Conn, error) {
+ perAttemptCtx, cancel := context.WithTimeout(ctx, perDialAttemptTimeout)
+ defer cancel()
+ go func() {
+ select {
+ case <-perAttemptCtx.Done():
+ case <-dialCtx.Done():
+ cancel()
+ }
+ }()
+ return dial(perAttemptCtx, network, addr)
+ },
+ },
+ }
}
diff --git a/sessionrecording/connect_test.go b/sessionrecording/connect_test.go
new file mode 100644
index 0000000000000..c0fcf6d40c617
--- /dev/null
+++ b/sessionrecording/connect_test.go
@@ -0,0 +1,189 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package sessionrecording
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/json"
+ "io"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "net/netip"
+ "testing"
+ "time"
+
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/h2c"
+)
+
+func TestConnectToRecorder(t *testing.T) {
+ tests := []struct {
+ desc string
+ http2 bool
+ // setup returns a recorder server mux, and a channel which sends the
+ // hash of the recording uploaded to it. The channel is expected to
+ // fire only once.
+ setup func(t *testing.T) (*http.ServeMux, <-chan []byte)
+ wantErr bool
+ }{
+ {
+ desc: "v1 recorder",
+ setup: func(t *testing.T) (*http.ServeMux, <-chan []byte) {
+ uploadHash := make(chan []byte, 1)
+ mux := http.NewServeMux()
+ mux.HandleFunc("POST /record", func(w http.ResponseWriter, r *http.Request) {
+ hash := sha256.New()
+ if _, err := io.Copy(hash, r.Body); err != nil {
+ t.Error(err)
+ }
+ uploadHash <- hash.Sum(nil)
+ })
+ return mux, uploadHash
+ },
+ },
+ {
+ desc: "v2 recorder",
+ http2: true,
+ setup: func(t *testing.T) (*http.ServeMux, <-chan []byte) {
+ uploadHash := make(chan []byte, 1)
+ mux := http.NewServeMux()
+ mux.HandleFunc("POST /record", func(w http.ResponseWriter, r *http.Request) {
+ t.Error("received request to v1 endpoint")
+ http.Error(w, "not found", http.StatusNotFound)
+ })
+ mux.HandleFunc("POST /v2/record", func(w http.ResponseWriter, r *http.Request) {
+ // Force the status to send to unblock the client waiting
+ // for it.
+ w.WriteHeader(http.StatusOK)
+ w.(http.Flusher).Flush()
+
+ body := &readCounter{r: r.Body}
+ hash := sha256.New()
+ ctx, cancel := context.WithCancel(r.Context())
+ go func() {
+ defer cancel()
+ if _, err := io.Copy(hash, body); err != nil {
+ t.Error(err)
+ }
+ }()
+
+ // Send acks for received bytes.
+ tick := time.NewTicker(time.Millisecond)
+ defer tick.Stop()
+ enc := json.NewEncoder(w)
+ outer:
+ for {
+ select {
+ case <-ctx.Done():
+ break outer
+ case <-tick.C:
+ if err := enc.Encode(v2ResponseFrame{Ack: body.sent.Load()}); err != nil {
+ t.Errorf("writing ack frame: %v", err)
+ break outer
+ }
+ }
+ }
+
+ uploadHash <- hash.Sum(nil)
+ })
+ // Probing HEAD endpoint which always returns 200 OK.
+ mux.HandleFunc("HEAD /v2/record", func(http.ResponseWriter, *http.Request) {})
+ return mux, uploadHash
+ },
+ },
+ {
+ desc: "v2 recorder no acks",
+ http2: true,
+ wantErr: true,
+ setup: func(t *testing.T) (*http.ServeMux, <-chan []byte) {
+ // Make the client no-ack timeout quick for the test.
+ oldAckWindow := uploadAckWindow
+ uploadAckWindow = 100 * time.Millisecond
+ t.Cleanup(func() { uploadAckWindow = oldAckWindow })
+
+ uploadHash := make(chan []byte, 1)
+ mux := http.NewServeMux()
+ mux.HandleFunc("POST /record", func(w http.ResponseWriter, r *http.Request) {
+ t.Error("received request to v1 endpoint")
+ http.Error(w, "not found", http.StatusNotFound)
+ })
+ mux.HandleFunc("POST /v2/record", func(w http.ResponseWriter, r *http.Request) {
+ // Force the status to send to unblock the client waiting
+ // for it.
+ w.WriteHeader(http.StatusOK)
+ w.(http.Flusher).Flush()
+
+ // Consume the whole request body but don't send any acks
+ // back.
+ hash := sha256.New()
+ if _, err := io.Copy(hash, r.Body); err != nil {
+ t.Error(err)
+ }
+ // Goes in the channel buffer, non-blocking.
+ uploadHash <- hash.Sum(nil)
+
+ // Block until the parent test case ends to prevent the
+ // request termination. We want to exercise the ack
+ // tracking logic specifically.
+ ctx, cancel := context.WithCancel(r.Context())
+ t.Cleanup(cancel)
+ <-ctx.Done()
+ })
+ mux.HandleFunc("HEAD /v2/record", func(http.ResponseWriter, *http.Request) {})
+ return mux, uploadHash
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.desc, func(t *testing.T) {
+ mux, uploadHash := tt.setup(t)
+
+ srv := httptest.NewUnstartedServer(mux)
+ if tt.http2 {
+ // Wire up h2c-compatible HTTP/2 server. This is optional
+ // because the v1 recorder didn't support HTTP/2 and we try to
+ // mimic that.
+ h2s := &http2.Server{}
+ srv.Config.Handler = h2c.NewHandler(mux, h2s)
+ if err := http2.ConfigureServer(srv.Config, h2s); err != nil {
+ t.Errorf("configuring HTTP/2 support in server: %v", err)
+ }
+ }
+ srv.Start()
+ t.Cleanup(srv.Close)
+
+ d := new(net.Dialer)
+
+ ctx := context.Background()
+ w, _, errc, err := ConnectToRecorder(ctx, []netip.AddrPort{netip.MustParseAddrPort(srv.Listener.Addr().String())}, d.DialContext)
+ if err != nil {
+ t.Fatalf("ConnectToRecorder: %v", err)
+ }
+
+ // Send some random data and hash it to compare with the recorded
+ // data hash.
+ hash := sha256.New()
+ const numBytes = 1 << 20 // 1MB
+ if _, err := io.CopyN(io.MultiWriter(w, hash), rand.Reader, numBytes); err != nil {
+ t.Fatalf("writing recording data: %v", err)
+ }
+ if err := w.Close(); err != nil {
+ t.Fatalf("closing recording stream: %v", err)
+ }
+ if err := <-errc; err != nil && !tt.wantErr {
+ t.Fatalf("error from the channel: %v", err)
+ } else if err == nil && tt.wantErr {
+ t.Fatalf("did not receive expected error from the channel")
+ }
+
+ if recv, sent := <-uploadHash, hash.Sum(nil); !bytes.Equal(recv, sent) {
+ t.Errorf("mismatch in recording data hash, sent %x, received %x", sent, recv)
+ }
+ })
+ }
+}
diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go
index 9ade1847e6b27..7cb99c3813104 100644
--- a/ssh/tailssh/tailssh.go
+++ b/ssh/tailssh/tailssh.go
@@ -1170,7 +1170,7 @@ func (ss *sshSession) run() {
if err != nil && !errors.Is(err, io.EOF) {
isErrBecauseProcessExited := processDone.Load() && errors.Is(err, syscall.EIO)
if !isErrBecauseProcessExited {
- logf("stdout copy: %v, %T", err)
+ logf("stdout copy: %v", err)
ss.cancelCtx(err)
}
}
@@ -1520,9 +1520,14 @@ func (ss *sshSession) startNewRecording() (_ *recording, err error) {
go func() {
err := <-errChan
if err == nil {
- // Success.
- ss.logf("recording: finished uploading recording")
- return
+ select {
+ case <-ss.ctx.Done():
+ // Success.
+ ss.logf("recording: finished uploading recording")
+ return
+ default:
+ err = errors.New("recording upload ended before the SSH session")
+ }
}
if onFailure != nil && onFailure.NotifyURL != "" && len(attempts) > 0 {
lastAttempt := attempts[len(attempts)-1]
diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go
index 9e4f5ffd3d481..ad9cb1e57b53d 100644
--- a/ssh/tailssh/tailssh_test.go
+++ b/ssh/tailssh/tailssh_test.go
@@ -33,6 +33,8 @@ import (
"time"
gossh "github.com/tailscale/golang-x-crypto/ssh"
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/h2c"
"tailscale.com/ipn/ipnlocal"
"tailscale.com/ipn/store/mem"
"tailscale.com/net/memnet"
@@ -48,7 +50,7 @@ import (
"tailscale.com/types/netmap"
"tailscale.com/types/ptr"
"tailscale.com/util/cibuild"
- "tailscale.com/util/lineread"
+ "tailscale.com/util/lineiter"
"tailscale.com/util/must"
"tailscale.com/version/distro"
"tailscale.com/wgengine"
@@ -481,10 +483,9 @@ func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) {
}
var handler http.HandlerFunc
- recordingServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ recordingServer := mockRecordingServer(t, func(w http.ResponseWriter, r *http.Request) {
handler(w, r)
- }))
- defer recordingServer.Close()
+ })
s := &server{
logf: t.Logf,
@@ -533,9 +534,10 @@ func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) {
{
name: "upload-fails-after-starting",
handler: func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ w.(http.Flusher).Flush()
r.Body.Read(make([]byte, 1))
time.Sleep(100 * time.Millisecond)
- w.WriteHeader(http.StatusInternalServerError)
},
sshCommand: "echo hello && sleep 1 && echo world",
wantClientOutput: "\r\n\r\nsession terminated\r\n\r\n",
@@ -548,6 +550,7 @@ func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
+ s.logf = t.Logf
tstest.Replace(t, &handler, tt.handler)
sc, dc := memnet.NewTCPConn(src, dst, 1024)
var wg sync.WaitGroup
@@ -597,12 +600,12 @@ func TestMultipleRecorders(t *testing.T) {
t.Skipf("skipping on %q; only runs on linux and darwin", runtime.GOOS)
}
done := make(chan struct{})
- recordingServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ recordingServer := mockRecordingServer(t, func(w http.ResponseWriter, r *http.Request) {
defer close(done)
- io.ReadAll(r.Body)
w.WriteHeader(http.StatusOK)
- }))
- defer recordingServer.Close()
+ w.(http.Flusher).Flush()
+ io.ReadAll(r.Body)
+ })
badRecorder, err := net.Listen("tcp", ":0")
if err != nil {
t.Fatal(err)
@@ -610,15 +613,9 @@ func TestMultipleRecorders(t *testing.T) {
badRecorderAddr := badRecorder.Addr().String()
badRecorder.Close()
- badRecordingServer500 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.WriteHeader(500)
- }))
- defer badRecordingServer500.Close()
-
- badRecordingServer200 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.WriteHeader(200)
- }))
- defer badRecordingServer200.Close()
+ badRecordingServer500 := mockRecordingServer(t, func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusInternalServerError)
+ })
s := &server{
logf: t.Logf,
@@ -630,7 +627,6 @@ func TestMultipleRecorders(t *testing.T) {
Recorders: []netip.AddrPort{
netip.MustParseAddrPort(badRecorderAddr),
netip.MustParseAddrPort(badRecordingServer500.Listener.Addr().String()),
- netip.MustParseAddrPort(badRecordingServer200.Listener.Addr().String()),
netip.MustParseAddrPort(recordingServer.Listener.Addr().String()),
},
OnRecordingFailure: &tailcfg.SSHRecorderFailureAction{
@@ -701,19 +697,21 @@ func TestSSHRecordingNonInteractive(t *testing.T) {
}
var recording []byte
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- recordingServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ recordingServer := mockRecordingServer(t, func(w http.ResponseWriter, r *http.Request) {
defer cancel()
+ w.WriteHeader(http.StatusOK)
+ w.(http.Flusher).Flush()
+
var err error
recording, err = io.ReadAll(r.Body)
if err != nil {
t.Error(err)
return
}
- }))
- defer recordingServer.Close()
+ })
s := &server{
- logf: logger.Discard,
+ logf: t.Logf,
lb: &localState{
sshEnabled: true,
matchingRule: newSSHRule(
@@ -1123,14 +1121,11 @@ func TestSSH(t *testing.T) {
func parseEnv(out []byte) map[string]string {
e := map[string]string{}
- lineread.Reader(bytes.NewReader(out), func(line []byte) error {
- i := bytes.IndexByte(line, '=')
- if i == -1 {
- return nil
+ for line := range lineiter.Bytes(out) {
+ if i := bytes.IndexByte(line, '='); i != -1 {
+ e[string(line[:i])] = string(line[i+1:])
}
- e[string(line[:i])] = string(line[i+1:])
- return nil
- })
+ }
return e
}
@@ -1302,3 +1297,22 @@ func TestStdOsUserUserAssumptions(t *testing.T) {
t.Errorf("os/user.User has %v fields; this package assumes %v", got, want)
}
}
+
+func mockRecordingServer(t *testing.T, handleRecord http.HandlerFunc) *httptest.Server {
+ t.Helper()
+ mux := http.NewServeMux()
+ mux.HandleFunc("POST /record", func(http.ResponseWriter, *http.Request) {
+ t.Errorf("v1 recording endpoint called")
+ })
+ mux.HandleFunc("HEAD /v2/record", func(http.ResponseWriter, *http.Request) {})
+ mux.HandleFunc("POST /v2/record", handleRecord)
+
+ h2s := &http2.Server{}
+ srv := httptest.NewUnstartedServer(h2c.NewHandler(mux, h2s))
+ if err := http2.ConfigureServer(srv.Config, h2s); err != nil {
+ t.Errorf("configuring HTTP/2 support in recording server: %v", err)
+ }
+ srv.Start()
+ t.Cleanup(srv.Close)
+ return srv
+}
diff --git a/ssh/tailssh/user.go b/ssh/tailssh/user.go
index 33ebb4db729de..15191813bdca6 100644
--- a/ssh/tailssh/user.go
+++ b/ssh/tailssh/user.go
@@ -6,7 +6,6 @@
package tailssh
import (
- "io"
"os"
"os/exec"
"os/user"
@@ -18,7 +17,7 @@ import (
"go4.org/mem"
"tailscale.com/envknob"
"tailscale.com/hostinfo"
- "tailscale.com/util/lineread"
+ "tailscale.com/util/lineiter"
"tailscale.com/util/osuser"
"tailscale.com/version/distro"
)
@@ -110,15 +109,16 @@ func defaultPathForUser(u *user.User) string {
}
func defaultPathForUserOnNixOS(u *user.User) string {
- var path string
- lineread.File("/etc/pam/environment", func(lineb []byte) error {
+ for lr := range lineiter.File("/etc/pam/environment") {
+ lineb, err := lr.Value()
+ if err != nil {
+ return ""
+ }
if v := pathFromPAMEnvLine(lineb, u); v != "" {
- path = v
- return io.EOF // stop iteration
+ return v
}
- return nil
- })
- return path
+ }
+ return ""
}
func pathFromPAMEnvLine(line []byte, u *user.User) (path string) {
diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go
index df50a860311d1..897e8d27f7f7b 100644
--- a/tailcfg/tailcfg.go
+++ b/tailcfg/tailcfg.go
@@ -142,14 +142,17 @@ type CapabilityVersion int
// - 97: 2024-06-06: Client understands NodeAttrDisableSplitDNSWhenNoCustomResolvers
// - 98: 2024-06-13: iOS/tvOS clients may provide serial number as part of posture information
// - 99: 2024-06-14: Client understands NodeAttrDisableLocalDNSOverrideViaNRPT
-// - 100: 2024-06-18: Client supports filtertype.Match.SrcCaps (issue #12542)
+// - 100: 2024-06-18: Initial support for filtertype.Match.SrcCaps - actually usable in capver 109 (issue #12542)
// - 101: 2024-07-01: Client supports SSH agent forwarding when handling connections with /bin/su
// - 102: 2024-07-12: NodeAttrDisableMagicSockCryptoRouting support
// - 103: 2024-07-24: Client supports NodeAttrDisableCaptivePortalDetection
// - 104: 2024-08-03: SelfNodeV6MasqAddrForThisPeer now works
// - 105: 2024-08-05: Fixed SSH behavior on systems that use busybox (issue #12849)
// - 106: 2024-09-03: fix panic regression from cryptokey routing change (65fe0ba7b5)
-const CurrentCapabilityVersion CapabilityVersion = 106
+// - 107: 2024-10-30: add App Connector to conffile (PR #13942)
+// - 108: 2024-11-08: Client sends ServicesHash in Hostinfo, understands c2n GET /vip-services.
+// - 109: 2024-11-18: Client supports filtertype.Match.SrcCaps (issue #12542)
+const CurrentCapabilityVersion CapabilityVersion = 109
type StableID string
@@ -651,6 +654,21 @@ func CheckTag(tag string) error {
return nil
}
+// CheckServiceName validates svc for use as a service name.
+// We only allow valid DNS labels, since the expectation is that these will be
+// used as parts of domain names.
+func CheckServiceName(svc string) error {
+ var ok bool
+ svc, ok = strings.CutPrefix(svc, "svc:")
+ if !ok {
+ return errors.New("services must start with 'svc:'")
+ }
+ if svc == "" {
+ return errors.New("service names must not be empty")
+ }
+ return dnsname.ValidLabel(svc)
+}
+
// CheckRequestTags checks that all of h.RequestTags are valid.
func (h *Hostinfo) CheckRequestTags() error {
if h == nil {
@@ -771,7 +789,7 @@ type Hostinfo struct {
// "5.10.0-17-amd64".
OSVersion string `json:",omitempty"`
- Container opt.Bool `json:",omitempty"` // whether the client is running in a container
+ Container opt.Bool `json:",omitempty"` // best-effort whether the client is running in a container
Env string `json:",omitempty"` // a hostinfo.EnvType in string form
Distro string `json:",omitempty"` // "debian", "ubuntu", "nixos", ...
DistroVersion string `json:",omitempty"` // "20.04", ...
@@ -804,6 +822,7 @@ type Hostinfo struct {
Userspace opt.Bool `json:",omitempty"` // if the client is running in userspace (netstack) mode
UserspaceRouter opt.Bool `json:",omitempty"` // if the client's subnet router is running in userspace (netstack) mode
AppConnector opt.Bool `json:",omitempty"` // if the client is running the app-connector service
+ ServicesHash string `json:",omitempty"` // opaque hash of the most recent list of tailnet services, change in hash indicates config should be fetched via c2n
// Location represents geographical location data about a
// Tailscale host. Location is optional and only set if
@@ -814,6 +833,26 @@ type Hostinfo struct {
// require changes to Hostinfo.Equal.
}
+// VIPService represents a service created on a tailnet from the
+// perspective of a node providing that service. These services
+// have a virtual IP (VIP) address pair distinct from the node's IPs.
+type VIPService struct {
+ // Name is the name of the service, of the form `svc:dns-label`.
+ // See CheckServiceName for a validation func.
+ // Name uniquely identifies a service on a particular tailnet,
+ // and so also corresponds uniquely to the pair of IP addresses
+ // belonging to the VIP service.
+ Name string
+
+ // Ports specify which ProtoPorts are made available by this node
+ // on the service's IPs.
+ Ports []ProtoPortRange
+
+ // Active specifies whether new requests for the service should be
+ // sent to this node by control.
+ Active bool
+}
+
// TailscaleSSHEnabled reports whether or not this node is acting as a
// Tailscale SSH server.
func (hi *Hostinfo) TailscaleSSHEnabled() bool {
@@ -1413,6 +1452,11 @@ const (
// user groups as Kubernetes user groups. This capability is read by
// peers that are Tailscale Kubernetes operator instances.
PeerCapabilityKubernetes PeerCapability = "tailscale.com/cap/kubernetes"
+
+ // PeerCapabilityServicesDestination grants a peer the ability to serve as
+ // a destination for a set of given VIP services, which is provided as the
+ // value of this key in NodeCapMap.
+ PeerCapabilityServicesDestination PeerCapability = "tailscale.com/cap/services-destination"
)
// NodeCapMap is a map of capabilities to their optional values. It is valid for
diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go
index 61564f3f8bfd4..f4f02c01721dc 100644
--- a/tailcfg/tailcfg_clone.go
+++ b/tailcfg/tailcfg_clone.go
@@ -183,6 +183,7 @@ var _HostinfoCloneNeedsRegeneration = Hostinfo(struct {
Userspace opt.Bool
UserspaceRouter opt.Bool
AppConnector opt.Bool
+ ServicesHash string
Location *Location
}{})
diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go
index 0d06366771d6e..9f8c418a1ccf9 100644
--- a/tailcfg/tailcfg_test.go
+++ b/tailcfg/tailcfg_test.go
@@ -66,6 +66,7 @@ func TestHostinfoEqual(t *testing.T) {
"Userspace",
"UserspaceRouter",
"AppConnector",
+ "ServicesHash",
"Location",
}
if have := fieldsOf(reflect.TypeFor[Hostinfo]()); !reflect.DeepEqual(have, hiHandles) {
@@ -240,6 +241,16 @@ func TestHostinfoEqual(t *testing.T) {
&Hostinfo{AppConnector: opt.Bool("false")},
false,
},
+ {
+ &Hostinfo{ServicesHash: "73475cb40a568e8da8a045ced110137e159f890ac4da883b6b17dc651b3a8049"},
+ &Hostinfo{ServicesHash: "73475cb40a568e8da8a045ced110137e159f890ac4da883b6b17dc651b3a8049"},
+ true,
+ },
+ {
+ &Hostinfo{ServicesHash: "084c799cd551dd1d8d5c5f9a5d593b2e931f5e36122ee5c793c1d08a19839cc0"},
+ &Hostinfo{},
+ false,
+ },
}
for i, tt := range tests {
got := tt.a.Equal(tt.b)
diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go
index a3e19b0dcec7a..f275a6a9da5f2 100644
--- a/tailcfg/tailcfg_view.go
+++ b/tailcfg/tailcfg_view.go
@@ -318,6 +318,7 @@ func (v HostinfoView) Cloud() string { return v.ж.Clou
func (v HostinfoView) Userspace() opt.Bool { return v.ж.Userspace }
func (v HostinfoView) UserspaceRouter() opt.Bool { return v.ж.UserspaceRouter }
func (v HostinfoView) AppConnector() opt.Bool { return v.ж.AppConnector }
+func (v HostinfoView) ServicesHash() string { return v.ж.ServicesHash }
func (v HostinfoView) Location() *Location {
if v.ж.Location == nil {
return nil
@@ -365,6 +366,7 @@ var _HostinfoViewNeedsRegeneration = Hostinfo(struct {
Userspace opt.Bool
UserspaceRouter opt.Bool
AppConnector opt.Bool
+ ServicesHash string
Location *Location
}{})
diff --git a/tool/gocross/autoflags.go b/tool/gocross/autoflags.go
index c66cab55a6770..b28d3bc5dd26e 100644
--- a/tool/gocross/autoflags.go
+++ b/tool/gocross/autoflags.go
@@ -35,7 +35,7 @@ func autoflagsForTest(argv []string, env *Environment, goroot, nativeGOOS, nativ
cc = "cc"
targetOS = cmp.Or(env.Get("GOOS", ""), nativeGOOS)
targetArch = cmp.Or(env.Get("GOARCH", ""), nativeGOARCH)
- buildFlags = []string{"-trimpath"}
+ buildFlags = []string{}
cgoCflags = []string{"-O3", "-std=gnu11", "-g"}
cgoLdflags []string
ldflags []string
@@ -47,6 +47,10 @@ func autoflagsForTest(argv []string, env *Environment, goroot, nativeGOOS, nativ
subcommand = argv[1]
}
+ if subcommand != "test" {
+ buildFlags = append(buildFlags, "-trimpath")
+ }
+
switch subcommand {
case "build", "env", "install", "run", "test", "list":
default:
@@ -146,7 +150,11 @@ func autoflagsForTest(argv []string, env *Environment, goroot, nativeGOOS, nativ
case env.IsSet("MACOSX_DEPLOYMENT_TARGET"):
xcodeFlags = append(xcodeFlags, "-mmacosx-version-min="+env.Get("MACOSX_DEPLOYMENT_TARGET", ""))
case env.IsSet("TVOS_DEPLOYMENT_TARGET"):
- xcodeFlags = append(xcodeFlags, "-mtvos-version-min="+env.Get("TVOS_DEPLOYMENT_TARGET", ""))
+ if env.Get("TARGET_DEVICE_PLATFORM_NAME", "") == "appletvsimulator" {
+ xcodeFlags = append(xcodeFlags, "-mtvos-simulator-version-min="+env.Get("TVOS_DEPLOYMENT_TARGET", ""))
+ } else {
+ xcodeFlags = append(xcodeFlags, "-mtvos-version-min="+env.Get("TVOS_DEPLOYMENT_TARGET", ""))
+ }
default:
return nil, nil, fmt.Errorf("invoked by Xcode but couldn't figure out deployment target. Did Xcode change its envvars again?")
}
diff --git a/tool/gocross/autoflags_test.go b/tool/gocross/autoflags_test.go
index 8f24dd8a32797..a0f3edfd2bb68 100644
--- a/tool/gocross/autoflags_test.go
+++ b/tool/gocross/autoflags_test.go
@@ -163,7 +163,6 @@ GOTOOLCHAIN=local (was )
TS_LINK_FAIL_REFLECT=0 (was )`,
wantArgv: []string{
"gocross", "test",
- "-trimpath",
"-tags=tailscale_go,osusergo,netgo",
"-ldflags", "-X tailscale.com/version.longStamp=1.2.3-long -X tailscale.com/version.shortStamp=1.2.3 -X tailscale.com/version.gitCommitStamp=abcd -X tailscale.com/version.extraGitCommitStamp=defg '-extldflags=-static'",
"-race",
diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go
index 0be33ba8a5d37..34cab7385558b 100644
--- a/tsnet/tsnet.go
+++ b/tsnet/tsnet.go
@@ -126,6 +126,7 @@ type Server struct {
initOnce sync.Once
initErr error
lb *ipnlocal.LocalBackend
+ sys *tsd.System
netstack *netstack.Impl
netMon *netmon.Monitor
rootPath string // the state directory
@@ -432,8 +433,7 @@ func (s *Server) TailscaleIPs() (ip4, ip6 netip.Addr) {
return
}
addrs := nm.GetAddresses()
- for i := range addrs.Len() {
- addr := addrs.At(i)
+ for _, addr := range addrs.All() {
ip := addr.Addr()
if ip.Is6() {
ip6 = ip
@@ -518,6 +518,7 @@ func (s *Server) start() (reterr error) {
}
sys := new(tsd.System)
+ s.sys = sys
if err := s.startLogger(&closePool, sys.HealthTracker(), tsLogf); err != nil {
return err
}
@@ -546,7 +547,7 @@ func (s *Server) start() (reterr error) {
sys.HealthTracker().SetMetricsRegistry(sys.UserMetricsRegistry())
// TODO(oxtoacart): do we need to support Taildrive on tsnet, and if so, how?
- ns, err := netstack.Create(tsLogf, sys.Tun.Get(), eng, sys.MagicSock.Get(), s.dialer, sys.DNSManager.Get(), sys.ProxyMapper(), nil)
+ ns, err := netstack.Create(tsLogf, sys.Tun.Get(), eng, sys.MagicSock.Get(), s.dialer, sys.DNSManager.Get(), sys.ProxyMapper())
if err != nil {
return fmt.Errorf("netstack.Create: %w", err)
}
@@ -903,6 +904,7 @@ func (s *Server) APIClient() (*tailscale.Client, error) {
}
c := tailscale.NewClient("-", nil)
+ c.UserAgent = "tailscale-tsnet"
c.HTTPClient = &http.Client{Transport: s.lb.KeyProvingNoiseRoundTripper()}
return c, nil
}
@@ -1226,6 +1228,13 @@ func (s *Server) CapturePcap(ctx context.Context, pcapFile string) error {
return nil
}
+// Sys returns a handle to the Tailscale subsystems of this node.
+//
+// This is not a stable API, nor are the APIs of the returned subsystems.
+func (s *Server) Sys() *tsd.System {
+ return s.sys
+}
+
type listenKey struct {
network string
host netip.Addr // or zero value for unspecified
diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go
index 255baf618c0b3..14d600817ad70 100644
--- a/tsnet/tsnet_test.go
+++ b/tsnet/tsnet_test.go
@@ -36,8 +36,8 @@ import (
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
"golang.org/x/net/proxy"
+ "tailscale.com/client/tailscale"
"tailscale.com/cmd/testwrapper/flakytest"
- "tailscale.com/health"
"tailscale.com/ipn"
"tailscale.com/ipn/store/mem"
"tailscale.com/net/netns"
@@ -821,16 +821,6 @@ func TestUDPConn(t *testing.T) {
}
}
-// testWarnable is a Warnable that is used within this package for testing purposes only.
-var testWarnable = health.Register(&health.Warnable{
- Code: "test-warnable-tsnet",
- Title: "Test warnable",
- Severity: health.SeverityLow,
- Text: func(args health.Args) string {
- return args[health.ArgError]
- },
-})
-
func parseMetrics(m []byte) (map[string]float64, error) {
metrics := make(map[string]float64)
@@ -874,15 +864,213 @@ func promMetricLabelsStr(labels []*dto.LabelPair) string {
return b.String()
}
-func TestUserMetrics(t *testing.T) {
- flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/13420")
- tstest.ResourceCheck(t)
+// sendData sends a given number of bytes from s1 to s2.
+func sendData(logf func(format string, args ...any), ctx context.Context, bytesCount int, s1, s2 *Server, s1ip, s2ip netip.Addr) error {
+ l := must.Get(s1.Listen("tcp", fmt.Sprintf("%s:8081", s1ip)))
+ defer l.Close()
+
+ // Dial to s1 from s2
+ w, err := s2.Dial(ctx, "tcp", fmt.Sprintf("%s:8081", s1ip))
+ if err != nil {
+ return err
+ }
+ defer w.Close()
+
+ stopReceive := make(chan struct{})
+ defer close(stopReceive)
+ allReceived := make(chan error)
+ defer close(allReceived)
+
+ go func() {
+ conn, err := l.Accept()
+ if err != nil {
+ allReceived <- err
+ return
+ }
+ conn.SetWriteDeadline(time.Now().Add(30 * time.Second))
+
+ total := 0
+ recvStart := time.Now()
+ for {
+ got := make([]byte, bytesCount)
+ n, err := conn.Read(got)
+ if err != nil {
+ allReceived <- fmt.Errorf("failed reading packet, %s", err)
+ return
+ }
+ got = got[:n]
+
+ select {
+ case <-stopReceive:
+ return
+ default:
+ }
+
+ total += n
+ logf("received %d/%d bytes, %.2f %%", total, bytesCount, (float64(total) / (float64(bytesCount)) * 100))
+
+ // Validate the received bytes to be the same as the sent bytes.
+ for _, b := range string(got) {
+ if b != 'A' {
+ allReceived <- fmt.Errorf("received unexpected byte: %c", b)
+ return
+ }
+ }
+
+ if total == bytesCount {
+ break
+ }
+ }
+
+ logf("all received, took: %s", time.Since(recvStart).String())
+ allReceived <- nil
+ }()
+
+ sendStart := time.Now()
+ w.SetWriteDeadline(time.Now().Add(30 * time.Second))
+ if _, err := w.Write(bytes.Repeat([]byte("A"), bytesCount)); err != nil {
+ stopReceive <- struct{}{}
+ return err
+ }
+
+ logf("all sent (%s), waiting for all packets (%d) to be received", time.Since(sendStart).String(), bytesCount)
+ err, _ = <-allReceived
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func TestUserMetricsByteCounters(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
+ defer cancel()
+
+ controlURL, _ := startControl(t)
+ s1, s1ip, _ := startServer(t, ctx, controlURL, "s1")
+ defer s1.Close()
+ s2, s2ip, _ := startServer(t, ctx, controlURL, "s2")
+ defer s2.Close()
+
+ lc1, err := s1.LocalClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ lc2, err := s2.LocalClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Force an update to the netmap to ensure that the metrics are up-to-date.
+ s1.lb.DebugForceNetmapUpdate()
+ s2.lb.DebugForceNetmapUpdate()
+
+ // Wait for both nodes to have a peer in their netmap.
+ waitForCondition(t, "waiting for netmaps to contain peer", 90*time.Second, func() bool {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ status1, err := lc1.Status(ctx)
+ if err != nil {
+ t.Logf("getting status: %s", err)
+ return false
+ }
+ status2, err := lc2.Status(ctx)
+ if err != nil {
+ t.Logf("getting status: %s", err)
+ return false
+ }
+ return len(status1.Peers()) > 0 && len(status2.Peers()) > 0
+ })
+
+ // ping to make sure the connection is up.
+ res, err := lc2.Ping(ctx, s1ip, tailcfg.PingICMP)
+ if err != nil {
+ t.Fatalf("pinging: %s", err)
+ }
+ t.Logf("ping success: %#+v", res)
+
+ mustDirect(t, t.Logf, lc1, lc2)
+
+ // 1 megabytes
+ bytesToSend := 1 * 1024 * 1024
+
+ // This generates some traffic; it is factored out
+ // of TestUDPConn.
+ start := time.Now()
+ err = sendData(t.Logf, ctx, bytesToSend, s1, s2, s1ip, s2ip)
+ if err != nil {
+ t.Fatalf("Failed to send packets: %v", err)
+ }
+ t.Logf("Sent %d bytes from s1 to s2 in %s", bytesToSend, time.Since(start).String())
+
+ ctxLc, cancelLc := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancelLc()
+ metrics1, err := lc1.UserMetrics(ctxLc)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ parsedMetrics1, err := parseMetrics(metrics1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Allow the metrics for the bytes sent to be off by 15%.
+ bytesSentTolerance := 1.15
+
+ t.Logf("Metrics1:\n%s\n", metrics1)
+
+ // Verify that the amount of data recorded in bytes is higher than or equal to the data sent
+ inboundBytes1 := parsedMetrics1[`tailscaled_inbound_bytes_total{path="direct_ipv4"}`]
+ if inboundBytes1 < float64(bytesToSend) {
+ t.Errorf(`metrics1, tailscaled_inbound_bytes_total{path="direct_ipv4"}: expected higher (or equal) than %d, got: %f`, bytesToSend, inboundBytes1)
+ }
+
+ // But ensure that it is not too much higher than the data sent.
+ if inboundBytes1 > float64(bytesToSend)*bytesSentTolerance {
+ t.Errorf(`metrics1, tailscaled_inbound_bytes_total{path="direct_ipv4"}: expected lower than %f, got: %f`, float64(bytesToSend)*bytesSentTolerance, inboundBytes1)
+ }
+
+ metrics2, err := lc2.UserMetrics(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ parsedMetrics2, err := parseMetrics(metrics2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Logf("Metrics2:\n%s\n", metrics2)
+
+ // Verify that the amount of data recorded in bytes is higher than or equal to the data sent.
+ outboundBytes2 := parsedMetrics2[`tailscaled_outbound_bytes_total{path="direct_ipv4"}`]
+ if outboundBytes2 < float64(bytesToSend) {
+ t.Errorf(`metrics2, tailscaled_outbound_bytes_total{path="direct_ipv4"}: expected higher (or equal) than %d, got: %f`, bytesToSend, outboundBytes2)
+ }
+
+ // But ensure that it is not too much higher than the data sent.
+ if outboundBytes2 > float64(bytesToSend)*bytesSentTolerance {
+ t.Errorf(`metrics2, tailscaled_outbound_bytes_total{path="direct_ipv4"}: expected lower than %f, got: %f`, float64(bytesToSend)*bytesSentTolerance, outboundBytes2)
+ }
+}
+
+func TestUserMetricsRouteGauges(t *testing.T) {
+ // Windows does not seem to support or report back routes when running in
+ // userspace via tsnet. So, we skip this check on Windows.
+ // TODO(kradalby): Figure out if this is correct.
+ if runtime.GOOS == "windows" {
+ t.Skipf("skipping on windows")
+ }
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
controlURL, c := startControl(t)
- s1, s1ip, s1PubKey := startServer(t, ctx, controlURL, "s1")
+ s1, _, s1PubKey := startServer(t, ctx, controlURL, "s1")
+ defer s1.Close()
s2, _, _ := startServer(t, ctx, controlURL, "s2")
+ defer s2.Close()
s1.lb.EditPrefs(&ipn.MaskedPrefs{
Prefs: ipn.Prefs{
@@ -911,24 +1099,11 @@ func TestUserMetrics(t *testing.T) {
t.Fatal(err)
}
- // ping to make sure the connection is up.
- res, err := lc2.Ping(ctx, s1ip, tailcfg.PingICMP)
- if err != nil {
- t.Fatalf("pinging: %s", err)
- }
- t.Logf("ping success: %#+v", res)
-
- ht := s1.lb.HealthTracker()
- ht.SetUnhealthy(testWarnable, health.Args{"Text": "Hello world 1"})
-
// Force an update to the netmap to ensure that the metrics are up-to-date.
s1.lb.DebugForceNetmapUpdate()
s2.lb.DebugForceNetmapUpdate()
wantRoutes := float64(2)
- if runtime.GOOS == "windows" {
- wantRoutes = 0
- }
// Wait for the routes to be propagated to node 1 to ensure
// that the metrics are up-to-date.
@@ -940,12 +1115,6 @@ func TestUserMetrics(t *testing.T) {
t.Logf("getting status: %s", err)
return false
}
- if runtime.GOOS == "windows" {
- // Windows does not seem to support or report back routes when running in
- // userspace via tsnet. So, we skip this check on Windows.
- // TODO(kradalby): Figure out if this is correct.
- return true
- }
// Wait for the primary routes to reach our desired routes, which is wantRoutes + 1, because
// the PrimaryRoutes list will contain a exit node route, which the metric does not count.
return status1.Self.PrimaryRoutes != nil && status1.Self.PrimaryRoutes.Len() == int(wantRoutes)+1
@@ -958,11 +1127,6 @@ func TestUserMetrics(t *testing.T) {
t.Fatal(err)
}
- status1, err := lc1.Status(ctxLc)
- if err != nil {
- t.Fatal(err)
- }
-
parsedMetrics1, err := parseMetrics(metrics1)
if err != nil {
t.Fatal(err)
@@ -985,28 +1149,11 @@ func TestUserMetrics(t *testing.T) {
t.Errorf("metrics1, tailscaled_approved_routes: got %v, want %v", got, want)
}
- // Validate the health counter metric against the status of the node
- if got, want := parsedMetrics1[`tailscaled_health_messages{type="warning"}`], float64(len(status1.Health)); got != want {
- t.Errorf("metrics1, tailscaled_health_messages: got %v, want %v", got, want)
- }
-
- // The node is the primary subnet router for 2 routes:
- // - 192.0.2.0/24
- // - 192.0.5.1/32
- if got, want := parsedMetrics1["tailscaled_primary_routes"], wantRoutes; got != want {
- t.Errorf("metrics1, tailscaled_primary_routes: got %v, want %v", got, want)
- }
-
metrics2, err := lc2.UserMetrics(ctx)
if err != nil {
t.Fatal(err)
}
- status2, err := lc2.Status(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
parsedMetrics2, err := parseMetrics(metrics2)
if err != nil {
t.Fatal(err)
@@ -1023,16 +1170,6 @@ func TestUserMetrics(t *testing.T) {
if got, want := parsedMetrics2["tailscaled_approved_routes"], 0.0; got != want {
t.Errorf("metrics2, tailscaled_approved_routes: got %v, want %v", got, want)
}
-
- // Validate the health counter metric against the status of the node
- if got, want := parsedMetrics2[`tailscaled_health_messages{type="warning"}`], float64(len(status2.Health)); got != want {
- t.Errorf("metrics2, tailscaled_health_messages: got %v, want %v", got, want)
- }
-
- // The node is the primary subnet router for 0 routes
- if got, want := parsedMetrics2["tailscaled_primary_routes"], 0.0; got != want {
- t.Errorf("metrics2, tailscaled_primary_routes: got %v, want %v", got, want)
- }
}
func waitForCondition(t *testing.T, msg string, waitTime time.Duration, f func() bool) {
@@ -1044,3 +1181,33 @@ func waitForCondition(t *testing.T, msg string, waitTime time.Duration, f func()
}
t.Fatalf("waiting for condition: %s", msg)
}
+
+// mustDirect ensures there is a direct connection between LocalClient 1 and 2
+func mustDirect(t *testing.T, logf logger.Logf, lc1, lc2 *tailscale.LocalClient) {
+ t.Helper()
+ lastLog := time.Now().Add(-time.Minute)
+ // See https://github.com/tailscale/tailscale/issues/654
+ // and https://github.com/tailscale/tailscale/issues/3247 for discussions of this deadline.
+ for deadline := time.Now().Add(30 * time.Second); time.Now().Before(deadline); time.Sleep(10 * time.Millisecond) {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ status1, err := lc1.Status(ctx)
+ if err != nil {
+ continue
+ }
+ status2, err := lc2.Status(ctx)
+ if err != nil {
+ continue
+ }
+ pst := status1.Peer[status2.Self.PublicKey]
+ if pst.CurAddr != "" {
+ logf("direct link %s->%s found with addr %s", status1.Self.HostName, status2.Self.HostName, pst.CurAddr)
+ return
+ }
+ if now := time.Now(); now.Sub(lastLog) > time.Second {
+ logf("no direct path %s->%s yet, addrs %v", status1.Self.HostName, status2.Self.HostName, pst.Addrs)
+ lastLog = now
+ }
+ }
+ t.Error("magicsock did not find a direct path from lc1 to lc2")
+}
diff --git a/tstest/deptest/deptest.go b/tstest/deptest/deptest.go
index 57db2b79aa3c7..00faa8a386db8 100644
--- a/tstest/deptest/deptest.go
+++ b/tstest/deptest/deptest.go
@@ -13,14 +13,19 @@ import (
"path/filepath"
"regexp"
"runtime"
+ "slices"
"strings"
"testing"
+
+ "tailscale.com/util/set"
)
type DepChecker struct {
- GOOS string // optional
- GOARCH string // optional
- BadDeps map[string]string // package => why
+ GOOS string // optional
+ GOARCH string // optional
+ BadDeps map[string]string // package => why
+ WantDeps set.Set[string] // packages expected
+ Tags string // comma-separated
}
func (c DepChecker) Check(t *testing.T) {
@@ -29,7 +34,7 @@ func (c DepChecker) Check(t *testing.T) {
t.Skip("skipping dep tests on windows hosts")
}
t.Helper()
- cmd := exec.Command("go", "list", "-json", ".")
+ cmd := exec.Command("go", "list", "-json", "-tags="+c.Tags, ".")
var extraEnv []string
if c.GOOS != "" {
extraEnv = append(extraEnv, "GOOS="+c.GOOS)
@@ -54,6 +59,11 @@ func (c DepChecker) Check(t *testing.T) {
t.Errorf("package %q is not allowed as a dependency (env: %q); reason: %s", dep, extraEnv, why)
}
}
+ for dep := range c.WantDeps {
+ if !slices.Contains(res.Deps, dep) {
+ t.Errorf("expected package %q to be a dependency (env: %q)", dep, extraEnv)
+ }
+ }
t.Logf("got %d dependencies", len(res.Deps))
}
diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go
index bbcf277d171e1..a6b2e1828b8fe 100644
--- a/tstest/integration/testcontrol/testcontrol.go
+++ b/tstest/integration/testcontrol/testcontrol.go
@@ -26,7 +26,7 @@ import (
"time"
"golang.org/x/net/http2"
- "tailscale.com/control/controlhttp"
+ "tailscale.com/control/controlhttp/controlhttpserver"
"tailscale.com/net/netaddr"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
@@ -288,7 +288,7 @@ func (s *Server) serveNoiseUpgrade(w http.ResponseWriter, r *http.Request) {
s.mu.Lock()
noisePrivate := s.noisePrivKey
s.mu.Unlock()
- cc, err := controlhttp.AcceptHTTP(ctx, w, r, noisePrivate, nil)
+ cc, err := controlhttpserver.AcceptHTTP(ctx, w, r, noisePrivate, nil)
if err != nil {
log.Printf("AcceptHTTP: %v", err)
return
@@ -832,7 +832,7 @@ func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey key.Machi
w.WriteHeader(200)
for {
if resBytes, ok := s.takeRawMapMessage(req.NodeKey); ok {
- if err := s.sendMapMsg(w, mkey, compress, resBytes); err != nil {
+ if err := s.sendMapMsg(w, compress, resBytes); err != nil {
s.logf("sendMapMsg of raw message: %v", err)
return
}
@@ -864,7 +864,7 @@ func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey key.Machi
s.logf("json.Marshal: %v", err)
return
}
- if err := s.sendMapMsg(w, mkey, compress, resBytes); err != nil {
+ if err := s.sendMapMsg(w, compress, resBytes); err != nil {
return
}
}
@@ -895,7 +895,7 @@ func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey key.Machi
}
break keepAliveLoop
case <-keepAliveTimerCh:
- if err := s.sendMapMsg(w, mkey, compress, keepAliveMsg); err != nil {
+ if err := s.sendMapMsg(w, compress, keepAliveMsg); err != nil {
return
}
}
@@ -1060,7 +1060,7 @@ func (s *Server) takeRawMapMessage(nk key.NodePublic) (mapResJSON []byte, ok boo
return mapResJSON, true
}
-func (s *Server) sendMapMsg(w http.ResponseWriter, mkey key.MachinePublic, compress bool, msg any) error {
+func (s *Server) sendMapMsg(w http.ResponseWriter, compress bool, msg any) error {
resBytes, err := s.encode(compress, msg)
if err != nil {
return err
diff --git a/tstest/natlab/vnet/conf.go b/tstest/natlab/vnet/conf.go
index cf71a66743e1c..a37c22a6c8023 100644
--- a/tstest/natlab/vnet/conf.go
+++ b/tstest/natlab/vnet/conf.go
@@ -10,6 +10,7 @@ import (
"net/netip"
"os"
"slices"
+ "time"
"github.com/google/gopacket/layers"
"github.com/google/gopacket/pcapgo"
@@ -279,10 +280,28 @@ type Network struct {
svcs set.Set[NetworkService]
+ latency time.Duration // latency applied to interface writes
+ lossRate float64 // chance of packet loss (0.0 to 1.0)
+
// ...
err error // carried error
}
// SetLatency sets the simulated network latency for this network.
// The duration is copied to the runtime network at initFromConfig time and
// applied to every interface write (see conditionedWrite in vnet.go);
// a zero or negative value adds no delay.
func (n *Network) SetLatency(d time.Duration) {
	n.latency = d
}
+
+// SetPacketLoss sets the packet loss rate for this network 0.0 (no loss) to 1.0 (total loss).
+func (n *Network) SetPacketLoss(rate float64) {
+ if rate < 0 {
+ rate = 0
+ } else if rate > 1 {
+ rate = 1
+ }
+ n.lossRate = rate
+}
+
// SetBlackholedIPv4 sets whether the network should blackhole all IPv4 traffic
// out to the Internet. (DHCP etc continues to work on the LAN.)
func (n *Network) SetBlackholedIPv4(v bool) {
@@ -361,6 +380,8 @@ func (s *Server) initFromConfig(c *Config) error {
wanIP4: conf.wanIP4,
lanIP4: conf.lanIP4,
breakWAN4: conf.breakWAN4,
+ latency: conf.latency,
+ lossRate: conf.lossRate,
nodesByIP4: map[netip.Addr]*node{},
nodesByMAC: map[MAC]*node{},
logf: logger.WithPrefix(s.logf, fmt.Sprintf("[net-%v] ", conf.mac)),
diff --git a/tstest/natlab/vnet/conf_test.go b/tstest/natlab/vnet/conf_test.go
index 15d3c69ef52d9..6566ac8cf4610 100644
--- a/tstest/natlab/vnet/conf_test.go
+++ b/tstest/natlab/vnet/conf_test.go
@@ -3,7 +3,10 @@
package vnet
-import "testing"
+import (
+ "testing"
+ "time"
+)
func TestConfig(t *testing.T) {
tests := []struct {
@@ -18,6 +21,16 @@ func TestConfig(t *testing.T) {
c.AddNode(c.AddNetwork("2.2.2.2", "10.2.0.1/16", HardNAT))
},
},
+ {
+ name: "latency-and-loss",
+ setup: func(c *Config) {
+ n1 := c.AddNetwork("2.1.1.1", "192.168.1.1/24", EasyNAT, NATPMP)
+ n1.SetLatency(time.Second)
+ n1.SetPacketLoss(0.1)
+ c.AddNode(n1)
+ c.AddNode(c.AddNetwork("2.2.2.2", "10.2.0.1/16", HardNAT))
+ },
+ },
{
name: "indirect",
setup: func(c *Config) {
diff --git a/tstest/natlab/vnet/vnet.go b/tstest/natlab/vnet/vnet.go
index e7991b3e6ef5d..92312c039bfc9 100644
--- a/tstest/natlab/vnet/vnet.go
+++ b/tstest/natlab/vnet/vnet.go
@@ -515,6 +515,8 @@ type network struct {
wanIP4 netip.Addr // router's LAN IPv4, if any
lanIP4 netip.Prefix // router's LAN IP + CIDR (e.g. 192.168.2.1/24)
breakWAN4 bool // break WAN IPv4 connectivity
+ latency time.Duration // latency applied to interface writes
+ lossRate float64 // probability of dropping a packet (0.0 to 1.0)
nodesByIP4 map[netip.Addr]*node // by LAN IPv4
nodesByMAC map[MAC]*node
logf func(format string, args ...any)
@@ -977,7 +979,7 @@ func (n *network) writeEth(res []byte) bool {
for mac, nw := range n.writers.All() {
if mac != srcMAC {
num++
- nw.write(res)
+ n.conditionedWrite(nw, res)
}
}
return num > 0
@@ -987,7 +989,7 @@ func (n *network) writeEth(res []byte) bool {
return false
}
if nw, ok := n.writers.Load(dstMAC); ok {
- nw.write(res)
+ n.conditionedWrite(nw, res)
return true
}
@@ -1000,6 +1002,23 @@ func (n *network) writeEth(res []byte) bool {
return false
}
+func (n *network) conditionedWrite(nw networkWriter, packet []byte) {
+ if n.lossRate > 0 && rand.Float64() < n.lossRate {
+ // packet lost
+ return
+ }
+ if n.latency > 0 {
+ // copy the packet as there's no guarantee packet is owned long enough.
+ // TODO(raggi): this could be optimized substantially if necessary,
+ // a pool of buffers and a cheaper delay mechanism are both obvious improvements.
+ var pkt = make([]byte, len(packet))
+ copy(pkt, packet)
+ time.AfterFunc(n.latency, func() { nw.write(pkt) })
+ } else {
+ nw.write(packet)
+ }
+}
+
var (
macAllNodes = MAC{0: 0x33, 1: 0x33, 5: 0x01}
macAllRouters = MAC{0: 0x33, 1: 0x33, 5: 0x02}
diff --git a/tstest/resource.go b/tstest/resource.go
index a3c292094fac6..b094c7911014f 100644
--- a/tstest/resource.go
+++ b/tstest/resource.go
@@ -29,7 +29,8 @@ func ResourceCheck(tb testing.TB) {
startN, startStacks := goroutines()
tb.Cleanup(func() {
if tb.Failed() {
- // Something else went wrong.
+ // Test has failed - but this doesn't catch panics due to
+ // https://github.com/golang/go/issues/49929.
return
}
// Goroutines might be still exiting.
@@ -44,7 +45,10 @@ func ResourceCheck(tb testing.TB) {
return
}
tb.Logf("goroutine diff:\n%v\n", cmp.Diff(startStacks, endStacks))
- tb.Fatalf("goroutine count: expected %d, got %d\n", startN, endN)
+
+ // tb.Failed() above won't report on panics, so we shouldn't call Fatal
+ // here or we risk suppressing reporting of the panic.
+ tb.Errorf("goroutine count: expected %d, got %d\n", startN, endN)
})
}
diff --git a/tstest/tailmac/Swift/Common/Config.swift b/tstest/tailmac/Swift/Common/Config.swift
index 01d5069b0049d..18b68ae9b9d14 100644
--- a/tstest/tailmac/Swift/Common/Config.swift
+++ b/tstest/tailmac/Swift/Common/Config.swift
@@ -14,6 +14,7 @@ class Config: Codable {
var mac = "52:cc:cc:cc:cc:01"
var ethermac = "52:cc:cc:cc:ce:01"
var port: UInt32 = 51009
+ var sharedDir: String?
// The virtual machine's ID. This also doubles as the directory name under which
// we will store configuration, block device, etc.
diff --git a/tstest/tailmac/Swift/Common/TailMacConfigHelper.swift b/tstest/tailmac/Swift/Common/TailMacConfigHelper.swift
index 00f999a158c19..c0961c883fdbb 100644
--- a/tstest/tailmac/Swift/Common/TailMacConfigHelper.swift
+++ b/tstest/tailmac/Swift/Common/TailMacConfigHelper.swift
@@ -141,5 +141,18 @@ struct TailMacConfigHelper {
func createKeyboardConfiguration() -> VZKeyboardConfiguration {
return VZMacKeyboardConfiguration()
}
+
    /// Creates a virtio-fs directory-sharing device exposing config.sharedDir
    /// to the guest, or returns nil when no shared directory is configured.
    ///
    /// - Parameter tag: The mount tag the guest uses to mount the share
    ///   (e.g. `mount_virtiofs <tag> <mountpoint>`).
    /// - Returns: A configured sharing device, or nil if config.sharedDir is unset.
    func createDirectoryShareConfiguration(tag: String) -> VZDirectorySharingDeviceConfiguration? {
        guard let dir = config.sharedDir else { return nil }

        // Share the host directory with the guest read-write.
        let sharedDir = VZSharedDirectory(url: URL(fileURLWithPath: dir), readOnly: false)
        let share = VZSingleDirectoryShare(directory: sharedDir)

        // Create the VZVirtioFileSystemDeviceConfiguration and assign it a unique tag.
        let sharingConfiguration = VZVirtioFileSystemDeviceConfiguration(tag: tag)
        sharingConfiguration.share = share

        return sharingConfiguration
    }
}
diff --git a/tstest/tailmac/Swift/Host/HostCli.swift b/tstest/tailmac/Swift/Host/HostCli.swift
index 1318a09fa546e..c31478cc39d45 100644
--- a/tstest/tailmac/Swift/Host/HostCli.swift
+++ b/tstest/tailmac/Swift/Host/HostCli.swift
@@ -19,10 +19,12 @@ var config: Config = Config()
extension HostCli {
struct Run: ParsableCommand {
@Option var id: String
+ @Option var share: String?
mutating func run() {
- print("Running vm with identifier \(id)")
config = Config(id)
+ config.sharedDir = share
+ print("Running vm with identifier \(id) and sharedDir \(share ?? "")")
_ = NSApplicationMain(CommandLine.argc, CommandLine.unsafeArgv)
}
}
diff --git a/tstest/tailmac/Swift/Host/VMController.swift b/tstest/tailmac/Swift/Host/VMController.swift
index 8774894c1157a..fe4a3828b18fe 100644
--- a/tstest/tailmac/Swift/Host/VMController.swift
+++ b/tstest/tailmac/Swift/Host/VMController.swift
@@ -95,6 +95,13 @@ class VMController: NSObject, VZVirtualMachineDelegate {
virtualMachineConfiguration.keyboards = [helper.createKeyboardConfiguration()]
virtualMachineConfiguration.socketDevices = [helper.createSocketDeviceConfiguration()]
+ if let dir = config.sharedDir, let shareConfig = helper.createDirectoryShareConfiguration(tag: "vmshare") {
+ print("Sharing \(dir) as vmshare. Use: mount_virtiofs vmshare in the guest to mount.")
+ virtualMachineConfiguration.directorySharingDevices = [shareConfig]
+ } else {
+ print("No shared directory created. \(config.sharedDir ?? "none") was requested.")
+ }
+
try! virtualMachineConfiguration.validate()
try! virtualMachineConfiguration.validateSaveRestoreSupport()
diff --git a/tstest/tailmac/Swift/TailMac/TailMac.swift b/tstest/tailmac/Swift/TailMac/TailMac.swift
index 56f651696e12c..84aa5e498a008 100644
--- a/tstest/tailmac/Swift/TailMac/TailMac.swift
+++ b/tstest/tailmac/Swift/TailMac/TailMac.swift
@@ -95,12 +95,16 @@ extension Tailmac {
extension Tailmac {
struct Run: ParsableCommand {
@Option(help: "The vm identifier") var id: String
+ @Option(help: "Optional share directory") var share: String?
@Flag(help: "Tail the TailMac log output instead of returning immediatly") var tail
mutating func run() {
let process = Process()
let stdOutPipe = Pipe()
- let appPath = "./Host.app/Contents/MacOS/Host"
+
+ let executablePath = CommandLine.arguments[0]
+ let executableDirectory = (executablePath as NSString).deletingLastPathComponent
+ let appPath = executableDirectory + "/Host.app/Contents/MacOS/Host"
process.executableURL = URL(
fileURLWithPath: appPath,
@@ -109,10 +113,15 @@ extension Tailmac {
)
if !FileManager.default.fileExists(atPath: appPath) {
- fatalError("Could not find Host.app. This must be co-located with the tailmac utility")
+ fatalError("Could not find Host.app at \(appPath). This must be co-located with the tailmac utility")
}
- process.arguments = ["run", "--id", id]
+ var args = ["run", "--id", id]
+ if let share {
+ args.append("--share")
+ args.append(share)
+ }
+ process.arguments = args
do {
process.standardOutput = stdOutPipe
@@ -121,26 +130,18 @@ extension Tailmac {
fatalError("Unable to launch the vm process")
}
- // This doesn't print until we exit which is not ideal, but at least we
- // get the output
if tail != 0 {
+ // (jonathan)TODO: How do we get the process output in real time?
+ // The child process only seems to flush to stdout on completion
let outHandle = stdOutPipe.fileHandleForReading
-
- let queue = OperationQueue()
- NotificationCenter.default.addObserver(
- forName: NSNotification.Name.NSFileHandleDataAvailable,
- object: outHandle, queue: queue)
- {
- notification -> Void in
- let data = outHandle.availableData
+ outHandle.readabilityHandler = { handle in
+ let data = handle.availableData
if data.count > 0 {
if let str = String(data: data, encoding: String.Encoding.utf8) {
print(str)
}
}
- outHandle.waitForDataInBackgroundAndNotify()
}
- outHandle.waitForDataInBackgroundAndNotify()
process.waitUntilExit()
}
}
diff --git a/tsweb/request_id.go b/tsweb/request_id.go
index 8516b8f72161e..46e52385240ca 100644
--- a/tsweb/request_id.go
+++ b/tsweb/request_id.go
@@ -6,9 +6,10 @@ package tsweb
import (
"context"
"net/http"
+ "time"
"tailscale.com/util/ctxkey"
- "tailscale.com/util/fastuuid"
+ "tailscale.com/util/rands"
)
// RequestID is an opaque identifier for a HTTP request, used to correlate
@@ -41,10 +42,12 @@ const RequestIDHeader = "X-Tailscale-Request-Id"
// GenerateRequestID generates a new request ID with the current format.
func GenerateRequestID() RequestID {
- // REQ-1 indicates the version of the RequestID pattern. It is
- // currently arbitrary but allows for forward compatible
- // transitions if needed.
- return RequestID("REQ-1" + fastuuid.NewUUID().String())
+ // Return a string of the form "REQ-<...>"
+ // Previously we returned "REQ-1".
+ // Now we return "REQ-2" version, where the "2" doubles as the year 2YYY
+ // in a leading date.
+ now := time.Now().UTC()
+ return RequestID("REQ-" + now.Format("20060102150405") + rands.HexString(16))
}
// SetRequestID is an HTTP middleware that injects a RequestID in the
diff --git a/tsweb/tsweb_test.go b/tsweb/tsweb_test.go
index 13840c01225e3..d4c9721e97215 100644
--- a/tsweb/tsweb_test.go
+++ b/tsweb/tsweb_test.go
@@ -1307,6 +1307,28 @@ func TestBucket(t *testing.T) {
}
}
// TestGenerateRequestID checks the shape of generated request IDs: the
// "REQ-2" prefix, the exact length, and that the embedded timestamp is
// close to the current time.
func TestGenerateRequestID(t *testing.T) {
	t0 := time.Now()
	got := GenerateRequestID()
	t.Logf("Got: %q", got)
	// The "2" is both the format version and the leading digit of the
	// year 2YYY in the embedded timestamp.
	if !strings.HasPrefix(string(got), "REQ-2") {
		t.Errorf("expect REQ-2 prefix; got %q", got)
	}
	// "REQ-" + 14 timestamp digits + 16 hex characters.
	const wantLen = len("REQ-2024112022140896f8ead3d3f3be27")
	if len(got) != wantLen {
		t.Fatalf("len = %d; want %d", len(got), wantLen)
	}
	// Parse the embedded timestamp back out and sanity-check it against t0.
	d := got[len("REQ-"):][:14]
	timeBack, err := time.Parse("20060102150405", string(d))
	if err != nil {
		t.Fatalf("parsing time back: %v", err)
	}
	// NOTE(review): timeBack is truncated to whole seconds, so elapsed may be
	// slightly negative; only the upper bound is checked here.
	elapsed := timeBack.Sub(t0)
	if elapsed > 3*time.Second { // allow for slow github actions runners :)
		t.Fatalf("time back was %v; want within 3s", elapsed)
	}
}
+
func ExampleMiddlewareStack() {
// setHeader returns a middleware that sets header k = vs.
setHeader := func(k string, vs ...string) Middleware {
diff --git a/tsweb/varz/varz.go b/tsweb/varz/varz.go
index 561b2487710e3..952ebc23134c2 100644
--- a/tsweb/varz/varz.go
+++ b/tsweb/varz/varz.go
@@ -23,10 +23,16 @@ import (
"tailscale.com/version"
)
// StaticStringVar returns a new expvar.Var that always returns s.
// The string is boxed into an interface value exactly once, up front,
// so repeated reads of the var do not allocate.
func StaticStringVar(s string) expvar.Var {
	boxed := any(s)
	return expvar.Func(func() any { return boxed })
}
+
func init() {
expvar.Publish("process_start_unix_time", expvar.Func(func() any { return timeStart.Unix() }))
- expvar.Publish("version", expvar.Func(func() any { return version.Long() }))
- expvar.Publish("go_version", expvar.Func(func() any { return runtime.Version() }))
+ expvar.Publish("version", StaticStringVar(version.Long()))
+ expvar.Publish("go_version", StaticStringVar(runtime.Version()))
expvar.Publish("counter_uptime_sec", expvar.Func(func() any { return int64(Uptime().Seconds()) }))
expvar.Publish("gauge_goroutines", expvar.Func(func() any { return runtime.NumGoroutine() }))
}
diff --git a/types/bools/compare.go b/types/bools/compare.go
new file mode 100644
index 0000000000000..ac433b240755a
--- /dev/null
+++ b/types/bools/compare.go
@@ -0,0 +1,17 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Package bools contains the bools.Compare function.
+package bools
+
+// Compare compares two boolean values as if false is ordered before true.
+func Compare[T ~bool](x, y T) int {
+ switch {
+ case x == false && y == true:
+ return -1
+ case x == true && y == false:
+ return +1
+ default:
+ return 0
+ }
+}
diff --git a/types/bools/compare_test.go b/types/bools/compare_test.go
new file mode 100644
index 0000000000000..280294621e719
--- /dev/null
+++ b/types/bools/compare_test.go
@@ -0,0 +1,21 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package bools
+
+import "testing"
+
+func TestCompare(t *testing.T) {
+ if got := Compare(false, false); got != 0 {
+ t.Errorf("Compare(false, false) = %v, want 0", got)
+ }
+ if got := Compare(false, true); got != -1 {
+ t.Errorf("Compare(false, true) = %v, want -1", got)
+ }
+ if got := Compare(true, false); got != +1 {
+ t.Errorf("Compare(true, false) = %v, want +1", got)
+ }
+ if got := Compare(true, true); got != 0 {
+ t.Errorf("Compare(true, true) = %v, want 0", got)
+ }
+}
diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go
index 5e06229221e1f..94e872a5593ea 100644
--- a/types/netmap/netmap.go
+++ b/types/netmap/netmap.go
@@ -279,15 +279,14 @@ func (a *NetworkMap) equalConciseHeader(b *NetworkMap) bool {
// in nodeConciseEqual in sync.
func printPeerConcise(buf *strings.Builder, p tailcfg.NodeView) {
aip := make([]string, p.AllowedIPs().Len())
- for i := range aip {
- a := p.AllowedIPs().At(i)
- s := strings.TrimSuffix(fmt.Sprint(a), "/32")
+ for i, a := range p.AllowedIPs().All() {
+ s := strings.TrimSuffix(a.String(), "/32")
aip[i] = s
}
- ep := make([]string, p.Endpoints().Len())
- for i := range ep {
- e := p.Endpoints().At(i).String()
+ epStrs := make([]string, p.Endpoints().Len())
+ for i, ep := range p.Endpoints().All() {
+ e := ep.String()
// Align vertically on the ':' between IP and port
colon := strings.IndexByte(e, ':')
spaces := 0
@@ -295,7 +294,7 @@ func printPeerConcise(buf *strings.Builder, p tailcfg.NodeView) {
spaces++
colon--
}
- ep[i] = fmt.Sprintf("%21v", e+strings.Repeat(" ", spaces))
+ epStrs[i] = fmt.Sprintf("%21v", e+strings.Repeat(" ", spaces))
}
derp := p.DERP()
@@ -316,7 +315,7 @@ func printPeerConcise(buf *strings.Builder, p tailcfg.NodeView) {
discoShort,
derp,
strings.Join(aip, " "),
- strings.Join(ep, " "))
+ strings.Join(epStrs, " "))
}
// nodeConciseEqual reports whether a and b are equal for the fields accessed by printPeerConcise.
diff --git a/types/opt/value.go b/types/opt/value.go
index 54fab7a538270..b47b03c81b026 100644
--- a/types/opt/value.go
+++ b/types/opt/value.go
@@ -36,7 +36,7 @@ func ValueOf[T any](v T) Value[T] {
}
// String implements [fmt.Stringer].
-func (o *Value[T]) String() string {
+func (o Value[T]) String() string {
if !o.set {
return fmt.Sprintf("(empty[%T])", o.value)
}
diff --git a/types/result/result.go b/types/result/result.go
new file mode 100644
index 0000000000000..6bd1c2ea62004
--- /dev/null
+++ b/types/result/result.go
@@ -0,0 +1,49 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Package result contains the Of result type, which is
+// either a value or an error.
+package result
+
// Of is either a T value or an error.
//
// Think of it like Rust or Swift's result types.
// It's named "Of" because the fully qualified name
// for callers reads result.Of[T].
type Of[T any] struct {
	v   T // valid if err is nil; invalid if err is non-nil
	err error
}

// Value returns a new result carrying the value v and no error.
func Value[T any](v T) Of[T] {
	var r Of[T]
	r.v = v
	return r
}

// Error returns a new result carrying the error err.
// Passing a nil err produces the same result as calling
// Value with T's zero value.
func Error[T any](err error) Of[T] {
	var r Of[T]
	r.err = err
	return r
}

// MustValue returns the result's value.
// It panics if the result holds a non-nil error.
func (r Of[T]) MustValue() T {
	if err := r.err; err != nil {
		panic(err)
	}
	return r.v
}

// Value returns the result's value and error.
func (r Of[T]) Value() (T, error) {
	return r.v, r.err
}

// Err returns the result's error, if any.
// When Err reports nil, MustValue is guaranteed not to panic.
func (r Of[T]) Err() error {
	return r.err
}
diff --git a/util/clientmetric/clientmetric.go b/util/clientmetric/clientmetric.go
index b2d356b60fcc3..584a24f73dca8 100644
--- a/util/clientmetric/clientmetric.go
+++ b/util/clientmetric/clientmetric.go
@@ -9,6 +9,7 @@ import (
"bytes"
"encoding/binary"
"encoding/hex"
+ "expvar"
"fmt"
"io"
"sort"
@@ -16,6 +17,8 @@ import (
"sync"
"sync/atomic"
"time"
+
+ "tailscale.com/util/set"
)
var (
@@ -223,6 +226,54 @@ func NewGaugeFunc(name string, f func() int64) *Metric {
return m
}
// AggregateCounter returns a sum of expvar counters registered with it.
// It is guarded by an RWMutex and safe for concurrent use.
type AggregateCounter struct {
	mu       sync.RWMutex
	counters set.Set[*expvar.Int]
}

// Value returns the current sum of all registered counters.
func (c *AggregateCounter) Value() int64 {
	c.mu.RLock()
	defer c.mu.RUnlock()
	var sum int64
	for cnt := range c.counters {
		sum += cnt.Value()
	}
	return sum
}

// Register registers provided expvar counter.
// When a counter is added to the counter, it will be reset
// to start counting from 0. This is to avoid incrementing the
// counter with an unexpectedly large value.
func (c *AggregateCounter) Register(counter *expvar.Int) {
	c.mu.Lock()
	defer c.mu.Unlock()
	// No need to do anything if it's already registered.
	if c.counters.Contains(counter) {
		return
	}
	// Reset first, so the aggregate only observes increments made
	// after registration.
	counter.Set(0)
	c.counters.Add(counter)
}

// UnregisterAll unregisters all counters resulting in it
// starting back down at zero. This is to ensure monotonicity
// and respect the semantics of the counter.
func (c *AggregateCounter) UnregisterAll() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.counters = set.Set[*expvar.Int]{}
}

// NewAggregateCounter returns a new aggregate counter that returns
// a sum of expvar variables registered with it.
// The sum is exposed as a clientmetric gauge under the given name
// (via NewGaugeFunc reading c.Value).
func NewAggregateCounter(name string) *AggregateCounter {
	c := &AggregateCounter{counters: set.Set[*expvar.Int]{}}
	NewGaugeFunc(name, c.Value)
	return c
}
+
// WritePrometheusExpositionFormat writes all client metrics to w in
// the Prometheus text-based exposition format.
//
diff --git a/util/clientmetric/clientmetric_test.go b/util/clientmetric/clientmetric_test.go
index ab6c4335afb41..555d7a71170a4 100644
--- a/util/clientmetric/clientmetric_test.go
+++ b/util/clientmetric/clientmetric_test.go
@@ -4,8 +4,11 @@
package clientmetric
import (
+ "expvar"
"testing"
"time"
+
+ qt "github.com/frankban/quicktest"
)
func TestDeltaEncBuf(t *testing.T) {
@@ -107,3 +110,49 @@ func TestWithFunc(t *testing.T) {
t.Errorf("second = %q; want %q", got, want)
}
}
+
// TestAggregateCounter exercises registration, summation, reset-on-register,
// idempotent re-registration, and UnregisterAll semantics of AggregateCounter.
func TestAggregateCounter(t *testing.T) {
	clearMetrics()

	c := qt.New(t)

	expv1 := &expvar.Int{}
	expv2 := &expvar.Int{}
	expv3 := &expvar.Int{}

	aggCounter := NewAggregateCounter("agg_counter")

	aggCounter.Register(expv1)
	c.Assert(aggCounter.Value(), qt.Equals, int64(0))

	expv1.Add(1)
	c.Assert(aggCounter.Value(), qt.Equals, int64(1))

	aggCounter.Register(expv2)
	c.Assert(aggCounter.Value(), qt.Equals, int64(1))

	expv1.Add(1)
	expv2.Add(1)
	c.Assert(aggCounter.Value(), qt.Equals, int64(3))

	// Adding a new expvar should not change the value
	// and any value the counter already had is reset
	expv3.Set(5)
	aggCounter.Register(expv3)
	c.Assert(aggCounter.Value(), qt.Equals, int64(3))

	// Registering the same expvar multiple times should not change the value
	aggCounter.Register(expv3)
	c.Assert(aggCounter.Value(), qt.Equals, int64(3))

	aggCounter.UnregisterAll()
	c.Assert(aggCounter.Value(), qt.Equals, int64(0))

	// Start over: re-registering expv3 resets it to 0 again, so the
	// aggregate stays at 0 until the counter is incremented anew.
	expv3.Set(5)
	aggCounter.Register(expv3)
	c.Assert(aggCounter.Value(), qt.Equals, int64(0))

	expv3.Set(5)
	c.Assert(aggCounter.Value(), qt.Equals, int64(5))
}
diff --git a/util/codegen/codegen.go b/util/codegen/codegen.go
index d998d925d9143..1b3af10e03ee1 100644
--- a/util/codegen/codegen.go
+++ b/util/codegen/codegen.go
@@ -97,6 +97,11 @@ func (it *ImportTracker) Import(pkg string) {
}
}
// Has reports whether the specified package has been imported.
// It reports false for any package never recorded in it.packages.
func (it *ImportTracker) Has(pkg string) bool {
	return it.packages[pkg]
}
+
func (it *ImportTracker) qualifier(pkg *types.Package) string {
if it.thisPkg == pkg {
return ""
@@ -272,11 +277,16 @@ func IsInvalid(t types.Type) bool {
// It has special handling for some types that contain pointers
// that we know are free from memory aliasing/mutation concerns.
func ContainsPointers(typ types.Type) bool {
- switch typ.String() {
+ s := typ.String()
+ switch s {
case "time.Time":
- // time.Time contains a pointer that does not need copying
+ // time.Time contains a pointer that does not need cloning.
return false
- case "inet.af/netip.Addr", "net/netip.Addr", "net/netip.Prefix", "net/netip.AddrPort":
+ case "inet.af/netip.Addr":
+ return false
+ }
+ if strings.HasPrefix(s, "unique.Handle[") {
+ // unique.Handle contains a pointer that does not need cloning.
return false
}
switch ft := typ.Underlying().(type) {
diff --git a/util/codegen/codegen_test.go b/util/codegen/codegen_test.go
index 28ddaed2bac36..74715eecae6ef 100644
--- a/util/codegen/codegen_test.go
+++ b/util/codegen/codegen_test.go
@@ -10,6 +10,8 @@ import (
"strings"
"sync"
"testing"
+ "time"
+ "unique"
"unsafe"
"golang.org/x/exp/constraints"
@@ -84,6 +86,16 @@ type PointerUnionParam[T netip.Prefix | BasicType | IntPtr] struct {
V T
}
+type StructWithUniqueHandle struct{ _ unique.Handle[[32]byte] }
+
+type StructWithTime struct{ _ time.Time }
+
+type StructWithNetipTypes struct {
+ _ netip.Addr
+ _ netip.AddrPort
+ _ netip.Prefix
+}
+
type Interface interface {
Method()
}
@@ -161,6 +173,18 @@ func TestGenericContainsPointers(t *testing.T) {
typ: "PointerUnionParam",
wantPointer: true,
},
+ {
+ typ: "StructWithUniqueHandle",
+ wantPointer: false,
+ },
+ {
+ typ: "StructWithTime",
+ wantPointer: false,
+ },
+ {
+ typ: "StructWithNetipTypes",
+ wantPointer: false,
+ },
}
for _, tt := range tests {
diff --git a/util/fastuuid/fastuuid.go b/util/fastuuid/fastuuid.go
deleted file mode 100644
index 4b115ea4e4974..0000000000000
--- a/util/fastuuid/fastuuid.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright (c) Tailscale Inc & AUTHORS
-// SPDX-License-Identifier: BSD-3-Clause
-
-// Package fastuuid implements a UUID construction using an in process CSPRNG.
-package fastuuid
-
-import (
- crand "crypto/rand"
- "encoding/binary"
- "io"
- "math/rand/v2"
- "sync"
-
- "github.com/google/uuid"
-)
-
-// NewUUID returns a new UUID using a pool of generators, good for highly
-// concurrent use.
-func NewUUID() uuid.UUID {
- g := pool.Get().(*generator)
- defer pool.Put(g)
- return g.newUUID()
-}
-
-var pool = sync.Pool{
- New: func() any {
- return newGenerator()
- },
-}
-
-type generator struct {
- rng rand.ChaCha8
-}
-
-func seed() [32]byte {
- var r [32]byte
- if _, err := io.ReadFull(crand.Reader, r[:]); err != nil {
- panic(err)
- }
- return r
-}
-
-func newGenerator() *generator {
- return &generator{
- rng: *rand.NewChaCha8(seed()),
- }
-}
-
-func (g *generator) newUUID() uuid.UUID {
- var u uuid.UUID
- binary.NativeEndian.PutUint64(u[:8], g.rng.Uint64())
- binary.NativeEndian.PutUint64(u[8:], g.rng.Uint64())
- u[6] = (u[6] & 0x0f) | 0x40 // Version 4
- u[8] = (u[8] & 0x3f) | 0x80 // Variant 10
- return u
-}
diff --git a/util/fastuuid/fastuuid_test.go b/util/fastuuid/fastuuid_test.go
deleted file mode 100644
index f0d9939043850..0000000000000
--- a/util/fastuuid/fastuuid_test.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright (c) Tailscale Inc & AUTHORS
-// SPDX-License-Identifier: BSD-3-Clause
-
-package fastuuid
-
-import (
- "testing"
-
- "github.com/google/uuid"
-)
-
-func TestNewUUID(t *testing.T) {
- g := pool.Get().(*generator)
- defer pool.Put(g)
- u := g.newUUID()
- if u[6] != (u[6]&0x0f)|0x40 {
- t.Errorf("version bits are incorrect")
- }
- if u[8] != (u[8]&0x3f)|0x80 {
- t.Errorf("variant bits are incorrect")
- }
-}
-
-func BenchmarkBasic(b *testing.B) {
- b.Run("NewUUID", func(b *testing.B) {
- for range b.N {
- NewUUID()
- }
- })
-
- b.Run("uuid.New-unpooled", func(b *testing.B) {
- uuid.DisableRandPool()
- for range b.N {
- uuid.New()
- }
- })
-
- b.Run("uuid.New-pooled", func(b *testing.B) {
- uuid.EnableRandPool()
- for range b.N {
- uuid.New()
- }
- })
-}
-
-func BenchmarkParallel(b *testing.B) {
- b.Run("NewUUID", func(b *testing.B) {
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- NewUUID()
- }
- })
- })
-
- b.Run("uuid.New-unpooled", func(b *testing.B) {
- uuid.DisableRandPool()
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- uuid.New()
- }
- })
- })
-
- b.Run("uuid.New-pooled", func(b *testing.B) {
- uuid.EnableRandPool()
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- uuid.New()
- }
- })
- })
-}
diff --git a/util/lineiter/lineiter.go b/util/lineiter/lineiter.go
new file mode 100644
index 0000000000000..5cb1eeef3ee1d
--- /dev/null
+++ b/util/lineiter/lineiter.go
@@ -0,0 +1,72 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Package lineiter provides iterators over the lines of files, byte slices, and io.Readers.
+package lineiter
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+ "iter"
+ "os"
+
+ "tailscale.com/types/result"
+)
+
+// File returns an iterator that reads lines from the named file.
+//
+// The returned substrings don't include the trailing newline.
+// Lines may be empty.
+func File(name string) iter.Seq[result.Of[[]byte]] {
+ f, err := os.Open(name)
+ return reader(f, f, err)
+}
+
+// Bytes returns an iterator over the lines in bs.
+// The returned substrings don't include the trailing newline.
+// Lines may be empty.
+func Bytes(bs []byte) iter.Seq[[]byte] {
+ return func(yield func([]byte) bool) {
+ for len(bs) > 0 {
+ i := bytes.IndexByte(bs, '\n')
+ if i < 0 {
+ yield(bs)
+ return
+ }
+ if !yield(bs[:i]) {
+ return
+ }
+ bs = bs[i+1:]
+ }
+ }
+}
+
+// Reader returns an iterator over the lines in r.
+//
+// The returned substrings don't include the trailing newline.
+// Lines may be empty.
+func Reader(r io.Reader) iter.Seq[result.Of[[]byte]] {
+ return reader(r, nil, nil)
+}
+
+func reader(r io.Reader, c io.Closer, err error) iter.Seq[result.Of[[]byte]] {
+ return func(yield func(result.Of[[]byte]) bool) {
+ if err != nil {
+ yield(result.Error[[]byte](err))
+ return
+ }
+ if c != nil {
+ defer c.Close()
+ }
+ bs := bufio.NewScanner(r)
+ for bs.Scan() {
+ if !yield(result.Value(bs.Bytes())) {
+ return
+ }
+ }
+ if err := bs.Err(); err != nil {
+ yield(result.Error[[]byte](err))
+ }
+ }
+}
diff --git a/util/lineiter/lineiter_test.go b/util/lineiter/lineiter_test.go
new file mode 100644
index 0000000000000..3373d5fe7b122
--- /dev/null
+++ b/util/lineiter/lineiter_test.go
@@ -0,0 +1,32 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package lineiter
+
+import (
+ "slices"
+ "strings"
+ "testing"
+)
+
+func TestBytesLines(t *testing.T) {
+ var got []string
+ for line := range Bytes([]byte("foo\n\nbar\nbaz")) {
+ got = append(got, string(line))
+ }
+ want := []string{"foo", "", "bar", "baz"}
+ if !slices.Equal(got, want) {
+ t.Errorf("got %q; want %q", got, want)
+ }
+}
+
+func TestReader(t *testing.T) {
+ var got []string
+ for line := range Reader(strings.NewReader("foo\n\nbar\nbaz")) {
+ got = append(got, string(line.MustValue()))
+ }
+ want := []string{"foo", "", "bar", "baz"}
+ if !slices.Equal(got, want) {
+ t.Errorf("got %q; want %q", got, want)
+ }
+}
diff --git a/util/pidowner/pidowner_linux.go b/util/pidowner/pidowner_linux.go
index 2a5181f14e03c..a07f512427062 100644
--- a/util/pidowner/pidowner_linux.go
+++ b/util/pidowner/pidowner_linux.go
@@ -8,26 +8,26 @@ import (
"os"
"strings"
- "tailscale.com/util/lineread"
+ "tailscale.com/util/lineiter"
)
func ownerOfPID(pid int) (userID string, err error) {
file := fmt.Sprintf("/proc/%d/status", pid)
- err = lineread.File(file, func(line []byte) error {
+ for lr := range lineiter.File(file) {
+ line, err := lr.Value()
+ if err != nil {
+ if os.IsNotExist(err) {
+ return "", ErrProcessNotFound
+ }
+ return "", err
+ }
if len(line) < 4 || string(line[:4]) != "Uid:" {
- return nil
+ continue
}
f := strings.Fields(string(line))
if len(f) >= 2 {
userID = f[1] // real userid
}
- return nil
- })
- if os.IsNotExist(err) {
- return "", ErrProcessNotFound
- }
- if err != nil {
- return
}
if userID == "" {
return "", fmt.Errorf("missing Uid line in %s", file)
diff --git a/util/set/slice.go b/util/set/slice.go
index 38551aee197ad..2fc65b82d1c6e 100644
--- a/util/set/slice.go
+++ b/util/set/slice.go
@@ -67,7 +67,7 @@ func (ss *Slice[T]) Add(vs ...T) {
// AddSlice adds all elements in vs to the set.
func (ss *Slice[T]) AddSlice(vs views.Slice[T]) {
- for i := range vs.Len() {
- ss.Add(vs.At(i))
+ for _, v := range vs.All() {
+ ss.Add(v)
}
}
diff --git a/util/syspolicy/caching_handler.go b/util/syspolicy/caching_handler.go
deleted file mode 100644
index 5192958bc45a5..0000000000000
--- a/util/syspolicy/caching_handler.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright (c) Tailscale Inc & AUTHORS
-// SPDX-License-Identifier: BSD-3-Clause
-
-package syspolicy
-
-import (
- "errors"
- "sync"
-)
-
-// CachingHandler is a handler that reads policies from an underlying handler the first time each key is requested
-// and permanently caches the result unless there is an error. If there is an ErrNoSuchKey error, that result is cached,
-// otherwise the actual error is returned and the next read for that key will retry using the handler.
-type CachingHandler struct {
- mu sync.Mutex
- strings map[string]string
- uint64s map[string]uint64
- bools map[string]bool
- strArrs map[string][]string
- notFound map[string]bool
- handler Handler
-}
-
-// NewCachingHandler creates a CachingHandler given a handler.
-func NewCachingHandler(handler Handler) *CachingHandler {
- return &CachingHandler{
- handler: handler,
- strings: make(map[string]string),
- uint64s: make(map[string]uint64),
- bools: make(map[string]bool),
- strArrs: make(map[string][]string),
- notFound: make(map[string]bool),
- }
-}
-
-// ReadString reads the policy settings value string given the key.
-// ReadString first reads from the handler's cache before resorting to using the handler.
-func (ch *CachingHandler) ReadString(key string) (string, error) {
- ch.mu.Lock()
- defer ch.mu.Unlock()
- if val, ok := ch.strings[key]; ok {
- return val, nil
- }
- if notFound := ch.notFound[key]; notFound {
- return "", ErrNoSuchKey
- }
- val, err := ch.handler.ReadString(key)
- if errors.Is(err, ErrNoSuchKey) {
- ch.notFound[key] = true
- return "", err
- } else if err != nil {
- return "", err
- }
- ch.strings[key] = val
- return val, nil
-}
-
-// ReadUInt64 reads the policy settings uint64 value given the key.
-// ReadUInt64 first reads from the handler's cache before resorting to using the handler.
-func (ch *CachingHandler) ReadUInt64(key string) (uint64, error) {
- ch.mu.Lock()
- defer ch.mu.Unlock()
- if val, ok := ch.uint64s[key]; ok {
- return val, nil
- }
- if notFound := ch.notFound[key]; notFound {
- return 0, ErrNoSuchKey
- }
- val, err := ch.handler.ReadUInt64(key)
- if errors.Is(err, ErrNoSuchKey) {
- ch.notFound[key] = true
- return 0, err
- } else if err != nil {
- return 0, err
- }
- ch.uint64s[key] = val
- return val, nil
-}
-
-// ReadBoolean reads the policy settings boolean value given the key.
-// ReadBoolean first reads from the handler's cache before resorting to using the handler.
-func (ch *CachingHandler) ReadBoolean(key string) (bool, error) {
- ch.mu.Lock()
- defer ch.mu.Unlock()
- if val, ok := ch.bools[key]; ok {
- return val, nil
- }
- if notFound := ch.notFound[key]; notFound {
- return false, ErrNoSuchKey
- }
- val, err := ch.handler.ReadBoolean(key)
- if errors.Is(err, ErrNoSuchKey) {
- ch.notFound[key] = true
- return false, err
- } else if err != nil {
- return false, err
- }
- ch.bools[key] = val
- return val, nil
-}
-
-// ReadBoolean reads the policy settings boolean value given the key.
-// ReadBoolean first reads from the handler's cache before resorting to using the handler.
-func (ch *CachingHandler) ReadStringArray(key string) ([]string, error) {
- ch.mu.Lock()
- defer ch.mu.Unlock()
- if val, ok := ch.strArrs[key]; ok {
- return val, nil
- }
- if notFound := ch.notFound[key]; notFound {
- return nil, ErrNoSuchKey
- }
- val, err := ch.handler.ReadStringArray(key)
- if errors.Is(err, ErrNoSuchKey) {
- ch.notFound[key] = true
- return nil, err
- } else if err != nil {
- return nil, err
- }
- ch.strArrs[key] = val
- return val, nil
-}
diff --git a/util/syspolicy/caching_handler_test.go b/util/syspolicy/caching_handler_test.go
deleted file mode 100644
index 881f6ff83c0f8..0000000000000
--- a/util/syspolicy/caching_handler_test.go
+++ /dev/null
@@ -1,262 +0,0 @@
-// Copyright (c) Tailscale Inc & AUTHORS
-// SPDX-License-Identifier: BSD-3-Clause
-
-package syspolicy
-
-import (
- "testing"
-)
-
-func TestHandlerReadString(t *testing.T) {
- tests := []struct {
- name string
- key string
- handlerKey Key
- handlerValue string
- handlerError error
- preserveHandler bool
- wantValue string
- wantErr error
- strings map[string]string
- expectedCalls int
- }{
- {
- name: "read existing cached values",
- key: "test",
- handlerKey: "do not read",
- strings: map[string]string{"test": "foo"},
- wantValue: "foo",
- expectedCalls: 0,
- },
- {
- name: "read existing values not cached",
- key: "test",
- handlerKey: "test",
- handlerValue: "foo",
- wantValue: "foo",
- expectedCalls: 1,
- },
- {
- name: "error no such key",
- key: "test",
- handlerKey: "test",
- handlerError: ErrNoSuchKey,
- wantErr: ErrNoSuchKey,
- expectedCalls: 1,
- },
- {
- name: "other error",
- key: "test",
- handlerKey: "test",
- handlerError: someOtherError,
- wantErr: someOtherError,
- preserveHandler: true,
- expectedCalls: 2,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- testHandler := &testHandler{
- t: t,
- key: tt.handlerKey,
- s: tt.handlerValue,
- err: tt.handlerError,
- }
- cache := NewCachingHandler(testHandler)
- if tt.strings != nil {
- cache.strings = tt.strings
- }
- got, err := cache.ReadString(tt.key)
- if err != tt.wantErr {
- t.Errorf("err=%v want %v", err, tt.wantErr)
- }
- if got != tt.wantValue {
- t.Errorf("got %v want %v", got, cache.strings[tt.key])
- }
- if !tt.preserveHandler {
- testHandler.key, testHandler.s, testHandler.err = "do not read", "", nil
- }
- got, err = cache.ReadString(tt.key)
- if err != tt.wantErr {
- t.Errorf("repeat err=%v want %v", err, tt.wantErr)
- }
- if got != tt.wantValue {
- t.Errorf("repeat got %v want %v", got, cache.strings[tt.key])
- }
- if testHandler.calls != tt.expectedCalls {
- t.Errorf("calls=%v want %v", testHandler.calls, tt.expectedCalls)
- }
- })
- }
-}
-
-func TestHandlerReadUint64(t *testing.T) {
- tests := []struct {
- name string
- key string
- handlerKey Key
- handlerValue uint64
- handlerError error
- preserveHandler bool
- wantValue uint64
- wantErr error
- uint64s map[string]uint64
- expectedCalls int
- }{
- {
- name: "read existing cached values",
- key: "test",
- handlerKey: "do not read",
- uint64s: map[string]uint64{"test": 1},
- wantValue: 1,
- expectedCalls: 0,
- },
- {
- name: "read existing values not cached",
- key: "test",
- handlerKey: "test",
- handlerValue: 1,
- wantValue: 1,
- expectedCalls: 1,
- },
- {
- name: "error no such key",
- key: "test",
- handlerKey: "test",
- handlerError: ErrNoSuchKey,
- wantErr: ErrNoSuchKey,
- expectedCalls: 1,
- },
- {
- name: "other error",
- key: "test",
- handlerKey: "test",
- handlerError: someOtherError,
- wantErr: someOtherError,
- preserveHandler: true,
- expectedCalls: 2,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- testHandler := &testHandler{
- t: t,
- key: tt.handlerKey,
- u64: tt.handlerValue,
- err: tt.handlerError,
- }
- cache := NewCachingHandler(testHandler)
- if tt.uint64s != nil {
- cache.uint64s = tt.uint64s
- }
- got, err := cache.ReadUInt64(tt.key)
- if err != tt.wantErr {
- t.Errorf("err=%v want %v", err, tt.wantErr)
- }
- if got != tt.wantValue {
- t.Errorf("got %v want %v", got, cache.strings[tt.key])
- }
- if !tt.preserveHandler {
- testHandler.key, testHandler.s, testHandler.err = "do not read", "", nil
- }
- got, err = cache.ReadUInt64(tt.key)
- if err != tt.wantErr {
- t.Errorf("repeat err=%v want %v", err, tt.wantErr)
- }
- if got != tt.wantValue {
- t.Errorf("repeat got %v want %v", got, cache.strings[tt.key])
- }
- if testHandler.calls != tt.expectedCalls {
- t.Errorf("calls=%v want %v", testHandler.calls, tt.expectedCalls)
- }
- })
- }
-
-}
-
-func TestHandlerReadBool(t *testing.T) {
- tests := []struct {
- name string
- key string
- handlerKey Key
- handlerValue bool
- handlerError error
- preserveHandler bool
- wantValue bool
- wantErr error
- bools map[string]bool
- expectedCalls int
- }{
- {
- name: "read existing cached values",
- key: "test",
- handlerKey: "do not read",
- bools: map[string]bool{"test": true},
- wantValue: true,
- expectedCalls: 0,
- },
- {
- name: "read existing values not cached",
- key: "test",
- handlerKey: "test",
- handlerValue: true,
- wantValue: true,
- expectedCalls: 1,
- },
- {
- name: "error no such key",
- key: "test",
- handlerKey: "test",
- handlerError: ErrNoSuchKey,
- wantErr: ErrNoSuchKey,
- expectedCalls: 1,
- },
- {
- name: "other error",
- key: "test",
- handlerKey: "test",
- handlerError: someOtherError,
- wantErr: someOtherError,
- preserveHandler: true,
- expectedCalls: 2,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- testHandler := &testHandler{
- t: t,
- key: tt.handlerKey,
- b: tt.handlerValue,
- err: tt.handlerError,
- }
- cache := NewCachingHandler(testHandler)
- if tt.bools != nil {
- cache.bools = tt.bools
- }
- got, err := cache.ReadBoolean(tt.key)
- if err != tt.wantErr {
- t.Errorf("err=%v want %v", err, tt.wantErr)
- }
- if got != tt.wantValue {
- t.Errorf("got %v want %v", got, cache.strings[tt.key])
- }
- if !tt.preserveHandler {
- testHandler.key, testHandler.s, testHandler.err = "do not read", "", nil
- }
- got, err = cache.ReadBoolean(tt.key)
- if err != tt.wantErr {
- t.Errorf("repeat err=%v want %v", err, tt.wantErr)
- }
- if got != tt.wantValue {
- t.Errorf("repeat got %v want %v", got, cache.strings[tt.key])
- }
- if testHandler.calls != tt.expectedCalls {
- t.Errorf("calls=%v want %v", testHandler.calls, tt.expectedCalls)
- }
- })
- }
-
-}
diff --git a/util/syspolicy/handler.go b/util/syspolicy/handler.go
index f1fad97709a3f..f511f0a562e8b 100644
--- a/util/syspolicy/handler.go
+++ b/util/syspolicy/handler.go
@@ -4,16 +4,17 @@
package syspolicy
import (
- "errors"
- "sync/atomic"
+ "tailscale.com/util/syspolicy/internal"
+ "tailscale.com/util/syspolicy/rsop"
+ "tailscale.com/util/syspolicy/setting"
+ "tailscale.com/util/syspolicy/source"
)
-var (
- handlerUsed atomic.Bool
- handler Handler = defaultHandler{}
-)
+// TODO(nickkhyl): delete this file once other repos are updated.
// Handler reads system policies from OS-specific storage.
+//
+// Deprecated: implementing a [source.Store] should be preferred.
type Handler interface {
// ReadString reads the policy setting's string value for the given key.
// It should return ErrNoSuchKey if the key does not have a value set.
@@ -29,55 +30,88 @@ type Handler interface {
ReadStringArray(key string) ([]string, error)
}
-// ErrNoSuchKey is returned by a Handler when the specified key does not have a
-// value set.
-var ErrNoSuchKey = errors.New("no such key")
+// RegisterHandler wraps and registers the specified handler as the device's
+// policy [source.Store] for the program's lifetime.
+//
+// Deprecated: using [RegisterStore] should be preferred.
+func RegisterHandler(h Handler) {
+ rsop.RegisterStore("DeviceHandler", setting.DeviceScope, WrapHandler(h))
+}
-// defaultHandler is the catch all syspolicy type for anything that isn't windows or apple.
-type defaultHandler struct{}
+// TB is a subset of testing.TB that we use to set up test helpers.
+// It's defined here to avoid pulling in the testing package.
+type TB = internal.TB
-func (defaultHandler) ReadString(_ string) (string, error) {
- return "", ErrNoSuchKey
+// SetHandlerForTest wraps and sets the specified handler as the device's policy
+// [source.Store] for the duration of tb.
+//
+// Deprecated: using [MustRegisterStoreForTest] should be preferred.
+func SetHandlerForTest(tb TB, h Handler) {
+ RegisterWellKnownSettingsForTest(tb)
+ MustRegisterStoreForTest(tb, "DeviceHandler-TestOnly", setting.DefaultScope(), WrapHandler(h))
}
-func (defaultHandler) ReadUInt64(_ string) (uint64, error) {
- return 0, ErrNoSuchKey
+var _ source.Store = (*handlerStore)(nil)
+
+// handlerStore is a [source.Store] that calls the underlying [Handler].
+//
+// TODO(nickkhyl): remove it when the corp and android repos are updated.
+type handlerStore struct {
+ h Handler
}
-func (defaultHandler) ReadBoolean(_ string) (bool, error) {
- return false, ErrNoSuchKey
+// WrapHandler returns a [source.Store] that wraps the specified [Handler].
+func WrapHandler(h Handler) source.Store {
+ return handlerStore{h}
}
-func (defaultHandler) ReadStringArray(_ string) ([]string, error) {
- return nil, ErrNoSuchKey
+// Lock implements [source.Lockable].
+func (s handlerStore) Lock() error {
+ if lockable, ok := s.h.(source.Lockable); ok {
+ return lockable.Lock()
+ }
+ return nil
}
-// markHandlerInUse is called before handler methods are called.
-func markHandlerInUse() {
- handlerUsed.Store(true)
+// Unlock implements [source.Lockable].
+func (s handlerStore) Unlock() {
+ if lockable, ok := s.h.(source.Lockable); ok {
+ lockable.Unlock()
+ }
}
-// RegisterHandler initializes the policy handler and ensures registration will happen once.
-func RegisterHandler(h Handler) {
- // Technically this assignment is not concurrency safe, but in the
- // event that there was any risk of a data race, we will panic due to
- // the CompareAndSwap failing.
- handler = h
- if !handlerUsed.CompareAndSwap(false, true) {
- panic("handler was already used before registration")
+// RegisterChangeCallback implements [source.Changeable].
+func (s handlerStore) RegisterChangeCallback(callback func()) (unregister func(), err error) {
+ if changeable, ok := s.h.(source.Changeable); ok {
+ return changeable.RegisterChangeCallback(callback)
}
+ return func() {}, nil
}
-// TB is a subset of testing.TB that we use to set up test helpers.
-// It's defined here to avoid pulling in the testing package.
-type TB interface {
- Helper()
- Cleanup(func())
+// ReadString implements [source.Store].
+func (s handlerStore) ReadString(key setting.Key) (string, error) {
+ return s.h.ReadString(string(key))
}
-func SetHandlerForTest(tb TB, h Handler) {
- tb.Helper()
- oldHandler := handler
- handler = h
- tb.Cleanup(func() { handler = oldHandler })
+// ReadUInt64 implements [source.Store].
+func (s handlerStore) ReadUInt64(key setting.Key) (uint64, error) {
+ return s.h.ReadUInt64(string(key))
+}
+
+// ReadBoolean implements [source.Store].
+func (s handlerStore) ReadBoolean(key setting.Key) (bool, error) {
+ return s.h.ReadBoolean(string(key))
+}
+
+// ReadStringArray implements [source.Store].
+func (s handlerStore) ReadStringArray(key setting.Key) ([]string, error) {
+ return s.h.ReadStringArray(string(key))
+}
+
+// Done implements [source.Expirable].
+func (s handlerStore) Done() <-chan struct{} {
+ if expirable, ok := s.h.(source.Expirable); ok {
+ return expirable.Done()
+ }
+ return nil
}
diff --git a/util/syspolicy/handler_test.go b/util/syspolicy/handler_test.go
deleted file mode 100644
index 39b18936f176d..0000000000000
--- a/util/syspolicy/handler_test.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (c) Tailscale Inc & AUTHORS
-// SPDX-License-Identifier: BSD-3-Clause
-
-package syspolicy
-
-import "testing"
-
-func TestDefaultHandlerReadValues(t *testing.T) {
- var h defaultHandler
-
- got, err := h.ReadString(string(AdminConsoleVisibility))
- if got != "" || err != ErrNoSuchKey {
- t.Fatalf("got %v err %v", got, err)
- }
- result, err := h.ReadUInt64(string(LogSCMInteractions))
- if result != 0 || err != ErrNoSuchKey {
- t.Fatalf("got %v err %v", result, err)
- }
-}
diff --git a/util/syspolicy/handler_windows.go b/util/syspolicy/handler_windows.go
deleted file mode 100644
index 661853ead5d53..0000000000000
--- a/util/syspolicy/handler_windows.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright (c) Tailscale Inc & AUTHORS
-// SPDX-License-Identifier: BSD-3-Clause
-
-package syspolicy
-
-import (
- "errors"
- "fmt"
-
- "tailscale.com/util/clientmetric"
- "tailscale.com/util/winutil"
-)
-
-var (
- windowsErrors = clientmetric.NewCounter("windows_syspolicy_errors")
- windowsAny = clientmetric.NewGauge("windows_syspolicy_any")
-)
-
-type windowsHandler struct{}
-
-func init() {
- RegisterHandler(NewCachingHandler(windowsHandler{}))
-
- keyList := []struct {
- isSet func(Key) bool
- keys []Key
- }{
- {
- isSet: func(k Key) bool {
- _, err := handler.ReadString(string(k))
- return err == nil
- },
- keys: stringKeys,
- },
- {
- isSet: func(k Key) bool {
- _, err := handler.ReadBoolean(string(k))
- return err == nil
- },
- keys: boolKeys,
- },
- {
- isSet: func(k Key) bool {
- _, err := handler.ReadUInt64(string(k))
- return err == nil
- },
- keys: uint64Keys,
- },
- }
-
- var anySet bool
- for _, l := range keyList {
- for _, k := range l.keys {
- if !l.isSet(k) {
- continue
- }
- clientmetric.NewGauge(fmt.Sprintf("windows_syspolicy_%s", k)).Set(1)
- anySet = true
- }
- }
- if anySet {
- windowsAny.Set(1)
- }
-}
-
-func (windowsHandler) ReadString(key string) (string, error) {
- s, err := winutil.GetPolicyString(key)
- if errors.Is(err, winutil.ErrNoValue) {
- err = ErrNoSuchKey
- } else if err != nil {
- windowsErrors.Add(1)
- }
-
- return s, err
-}
-
-func (windowsHandler) ReadUInt64(key string) (uint64, error) {
- value, err := winutil.GetPolicyInteger(key)
- if errors.Is(err, winutil.ErrNoValue) {
- err = ErrNoSuchKey
- } else if err != nil {
- windowsErrors.Add(1)
- }
- return value, err
-}
-
-func (windowsHandler) ReadBoolean(key string) (bool, error) {
- value, err := winutil.GetPolicyInteger(key)
- if errors.Is(err, winutil.ErrNoValue) {
- err = ErrNoSuchKey
- } else if err != nil {
- windowsErrors.Add(1)
- }
- return value != 0, err
-}
-
-func (windowsHandler) ReadStringArray(key string) ([]string, error) {
- value, err := winutil.GetPolicyStringArray(key)
- if errors.Is(err, winutil.ErrNoValue) {
- err = ErrNoSuchKey
- } else if err != nil {
- windowsErrors.Add(1)
- }
- return value, err
-}
diff --git a/util/syspolicy/internal/internal.go b/util/syspolicy/internal/internal.go
index 4c3e28d3914bb..8f28896259abf 100644
--- a/util/syspolicy/internal/internal.go
+++ b/util/syspolicy/internal/internal.go
@@ -13,6 +13,9 @@ import (
"tailscale.com/version"
)
+// Init facilitates deferred invocation of initializers.
+var Init lazy.DeferredInit
+
// OSForTesting is the operating system override used for testing.
// It follows the same naming convention as [version.OS].
var OSForTesting lazy.SyncValue[string]
diff --git a/util/syspolicy/internal/metrics/metrics.go b/util/syspolicy/internal/metrics/metrics.go
index 2ea02278afc92..0a2aa1192fc53 100644
--- a/util/syspolicy/internal/metrics/metrics.go
+++ b/util/syspolicy/internal/metrics/metrics.go
@@ -284,7 +284,7 @@ func SetHooksForTest(tb internal.TB, addMetric, setMetric metricFn) {
}
func newSettingMetric(key setting.Key, scope setting.Scope, suffix string, typ clientmetric.Type) metric {
- name := strings.ReplaceAll(string(key), setting.KeyPathSeparator, "_")
+ name := strings.ReplaceAll(string(key), string(setting.KeyPathSeparator), "_")
return newMetric([]string{name, metricScopeName(scope), suffix}, typ)
}
diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go
index ec0556a942cc6..bb9a5d6cc5934 100644
--- a/util/syspolicy/policy_keys.go
+++ b/util/syspolicy/policy_keys.go
@@ -3,10 +3,24 @@
package syspolicy
-import "tailscale.com/util/syspolicy/setting"
+import (
+ "tailscale.com/types/lazy"
+ "tailscale.com/util/syspolicy/internal"
+ "tailscale.com/util/syspolicy/setting"
+ "tailscale.com/util/testenv"
+)
+// Key is a string that uniquely identifies a policy and must remain unchanged
+// once established and documented for a given policy setting. It may contain
+// alphanumeric characters and zero or more [KeyPathSeparator]s to group
+// individual policy settings into categories.
type Key = setting.Key
+// The const block below lists known policy keys.
+// When adding a key to this list, remember to add a corresponding
+// [setting.Definition] to [implicitDefinitions] below.
+// Otherwise, the [TestKnownKeysRegistered] test will fail as a reminder.
+
const (
// Keys with a string value
ControlURL Key = "LoginURL" // default ""; if blank, ipn uses ipn.DefaultControlURL.
@@ -63,6 +77,9 @@ const (
// SuggestedExitNodeVisibility controls the visibility of suggested exit nodes in the client GUI.
// When this system policy is set to 'hide', an exit node suggestion won't be presented to the user as part of the exit nodes picker.
SuggestedExitNodeVisibility Key = "SuggestedExitNode"
+ // OnboardingFlowVisibility controls the visibility of the onboarding flow in the client GUI.
+ // When this system policy is set to 'hide', the onboarding flow is never shown to the user.
+ OnboardingFlowVisibility Key = "OnboardingFlow"
// Keys with a string value formatted for use with time.ParseDuration().
KeyExpirationNoticeTime Key = "KeyExpirationNotice" // default 24 hours
@@ -110,3 +127,91 @@ const (
// AllowedSuggestedExitNodes's string array value is a list of exit node IDs that restricts which exit nodes are considered when generating suggestions for exit nodes.
AllowedSuggestedExitNodes Key = "AllowedSuggestedExitNodes"
)
+
+// implicitDefinitions is a list of [setting.Definition] that will be registered
+// automatically when the policy setting definitions are first used by the syspolicy package hierarchy.
+// This includes the first time a policy needs to be read from any source.
+var implicitDefinitions = []*setting.Definition{
+ // Device policy settings (can only be configured on a per-device basis):
+ setting.NewDefinition(AllowedSuggestedExitNodes, setting.DeviceSetting, setting.StringListValue),
+ setting.NewDefinition(ApplyUpdates, setting.DeviceSetting, setting.PreferenceOptionValue),
+ setting.NewDefinition(AuthKey, setting.DeviceSetting, setting.StringValue),
+ setting.NewDefinition(CheckUpdates, setting.DeviceSetting, setting.PreferenceOptionValue),
+ setting.NewDefinition(ControlURL, setting.DeviceSetting, setting.StringValue),
+ setting.NewDefinition(DeviceSerialNumber, setting.DeviceSetting, setting.StringValue),
+ setting.NewDefinition(EnableIncomingConnections, setting.DeviceSetting, setting.PreferenceOptionValue),
+ setting.NewDefinition(EnableRunExitNode, setting.DeviceSetting, setting.PreferenceOptionValue),
+ setting.NewDefinition(EnableServerMode, setting.DeviceSetting, setting.PreferenceOptionValue),
+ setting.NewDefinition(EnableTailscaleDNS, setting.DeviceSetting, setting.PreferenceOptionValue),
+ setting.NewDefinition(EnableTailscaleSubnets, setting.DeviceSetting, setting.PreferenceOptionValue),
+ setting.NewDefinition(ExitNodeAllowLANAccess, setting.DeviceSetting, setting.PreferenceOptionValue),
+ setting.NewDefinition(ExitNodeID, setting.DeviceSetting, setting.StringValue),
+ setting.NewDefinition(ExitNodeIP, setting.DeviceSetting, setting.StringValue),
+ setting.NewDefinition(FlushDNSOnSessionUnlock, setting.DeviceSetting, setting.BooleanValue),
+ setting.NewDefinition(LogSCMInteractions, setting.DeviceSetting, setting.BooleanValue),
+ setting.NewDefinition(LogTarget, setting.DeviceSetting, setting.StringValue),
+ setting.NewDefinition(MachineCertificateSubject, setting.DeviceSetting, setting.StringValue),
+ setting.NewDefinition(PostureChecking, setting.DeviceSetting, setting.PreferenceOptionValue),
+ setting.NewDefinition(Tailnet, setting.DeviceSetting, setting.StringValue),
+
+ // User policy settings (can be configured on a user- or device-basis):
+ setting.NewDefinition(AdminConsoleVisibility, setting.UserSetting, setting.VisibilityValue),
+ setting.NewDefinition(AutoUpdateVisibility, setting.UserSetting, setting.VisibilityValue),
+ setting.NewDefinition(ExitNodeMenuVisibility, setting.UserSetting, setting.VisibilityValue),
+ setting.NewDefinition(KeyExpirationNoticeTime, setting.UserSetting, setting.DurationValue),
+ setting.NewDefinition(ManagedByCaption, setting.UserSetting, setting.StringValue),
+ setting.NewDefinition(ManagedByOrganizationName, setting.UserSetting, setting.StringValue),
+ setting.NewDefinition(ManagedByURL, setting.UserSetting, setting.StringValue),
+ setting.NewDefinition(NetworkDevicesVisibility, setting.UserSetting, setting.VisibilityValue),
+ setting.NewDefinition(PreferencesMenuVisibility, setting.UserSetting, setting.VisibilityValue),
+ setting.NewDefinition(ResetToDefaultsVisibility, setting.UserSetting, setting.VisibilityValue),
+ setting.NewDefinition(RunExitNodeVisibility, setting.UserSetting, setting.VisibilityValue),
+ setting.NewDefinition(SuggestedExitNodeVisibility, setting.UserSetting, setting.VisibilityValue),
+ setting.NewDefinition(TestMenuVisibility, setting.UserSetting, setting.VisibilityValue),
+ setting.NewDefinition(UpdateMenuVisibility, setting.UserSetting, setting.VisibilityValue),
+ setting.NewDefinition(OnboardingFlowVisibility, setting.UserSetting, setting.VisibilityValue),
+}
+
+func init() {
+ internal.Init.MustDefer(func() error {
+ // Avoid implicit [setting.Definition] registration during tests.
+ // Each test should control which policy settings to register.
+ // Use [setting.SetDefinitionsForTest] to specify necessary definitions,
+ // or [setWellKnownSettingsForTest] to set implicit definitions for the test duration.
+ if testenv.InTest() {
+ return nil
+ }
+ for _, d := range implicitDefinitions {
+ setting.RegisterDefinition(d)
+ }
+ return nil
+ })
+}
+
+var implicitDefinitionMap lazy.SyncValue[setting.DefinitionMap]
+
+// WellKnownSettingDefinition returns a well-known, implicit setting definition by its key,
+// or an [ErrNoSuchKey] error if a policy setting with the specified key does not exist
+// among implicit policy definitions.
+func WellKnownSettingDefinition(k Key) (*setting.Definition, error) {
+ m, err := implicitDefinitionMap.GetErr(func() (setting.DefinitionMap, error) {
+ return setting.DefinitionMapOf(implicitDefinitions)
+ })
+ if err != nil {
+ return nil, err
+ }
+ if d, ok := m[k]; ok {
+ return d, nil
+ }
+ return nil, ErrNoSuchKey
+}
+
+// RegisterWellKnownSettingsForTest registers all implicit setting definitions
+// for the duration of the test.
+func RegisterWellKnownSettingsForTest(tb TB) {
+ tb.Helper()
+ err := setting.SetDefinitionsForTest(tb, implicitDefinitions...)
+ if err != nil {
+ tb.Fatalf("Failed to register well-known settings: %v", err)
+ }
+}
diff --git a/util/syspolicy/policy_keys_test.go b/util/syspolicy/policy_keys_test.go
new file mode 100644
index 0000000000000..4d3260f3e0e60
--- /dev/null
+++ b/util/syspolicy/policy_keys_test.go
@@ -0,0 +1,95 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package syspolicy
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "os"
+ "reflect"
+ "strconv"
+ "testing"
+
+ "tailscale.com/util/syspolicy/setting"
+)
+
// TestKnownKeysRegistered verifies that every [Key] constant declared in
// policy_keys.go has a corresponding entry in implicitDefinitions, and that
// the definition's key matches the constant's value.
func TestKnownKeysRegistered(t *testing.T) {
	keyConsts, err := listStringConsts[Key]("policy_keys.go")
	if err != nil {
		t.Fatalf("listStringConsts failed: %v", err)
	}

	m, err := setting.DefinitionMapOf(implicitDefinitions)
	if err != nil {
		t.Fatalf("definitionMapOf failed: %v", err)
	}

	for _, key := range keyConsts {
		t.Run(string(key), func(t *testing.T) {
			d := m[key]
			if d == nil {
				t.Fatalf("%q was not registered", key)
			}
			if d.Key() != key {
				t.Fatalf("d.Key got: %s, want %s", d.Key(), key)
			}
		})
	}
}

// TestNotAWellKnownSetting verifies that looking up a nonexistent key
// returns a nil definition and a non-nil error rather than a definition.
func TestNotAWellKnownSetting(t *testing.T) {
	d, err := WellKnownSettingDefinition("TestSettingDoesNotExist")
	if d != nil || err == nil {
		t.Fatalf("got %v, %v; want nil, %v", d, err, ErrNoSuchKey)
	}
}
+
+func listStringConsts[T ~string](filename string) (map[string]T, error) {
+ fset := token.NewFileSet()
+ src, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ f, err := parser.ParseFile(fset, filename, src, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ consts := make(map[string]T)
+ typeName := reflect.TypeFor[T]().Name()
+ for _, d := range f.Decls {
+ g, ok := d.(*ast.GenDecl)
+ if !ok || g.Tok != token.CONST {
+ continue
+ }
+
+ for _, s := range g.Specs {
+ vs, ok := s.(*ast.ValueSpec)
+ if !ok || len(vs.Names) != len(vs.Values) {
+ continue
+ }
+ if typ, ok := vs.Type.(*ast.Ident); !ok || typ.Name != typeName {
+ continue
+ }
+
+ for i, n := range vs.Names {
+ lit, ok := vs.Values[i].(*ast.BasicLit)
+ if !ok {
+ return nil, fmt.Errorf("unexpected string literal: %v = %v", n.Name, types.ExprString(vs.Values[i]))
+ }
+ val, err := strconv.Unquote(lit.Value)
+ if err != nil {
+ return nil, fmt.Errorf("unexpected string literal: %v = %v", n.Name, lit.Value)
+ }
+ consts[n.Name] = T(val)
+ }
+ }
+ }
+
+ return consts, nil
+}
diff --git a/util/syspolicy/policy_keys_windows.go b/util/syspolicy/policy_keys_windows.go
deleted file mode 100644
index 5e9a716957bdb..0000000000000
--- a/util/syspolicy/policy_keys_windows.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) Tailscale Inc & AUTHORS
-// SPDX-License-Identifier: BSD-3-Clause
-
-package syspolicy
-
-var stringKeys = []Key{
- ControlURL,
- LogTarget,
- Tailnet,
- ExitNodeID,
- ExitNodeIP,
- EnableIncomingConnections,
- EnableServerMode,
- ExitNodeAllowLANAccess,
- EnableTailscaleDNS,
- EnableTailscaleSubnets,
- AdminConsoleVisibility,
- NetworkDevicesVisibility,
- TestMenuVisibility,
- UpdateMenuVisibility,
- RunExitNodeVisibility,
- PreferencesMenuVisibility,
- ExitNodeMenuVisibility,
- AutoUpdateVisibility,
- ResetToDefaultsVisibility,
- KeyExpirationNoticeTime,
- PostureChecking,
- ManagedByOrganizationName,
- ManagedByCaption,
- ManagedByURL,
-}
-
-var boolKeys = []Key{
- LogSCMInteractions,
- FlushDNSOnSessionUnlock,
-}
-
-var uint64Keys = []Key{}
diff --git a/util/syspolicy/rsop/change_callbacks.go b/util/syspolicy/rsop/change_callbacks.go
new file mode 100644
index 0000000000000..b962f30c008c1
--- /dev/null
+++ b/util/syspolicy/rsop/change_callbacks.go
@@ -0,0 +1,107 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package rsop
+
+import (
+ "reflect"
+ "slices"
+ "sync"
+ "time"
+
+ "tailscale.com/util/set"
+ "tailscale.com/util/syspolicy/internal/loggerx"
+ "tailscale.com/util/syspolicy/setting"
+)
+
// Change represents a change from the Old to the New value of type T.
type Change[T any] struct {
	New, Old T
}

// PolicyChangeCallback is a function called whenever a policy changes.
type PolicyChangeCallback func(*PolicyChange)

// PolicyChange describes a policy change.
type PolicyChange struct {
	// snapshots holds the effective policy snapshots before (Old)
	// and after (New) the change.
	snapshots Change[*setting.Snapshot]
}

// New returns the [setting.Snapshot] after the change.
func (c PolicyChange) New() *setting.Snapshot {
	return c.snapshots.New
}

// Old returns the [setting.Snapshot] before the change.
func (c PolicyChange) Old() *setting.Snapshot {
	return c.snapshots.Old
}
+
+// HasChanged reports whether a policy setting with the specified [setting.Key], has changed.
+func (c PolicyChange) HasChanged(key setting.Key) bool {
+ new, newErr := c.snapshots.New.GetErr(key)
+ old, oldErr := c.snapshots.Old.GetErr(key)
+ if newErr != nil && oldErr != nil {
+ return false
+ }
+ if newErr != nil || oldErr != nil {
+ return true
+ }
+ switch newVal := new.(type) {
+ case bool, uint64, string, setting.Visibility, setting.PreferenceOption, time.Duration:
+ return newVal != old
+ case []string:
+ oldVal, ok := old.([]string)
+ return !ok || !slices.Equal(newVal, oldVal)
+ default:
+ loggerx.Errorf("[unexpected] %q has an unsupported value type: %T", key, newVal)
+ return !reflect.DeepEqual(new, old)
+ }
+}
+
// policyChangeCallbacks are the callbacks to invoke when the effective policy changes.
// It is safe for concurrent use.
type policyChangeCallbacks struct {
	mu sync.Mutex
	// cbs holds the registered callbacks; [policyChangeCallbacks.Close]
	// sets it to nil to prevent further invocations.
	cbs set.HandleSet[PolicyChangeCallback]
}
+
+// Register adds the specified callback to be invoked whenever the policy changes.
+func (c *policyChangeCallbacks) Register(callback PolicyChangeCallback) (unregister func()) {
+ c.mu.Lock()
+ handle := c.cbs.Add(callback)
+ c.mu.Unlock()
+ return func() {
+ c.mu.Lock()
+ delete(c.cbs, handle)
+ c.mu.Unlock()
+ }
+}
+
// Invoke calls the registered callback functions with the specified policy change info.
// Each callback runs on its own goroutine; Invoke does not return until all of
// them have completed.
func (c *policyChangeCallbacks) Invoke(snapshots Change[*setting.Snapshot]) {
	var wg sync.WaitGroup
	// Deferred calls run in LIFO order: c.mu.Unlock (deferred below) runs
	// first, then wg.Wait. Hence the mutex is NOT held while waiting for
	// the callbacks to finish.
	defer wg.Wait()

	c.mu.Lock()
	defer c.mu.Unlock()

	wg.Add(len(c.cbs))
	change := &PolicyChange{snapshots: snapshots}
	// NOTE(review): the goroutine closure captures the loop variable cb;
	// this relies on Go 1.22+ per-iteration loop variable semantics.
	for _, cb := range c.cbs {
		go func() {
			defer wg.Done()
			cb(change)
		}()
	}
}
+
+// Close awaits the completion of active callbacks and prevents any further invocations.
+func (c *policyChangeCallbacks) Close() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.cbs != nil {
+ clear(c.cbs)
+ c.cbs = nil
+ }
+}
diff --git a/util/syspolicy/rsop/resultant_policy.go b/util/syspolicy/rsop/resultant_policy.go
new file mode 100644
index 0000000000000..b811a00eed77b
--- /dev/null
+++ b/util/syspolicy/rsop/resultant_policy.go
@@ -0,0 +1,456 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package rsop
+
+import (
+ "errors"
+ "fmt"
+ "slices"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "tailscale.com/util/syspolicy/internal"
+ "tailscale.com/util/syspolicy/internal/loggerx"
+ "tailscale.com/util/syspolicy/setting"
+
+ "tailscale.com/util/syspolicy/source"
+)
+
// ErrPolicyClosed is returned by [Policy.Reload], [Policy.addSource],
// [Policy.removeSource] and [Policy.replaceSource] if the policy has been closed.
var ErrPolicyClosed = errors.New("effective policy closed")

// The minimum and maximum wait times after detecting a policy change
// before reloading the policy. This only affects policy reloads triggered
// by a change in the underlying [source.Store] and does not impact
// synchronous, caller-initiated reloads, such as when [Policy.Reload] is called.
//
// Policy changes occurring within [policyReloadMinDelay] of each other
// will be batched together, resulting in a single policy reload
// no later than [policyReloadMaxDelay] after the first detected change.
// In other words, the effective policy will be reloaded no more often than once
// every 5 seconds, but at most 15 seconds after an underlying [source.Store]
// has issued a policy change callback.
//
// See [Policy.watchReload].
//
// These are declared as variables rather than constants so that tests
// can override them (see [setForTest]).
var (
	policyReloadMinDelay = 5 * time.Second
	policyReloadMaxDelay = 15 * time.Second
)

// Policy provides access to the current effective [setting.Snapshot] for a given
// scope and allows to reload it from the underlying [source.Store] list. It also allows to
// subscribe and receive a callback whenever the effective [setting.Snapshot] is changed.
//
// It is safe for concurrent use.
type Policy struct {
	// scope is the policy scope this Policy applies to; set at creation and
	// never modified afterwards.
	scope setting.PolicyScope

	reloadCh chan reloadRequest // 1-buffered; written to when a policy reload is required
	closeCh  chan struct{}      // closed to signal that the Policy is being closed
	doneCh   chan struct{}      // closed by [Policy.closeInternal]

	// effective is the most recent version of the [setting.Snapshot]
	// containing policy settings merged from all applicable sources.
	effective atomic.Pointer[setting.Snapshot]

	changeCallbacks policyChangeCallbacks

	// mu guards the fields below.
	mu             sync.Mutex
	watcherStarted bool // whether [Policy.watchReload] was started
	sources        source.ReadableSources
	closing        bool // whether [Policy.Close] was called (even if we're still closing)
}
+
// newPolicy returns a new [Policy] for the specified [setting.PolicyScope]
// that tracks changes and merges policy settings read from the specified sources.
func newPolicy(scope setting.PolicyScope, sources ...*source.Source) (_ *Policy, err error) {
	readableSources := make(source.ReadableSources, 0, len(sources))
	// Close any reading sessions we opened if we end up returning an error.
	defer func() {
		if err != nil {
			readableSources.Close()
		}
	}()
	for _, s := range sources {
		reader, err := s.Reader()
		if err != nil {
			return nil, fmt.Errorf("failed to get a store reader: %w", err)
		}
		session, err := reader.OpenSession()
		if err != nil {
			return nil, fmt.Errorf("failed to open a reading session: %w", err)
		}
		readableSources = append(readableSources, source.ReadableSource{Source: s, ReadingSession: session})
	}

	// Sort policy sources by their precedence from lower to higher.
	// For example, {UserPolicy},{ProfilePolicy},{DevicePolicy}.
	readableSources.StableSort()

	p := &Policy{
		scope:    scope,
		sources:  readableSources,
		reloadCh: make(chan reloadRequest, 1),
		closeCh:  make(chan struct{}),
		doneCh:   make(chan struct{}),
	}
	// Perform the initial load synchronously so a successfully created Policy
	// always has an effective snapshot; no forced store re-read is needed here.
	//
	// NOTE(review): on failure, both p.Close (which closes p.sources via
	// closeInternal) and the deferred readableSources.Close above will run;
	// confirm that closing the sessions twice is safe.
	if _, err := p.reloadNow(false); err != nil {
		p.Close()
		return nil, err
	}
	p.startWatchReloadIfNeeded()
	return p, nil
}
+
// IsValid reports whether p is in a valid state and has not been closed.
//
// Since p's state can be changed by other goroutines at any time, this should
// only be used as an optimization.
func (p *Policy) IsValid() bool {
	select {
	case <-p.closeCh:
		return false
	default:
		return true
	}
}

// Scope returns the [setting.PolicyScope] that this policy applies to.
func (p *Policy) Scope() setting.PolicyScope {
	return p.scope
}

// Get returns the effective [setting.Snapshot].
// It is non-nil once the Policy has been successfully created,
// as newPolicy performs an initial reload.
func (p *Policy) Get() *setting.Snapshot {
	return p.effective.Load()
}

// RegisterChangeCallback adds a function to be called whenever the effective
// policy changes. The returned function can be used to unregister the callback.
func (p *Policy) RegisterChangeCallback(callback PolicyChangeCallback) (unregister func()) {
	return p.changeCallbacks.Register(callback)
}
+
// Reload synchronously re-reads policy settings from the underlying list of policy sources,
// constructing a new merged [setting.Snapshot] even if the policy remains unchanged.
// In most scenarios, there's no need to re-read the policy manually.
// Instead, it is recommended to register a policy change callback, or to use
// the most recent [setting.Snapshot] returned by the [Policy.Get] method.
//
// It must not be called with p.mu held.
func (p *Policy) Reload() (*setting.Snapshot, error) {
	return p.reload(true)
}

// reload is like Reload, but allows to specify whether to re-read policy settings
// from unchanged policy sources.
//
// It must not be called with p.mu held.
func (p *Policy) reload(force bool) (*setting.Snapshot, error) {
	// If there are no sources, the reload watcher is never started and
	// there is nothing to re-read; return the current snapshot.
	if !p.startWatchReloadIfNeeded() {
		return p.Get(), nil
	}

	// Hand the request off to the watchReload goroutine and wait for either
	// the response or the policy being closed.
	respCh := make(chan reloadResponse, 1)
	select {
	case p.reloadCh <- reloadRequest{force: force, respCh: respCh}:
		// continue
	case <-p.closeCh:
		return nil, ErrPolicyClosed
	}
	select {
	case resp := <-respCh:
		return resp.policy, resp.err
	case <-p.closeCh:
		return nil, ErrPolicyClosed
	}
}

// reloadAsync requests an asynchronous background policy reload.
// The policy will be reloaded no later than in [policyReloadMaxDelay].
//
// It must not be called with p.mu held.
func (p *Policy) reloadAsync() {
	if !p.startWatchReloadIfNeeded() {
		return
	}
	// Non-blocking send: reloadCh is 1-buffered, so a pending request
	// already guarantees a reload will happen.
	select {
	case p.reloadCh <- reloadRequest{}:
		// Sent.
	default:
		// A reload request is already en route.
	}
}
+
// reloadNow loads and merges policies from all sources, updating the effective policy.
// If the force parameter is true, it forcibly reloads policies
// from the underlying policy store, even if no policy changes were detected.
//
// Except for the initial policy reload during the [Policy] creation,
// this method should only be called from the [Policy.watchReload] goroutine.
func (p *Policy) reloadNow(force bool) (*setting.Snapshot, error) {
	new, err := p.readAndMerge(force)
	if err != nil {
		return nil, err
	}
	// Publish the new snapshot before invoking callbacks, so [Policy.Get]
	// observes it no later than the callbacks do.
	old := p.effective.Swap(new)
	// A nil old value indicates the initial policy load rather than a policy change.
	// Additionally, we should not invoke the policy change callbacks unless the
	// policy items have actually changed.
	if old != nil && !old.EqualItems(new) {
		snapshots := Change[*setting.Snapshot]{New: new, Old: old}
		p.changeCallbacks.Invoke(snapshots)
	}
	return new, nil
}

// Done returns a channel that is closed when the [Policy] is closed.
func (p *Policy) Done() <-chan struct{} {
	return p.doneCh
}

// readAndMerge reads and merges policy settings from all applicable sources,
// returning a [setting.Snapshot] with the merged result.
// If the force parameter is true, it re-reads policy settings from each source
// even if no policy change was observed, and returns an error if the read
// operation fails.
func (p *Policy) readAndMerge(force bool) (*setting.Snapshot, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	// Start with an empty policy in the target scope.
	effective := setting.NewSnapshot(nil, setting.SummaryWith(p.scope))
	// Then merge policy settings from all sources.
	// Policy sources with the highest precedence (e.g., the device policy) are merged last,
	// overriding any conflicting policy settings with lower precedence.
	for _, s := range p.sources {
		var policy *setting.Snapshot
		if force {
			var err error
			if policy, err = s.ReadSettings(); err != nil {
				return nil, err
			}
		} else {
			policy = s.GetSettings()
		}
		effective = setting.MergeSnapshots(effective, policy)
	}
	return effective, nil
}
+
// addSource adds the specified source to the list of sources used by p,
// and triggers a synchronous policy refresh. It returns an error
// if the source is not a valid source for this effective policy,
// or if the effective policy is being closed,
// or if policy refresh fails with an error.
func (p *Policy) addSource(source *source.Source) error {
	return p.applySourcesChange(source, nil)
}

// removeSource removes the specified source from the list of sources used by p,
// and triggers a synchronous policy refresh. It returns an error if the
// effective policy is being closed, or if policy refresh fails with an error.
func (p *Policy) removeSource(source *source.Source) error {
	return p.applySourcesChange(nil, source)
}

// replaceSource replaces the old source with the new source atomically,
// and triggers a synchronous policy refresh. It returns an error
// if the source is not a valid source for this effective policy,
// or if the effective policy is being closed,
// or if policy refresh fails with an error.
func (p *Policy) replaceSource(old, new *source.Source) error {
	// Note the argument order: new is added, old is removed.
	return p.applySourcesChange(new, old)
}

// applySourcesChange adds toAdd and/or removes toRemove from p.sources,
// then performs a single synchronous (non-forced) policy reload if
// anything actually changed. Either argument may be nil.
func (p *Policy) applySourcesChange(toAdd, toRemove *source.Source) error {
	if toAdd == toRemove {
		// Includes the both-nil case; nothing to do.
		return nil
	}
	if toAdd != nil && !toAdd.Scope().Contains(p.scope) {
		return errors.New("scope mismatch")
	}

	changed, err := func() (changed bool, err error) {
		p.mu.Lock()
		defer p.mu.Unlock()
		if toAdd != nil && !p.sources.Contains(toAdd) {
			reader, err := toAdd.Reader()
			if err != nil {
				return false, fmt.Errorf("failed to get a store reader: %w", err)
			}
			session, err := reader.OpenSession()
			if err != nil {
				return false, fmt.Errorf("failed to open a reading session: %w", err)
			}

			// Insert at the precedence-preserving position, and shadow toAdd
			// with its readable wrapper for insertion and watching.
			addAt := p.sources.InsertionIndexOf(toAdd)
			toAdd := source.ReadableSource{
				Source:         toAdd,
				ReadingSession: session,
			}
			p.sources = slices.Insert(p.sources, addAt, toAdd)
			go p.watchPolicyChanges(toAdd)
			changed = true
		}
		if toRemove != nil {
			if deleteAt := p.sources.IndexOf(toRemove); deleteAt != -1 {
				p.sources.DeleteAt(deleteAt)
				changed = true
			}
		}
		return changed, nil
	}()
	// Reload outside the closure: reload must not run with p.mu held.
	if changed {
		_, err = p.reload(false)
	}
	return err // may be nil or non-nil
}
+
// watchPolicyChanges runs on its own goroutine (one per source) and requests
// an asynchronous policy reload whenever s's reading session reports a change.
// It exits when the session's change channel is closed or the [Policy] is closed.
func (p *Policy) watchPolicyChanges(s source.ReadableSource) {
	for {
		select {
		case _, ok := <-s.ReadingSession.PolicyChanged():
			if !ok {
				// The change channel was closed: the session has ended.
				p.mu.Lock()
				abruptlyClosed := slices.Contains(p.sources, s)
				p.mu.Unlock()
				if abruptlyClosed {
					// The underlying [source.Source] was closed abruptly without
					// being properly removed or replaced by another policy source.
					// We can't keep this [Policy] up to date, so we should close it.
					p.Close()
				}
				return
			}
			// The PolicyChanged channel was signaled.
			// Request an asynchronous policy reload.
			p.reloadAsync()
		case <-p.closeCh:
			// The [Policy] is being closed.
			return
		}
	}
}

// startWatchReloadIfNeeded starts [Policy.watchReload] in a new goroutine
// if the list of policy sources is not empty, it hasn't been started yet,
// and the [Policy] is not being closed.
// It reports whether [Policy.watchReload] has ever been started.
//
// It must not be called with p.mu held.
func (p *Policy) startWatchReloadIfNeeded() bool {
	p.mu.Lock()
	defer p.mu.Unlock()
	if len(p.sources) != 0 && !p.watcherStarted && !p.closing {
		go p.watchReload()
		// Also start one change watcher per existing source.
		for i := range p.sources {
			go p.watchPolicyChanges(p.sources[i])
		}
		p.watcherStarted = true
	}
	return p.watcherStarted
}

// reloadRequest describes a policy reload request.
type reloadRequest struct {
	// force policy reload regardless of whether a policy change was detected.
	force bool
	// respCh is an optional channel. If non-nil, it makes the reload request
	// synchronous and receives the result.
	respCh chan<- reloadResponse
}

// reloadResponse is a result of a synchronous policy reload.
type reloadResponse struct {
	policy *setting.Snapshot
	err    error
}
+
+// watchReload processes incoming synchronous and asynchronous policy reload requests.
+//
+// Synchronous requests (with a non-nil respCh) are served immediately.
+//
+// Asynchronous requests are debounced and throttled: they are executed at least
+// [policyReloadMinDelay] after the last request, but no later than [policyReloadMaxDelay]
+// after the first request in a batch.
+func (p *Policy) watchReload() {
+ defer p.closeInternal()
+
+ force := false // whether a forced refresh was requested
+ var delayCh, timeoutCh <-chan time.Time
+ reload := func(respCh chan<- reloadResponse) {
+ delayCh, timeoutCh = nil, nil
+ policy, err := p.reloadNow(force)
+ if err != nil {
+ loggerx.Errorf("%v policy reload failed: %v\n", p.scope, err)
+ }
+ if respCh != nil {
+ respCh <- reloadResponse{policy: policy, err: err}
+ }
+ force = false
+ }
+
+loop:
+ for {
+ select {
+ case req := <-p.reloadCh:
+ if req.force {
+ force = true
+ }
+ if req.respCh != nil {
+ reload(req.respCh)
+ continue
+ }
+ if delayCh == nil {
+ timeoutCh = time.After(policyReloadMinDelay)
+ }
+ delayCh = time.After(policyReloadMaxDelay)
+ case <-delayCh:
+ reload(nil)
+ case <-timeoutCh:
+ reload(nil)
+ case <-p.closeCh:
+ break loop
+ }
+ }
+}
+
// closeInternal performs the actual teardown: it closes the reading sessions,
// shuts down the change callbacks, signals completion via doneCh, and calls
// deletePolicy (defined elsewhere in this package; presumably removes p from
// the scope-to-policy registry — confirm).
// It runs when [Policy.watchReload] returns, or is invoked directly from
// [Policy.Close] if the watcher was never started.
func (p *Policy) closeInternal() {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.sources.Close()
	p.changeCallbacks.Close()
	close(p.doneCh)
	deletePolicy(p)
}

// Close initiates the closing of the policy.
// The [Policy.Done] channel is closed to signal that the operation has been completed.
func (p *Policy) Close() {
	p.mu.Lock()
	alreadyClosing := p.closing
	watcherStarted := p.watcherStarted
	p.closing = true
	p.mu.Unlock()

	// Close is idempotent: only the first call proceeds past this point.
	if alreadyClosing {
		return
	}

	close(p.closeCh)
	if !watcherStarted {
		// Normally, closing p.closeCh signals [Policy.watchReload] to exit,
		// and [Policy.closeInternal] performs the actual closing when
		// [Policy.watchReload] returns. However, if the watcher was never
		// started, we need to call [Policy.closeInternal] manually.
		go p.closeInternal()
	}
}

// setForTest overrides *target with newValue for the duration of the test,
// restoring the original value during test cleanup.
func setForTest[T any](tb internal.TB, target *T, newValue T) {
	oldValue := *target
	tb.Cleanup(func() { *target = oldValue })
	*target = newValue
}
diff --git a/util/syspolicy/rsop/resultant_policy_test.go b/util/syspolicy/rsop/resultant_policy_test.go
new file mode 100644
index 0000000000000..e4bfb1a886878
--- /dev/null
+++ b/util/syspolicy/rsop/resultant_policy_test.go
@@ -0,0 +1,981 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package rsop
+
+import (
+ "errors"
+ "slices"
+ "sort"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "tailscale.com/tstest"
+ "tailscale.com/util/syspolicy/setting"
+
+ "tailscale.com/util/syspolicy/source"
+)
+
// TestGetEffectivePolicyNoSource verifies that, for each policy scope with no
// registered sources, the effective policy is empty, Reload succeeds and also
// yields an empty snapshot, and no goroutines are created.
func TestGetEffectivePolicyNoSource(t *testing.T) {
	tests := []struct {
		name  string
		scope setting.PolicyScope
	}{
		{
			name:  "DevicePolicy",
			scope: setting.DeviceScope,
		},
		{
			name:  "CurrentProfilePolicy",
			scope: setting.CurrentProfileScope,
		},
		{
			name:  "CurrentUserPolicy",
			scope: setting.CurrentUserScope,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var policy *Policy
			t.Cleanup(func() {
				if policy != nil {
					policy.Close()
					<-policy.Done()
				}
			})

			// Make sure we don't create any goroutines.
			// We intentionally call ResourceCheck after t.Cleanup, so that when the test exits,
			// the resource check runs before the test cleanup closes the policy.
			// This helps to report any unexpectedly created goroutines.
			// The goal is to ensure that using the syspolicy package, and particularly
			// the rsop sub-package, is not wasteful and does not create unnecessary goroutines
			// on platforms without registered policy sources.
			tstest.ResourceCheck(t)

			policy, err := PolicyFor(tt.scope)
			if err != nil {
				t.Fatalf("Failed to get effective policy for %v: %v", tt.scope, err)
			}

			if got := policy.Get(); got.Len() != 0 {
				t.Errorf("Snapshot: got %v; want empty", got)
			}

			if got, err := policy.Reload(); err != nil {
				t.Errorf("Reload failed: %v", err)
			} else if got.Len() != 0 {
				t.Errorf("Snapshot: got %v; want empty", got)
			}
		})
	}
}
+
// TestRegisterSourceAndGetEffectivePolicy is a table-driven test that verifies
// how policy sources registered before (initialSources) and after
// (additionalSources) an effective policy is created are merged: higher
// precedence sources win conflicts, and sources outside the target scope
// do not contribute.
func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) {
	type sourceConfig struct {
		name          string
		scope         setting.PolicyScope
		settingKey    setting.Key
		settingValue  string
		wantEffective bool
	}
	tests := []struct {
		name              string
		scope             setting.PolicyScope
		initialSources    []sourceConfig
		additionalSources []sourceConfig
		wantSnapshot      *setting.Snapshot
	}{
		{
			name:         "DevicePolicy/NoSources",
			scope:        setting.DeviceScope,
			wantSnapshot: setting.NewSnapshot(nil, setting.DeviceScope),
		},
		{
			name:         "UserScope/NoSources",
			scope:        setting.CurrentUserScope,
			wantSnapshot: setting.NewSnapshot(nil, setting.CurrentUserScope),
		},
		{
			name:  "DevicePolicy/OneInitialSource",
			scope: setting.DeviceScope,
			initialSources: []sourceConfig{
				{
					name:          "TestSourceA",
					scope:         setting.DeviceScope,
					settingKey:    "TestKeyA",
					settingValue:  "TestValueA",
					wantEffective: true,
				},
			},
			wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{
				"TestKeyA": setting.RawItemWith("TestValueA", nil, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)),
			}, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)),
		},
		{
			name:  "DevicePolicy/OneAdditionalSource",
			scope: setting.DeviceScope,
			additionalSources: []sourceConfig{
				{
					name:          "TestSourceA",
					scope:         setting.DeviceScope,
					settingKey:    "TestKeyA",
					settingValue:  "TestValueA",
					wantEffective: true,
				},
			},
			wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{
				"TestKeyA": setting.RawItemWith("TestValueA", nil, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)),
			}, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)),
		},
		{
			name:  "DevicePolicy/ManyInitialSources/NoConflicts",
			scope: setting.DeviceScope,
			initialSources: []sourceConfig{
				{
					name:          "TestSourceA",
					scope:         setting.DeviceScope,
					settingKey:    "TestKeyA",
					settingValue:  "TestValueA",
					wantEffective: true,
				},
				{
					name:          "TestSourceB",
					scope:         setting.DeviceScope,
					settingKey:    "TestKeyB",
					settingValue:  "TestValueB",
					wantEffective: true,
				},
				{
					name:          "TestSourceC",
					scope:         setting.DeviceScope,
					settingKey:    "TestKeyC",
					settingValue:  "TestValueC",
					wantEffective: true,
				},
			},
			wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{
				"TestKeyA": setting.RawItemWith("TestValueA", nil, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)),
				"TestKeyB": setting.RawItemWith("TestValueB", nil, setting.NewNamedOrigin("TestSourceB", setting.DeviceScope)),
				"TestKeyC": setting.RawItemWith("TestValueC", nil, setting.NewNamedOrigin("TestSourceC", setting.DeviceScope)),
			}, setting.DeviceScope),
		},
		{
			name:  "DevicePolicy/ManyInitialSources/Conflicts",
			scope: setting.DeviceScope,
			initialSources: []sourceConfig{
				{
					name:          "TestSourceA",
					scope:         setting.DeviceScope,
					settingKey:    "TestKeyA",
					settingValue:  "TestValueA",
					wantEffective: true,
				},
				{
					name:          "TestSourceB",
					scope:         setting.DeviceScope,
					settingKey:    "TestKeyB",
					settingValue:  "TestValueB",
					wantEffective: true,
				},
				{
					name:          "TestSourceC",
					scope:         setting.DeviceScope,
					settingKey:    "TestKeyA",
					settingValue:  "TestValueC",
					wantEffective: true,
				},
			},
			wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{
				"TestKeyA": setting.RawItemWith("TestValueC", nil, setting.NewNamedOrigin("TestSourceC", setting.DeviceScope)),
				"TestKeyB": setting.RawItemWith("TestValueB", nil, setting.NewNamedOrigin("TestSourceB", setting.DeviceScope)),
			}, setting.DeviceScope),
		},
		{
			name:  "DevicePolicy/MixedSources/Conflicts",
			scope: setting.DeviceScope,
			initialSources: []sourceConfig{
				{
					name:          "TestSourceA",
					scope:         setting.DeviceScope,
					settingKey:    "TestKeyA",
					settingValue:  "TestValueA",
					wantEffective: true,
				},
				{
					name:          "TestSourceB",
					scope:         setting.DeviceScope,
					settingKey:    "TestKeyB",
					settingValue:  "TestValueB",
					wantEffective: true,
				},
				{
					name:          "TestSourceC",
					scope:         setting.DeviceScope,
					settingKey:    "TestKeyA",
					settingValue:  "TestValueC",
					wantEffective: true,
				},
			},
			additionalSources: []sourceConfig{
				{
					name:          "TestSourceD",
					scope:         setting.DeviceScope,
					settingKey:    "TestKeyA",
					settingValue:  "TestValueD",
					wantEffective: true,
				},
				{
					name:          "TestSourceE",
					scope:         setting.DeviceScope,
					settingKey:    "TestKeyC",
					settingValue:  "TestValueE",
					wantEffective: true,
				},
				{
					name:          "TestSourceF",
					scope:         setting.DeviceScope,
					settingKey:    "TestKeyA",
					settingValue:  "TestValueF",
					wantEffective: true,
				},
			},
			wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{
				"TestKeyA": setting.RawItemWith("TestValueF", nil, setting.NewNamedOrigin("TestSourceF", setting.DeviceScope)),
				"TestKeyB": setting.RawItemWith("TestValueB", nil, setting.NewNamedOrigin("TestSourceB", setting.DeviceScope)),
				"TestKeyC": setting.RawItemWith("TestValueE", nil, setting.NewNamedOrigin("TestSourceE", setting.DeviceScope)),
			}, setting.DeviceScope),
		},
		{
			name:  "UserScope/Init-DeviceSource",
			scope: setting.CurrentUserScope,
			initialSources: []sourceConfig{
				{
					name:          "TestSourceDevice",
					scope:         setting.DeviceScope,
					settingKey:    "TestKeyA",
					settingValue:  "DeviceValue",
					wantEffective: true,
				},
			},
			wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{
				"TestKeyA": setting.RawItemWith("DeviceValue", nil, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)),
			}, setting.CurrentUserScope, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)),
		},
		{
			name:  "UserScope/Init-DeviceSource/Add-UserSource",
			scope: setting.CurrentUserScope,
			initialSources: []sourceConfig{
				{
					name:          "TestSourceDevice",
					scope:         setting.DeviceScope,
					settingKey:    "TestKeyA",
					settingValue:  "DeviceValue",
					wantEffective: true,
				},
			},
			additionalSources: []sourceConfig{
				{
					name:          "TestSourceUser",
					scope:         setting.CurrentUserScope,
					settingKey:    "TestKeyB",
					settingValue:  "UserValue",
					wantEffective: true,
				},
			},
			wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{
				"TestKeyA": setting.RawItemWith("DeviceValue", nil, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)),
				"TestKeyB": setting.RawItemWith("UserValue", nil, setting.NewNamedOrigin("TestSourceUser", setting.CurrentUserScope)),
			}, setting.CurrentUserScope),
		},
		{
			name:  "UserScope/Init-DeviceSource/Add-UserSource-and-ProfileSource",
			scope: setting.CurrentUserScope,
			initialSources: []sourceConfig{
				{
					name:          "TestSourceDevice",
					scope:         setting.DeviceScope,
					settingKey:    "TestKeyA",
					settingValue:  "DeviceValue",
					wantEffective: true,
				},
			},
			additionalSources: []sourceConfig{
				{
					name:          "TestSourceProfile",
					scope:         setting.CurrentProfileScope,
					settingKey:    "TestKeyB",
					settingValue:  "ProfileValue",
					wantEffective: true,
				},
				{
					name:          "TestSourceUser",
					scope:         setting.CurrentUserScope,
					settingKey:    "TestKeyB",
					settingValue:  "UserValue",
					wantEffective: true,
				},
			},
			wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{
				"TestKeyA": setting.RawItemWith("DeviceValue", nil, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)),
				"TestKeyB": setting.RawItemWith("ProfileValue", nil, setting.NewNamedOrigin("TestSourceProfile", setting.CurrentProfileScope)),
			}, setting.CurrentUserScope),
		},
		{
			name:  "DevicePolicy/User-Source-does-not-apply",
			scope: setting.DeviceScope,
			initialSources: []sourceConfig{
				{
					name:          "TestSourceDevice",
					scope:         setting.DeviceScope,
					settingKey:    "TestKeyA",
					settingValue:  "DeviceValue",
					wantEffective: true,
				},
			},
			additionalSources: []sourceConfig{
				{
					name:          "TestSourceUser",
					scope:         setting.CurrentUserScope,
					settingKey:    "TestKeyA",
					settingValue:  "UserValue",
					wantEffective: false, // Registering a user source should have no impact on the device policy.
				},
			},
			wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{
				"TestKeyA": setting.RawItemWith("DeviceValue", nil, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)),
			}, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Register all settings that we use in this test.
			var definitions []*setting.Definition
			for _, source := range slices.Concat(tt.initialSources, tt.additionalSources) {
				definitions = append(definitions, setting.NewDefinition(source.settingKey, tt.scope.Kind(), setting.StringValue))
			}
			if err := setting.SetDefinitionsForTest(t, definitions...); err != nil {
				t.Fatalf("SetDefinitionsForTest failed: %v", err)
			}

			// Add the initial policy sources.
			// Note: the `source :=` declarations below deliberately shadow the
			// imported source package within the loop bodies.
			var wantSources []*source.Source
			for _, s := range tt.initialSources {
				store := source.NewTestStoreOf(t, source.TestSettingOf(s.settingKey, s.settingValue))
				source := source.NewSource(s.name, s.scope, store)
				if err := registerSource(source); err != nil {
					t.Fatalf("Failed to register policy source: %v", source)
				}
				if s.wantEffective {
					wantSources = append(wantSources, source)
				}
				t.Cleanup(func() { unregisterSource(source) })
			}

			// Retrieve the effective policy.
			policy, err := policyForTest(t, tt.scope)
			if err != nil {
				t.Fatalf("Failed to get effective policy for %v: %v", tt.scope, err)
			}

			checkPolicySources(t, policy, wantSources)

			// Add additional setting sources.
			for _, s := range tt.additionalSources {
				store := source.NewTestStoreOf(t, source.TestSettingOf(s.settingKey, s.settingValue))
				source := source.NewSource(s.name, s.scope, store)
				if err := registerSource(source); err != nil {
					t.Fatalf("Failed to register additional policy source: %v", source)
				}
				if s.wantEffective {
					wantSources = append(wantSources, source)
				}
				t.Cleanup(func() { unregisterSource(source) })
			}

			checkPolicySources(t, policy, wantSources)

			// Verify the final effective settings snapshots.
			if got := policy.Get(); !got.Equal(tt.wantSnapshot) {
				t.Errorf("Snapshot: got %v; want %v", got, tt.wantSnapshot)
			}
		})
	}
}
+
+// TestPolicyFor verifies the caching behavior of [PolicyFor]: it must return
+// the same [Policy] instance for equal scopes, distinct instances for
+// different scopes, and a fresh instance once the previous policy for a scope
+// has been closed.
+func TestPolicyFor(t *testing.T) {
+	tests := []struct {
+		name           string
+		scopeA, scopeB setting.PolicyScope
+		closePolicy    bool // indicates whether to close policyA before retrieving policyB
+		wantSame       bool // specifies whether policyA and policyB should reference the same [Policy] instance
+	}{
+		{
+			name:     "Device/Device",
+			scopeA:   setting.DeviceScope,
+			scopeB:   setting.DeviceScope,
+			wantSame: true,
+		},
+		{
+			name:     "Device/CurrentProfile",
+			scopeA:   setting.DeviceScope,
+			scopeB:   setting.CurrentProfileScope,
+			wantSame: false,
+		},
+		{
+			name:     "Device/CurrentUser",
+			scopeA:   setting.DeviceScope,
+			scopeB:   setting.CurrentUserScope,
+			wantSame: false,
+		},
+		{
+			name:     "CurrentProfile/CurrentProfile",
+			scopeA:   setting.CurrentProfileScope,
+			scopeB:   setting.CurrentProfileScope,
+			wantSame: true,
+		},
+		{
+			name:     "CurrentProfile/CurrentUser",
+			scopeA:   setting.CurrentProfileScope,
+			scopeB:   setting.CurrentUserScope,
+			wantSame: false,
+		},
+		{
+			name:     "CurrentUser/CurrentUser",
+			scopeA:   setting.CurrentUserScope,
+			scopeB:   setting.CurrentUserScope,
+			wantSame: true,
+		},
+		{
+			name:     "UserA/UserA",
+			scopeA:   setting.UserScopeOf("UserA"),
+			scopeB:   setting.UserScopeOf("UserA"),
+			wantSame: true,
+		},
+		{
+			name:     "UserA/UserB",
+			scopeA:   setting.UserScopeOf("UserA"),
+			scopeB:   setting.UserScopeOf("UserB"),
+			wantSame: false,
+		},
+		{
+			name:        "New-after-close",
+			scopeA:      setting.DeviceScope,
+			scopeB:      setting.DeviceScope,
+			closePolicy: true,
+			wantSame:    false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			policyA, err := policyForTest(t, tt.scopeA)
+			if err != nil {
+				t.Fatalf("Failed to get effective policy for %v: %v", tt.scopeA, err)
+			}
+
+			if tt.closePolicy {
+				policyA.Close()
+			}
+
+			policyB, err := policyForTest(t, tt.scopeB)
+			if err != nil {
+				t.Fatalf("Failed to get effective policy for %v: %v", tt.scopeB, err)
+			}
+
+			// Compare by pointer identity: equal scopes must share one cached instance.
+			if gotSame := policyA == policyB; gotSame != tt.wantSame {
+				t.Fatalf("Got same: %v; want same %v", gotSame, tt.wantSame)
+			}
+		})
+	}
+}
+
+// TestPolicyChangeHasChanged verifies that [PolicyChange.HasChanged] reports
+// changed keys, and only changed keys, for several raw value types, including
+// non-comparable ones like string slices.
+func TestPolicyChangeHasChanged(t *testing.T) {
+	tests := []struct {
+		name          string
+		old, new      map[setting.Key]setting.RawItem
+		wantChanged   []setting.Key
+		wantUnchanged []setting.Key
+	}{
+		{
+			name: "String-Settings",
+			old: map[setting.Key]setting.RawItem{
+				"ChangedSetting":   setting.RawItemOf("Old"),
+				"UnchangedSetting": setting.RawItemOf("Value"),
+			},
+			new: map[setting.Key]setting.RawItem{
+				"ChangedSetting":   setting.RawItemOf("New"),
+				"UnchangedSetting": setting.RawItemOf("Value"),
+			},
+			wantChanged:   []setting.Key{"ChangedSetting"},
+			wantUnchanged: []setting.Key{"UnchangedSetting"},
+		},
+		{
+			name: "UInt64-Settings",
+			old: map[setting.Key]setting.RawItem{
+				"ChangedSetting":   setting.RawItemOf(uint64(0)),
+				"UnchangedSetting": setting.RawItemOf(uint64(42)),
+			},
+			new: map[setting.Key]setting.RawItem{
+				"ChangedSetting":   setting.RawItemOf(uint64(1)),
+				"UnchangedSetting": setting.RawItemOf(uint64(42)),
+			},
+			wantChanged:   []setting.Key{"ChangedSetting"},
+			wantUnchanged: []setting.Key{"UnchangedSetting"},
+		},
+		{
+			name: "StringSlice-Settings",
+			old: map[setting.Key]setting.RawItem{
+				"ChangedSetting":   setting.RawItemOf([]string{"Chicago"}),
+				"UnchangedSetting": setting.RawItemOf([]string{"String1", "String2"}),
+			},
+			new: map[setting.Key]setting.RawItem{
+				"ChangedSetting":   setting.RawItemOf([]string{"New York"}),
+				"UnchangedSetting": setting.RawItemOf([]string{"String1", "String2"}),
+			},
+			wantChanged:   []setting.Key{"ChangedSetting"},
+			wantUnchanged: []setting.Key{"UnchangedSetting"},
+		},
+		{
+			name: "Int8-Settings", // We don't have actual int8 settings, but this should still work.
+			old: map[setting.Key]setting.RawItem{
+				"ChangedSetting":   setting.RawItemOf(int8(0)),
+				"UnchangedSetting": setting.RawItemOf(int8(42)),
+			},
+			new: map[setting.Key]setting.RawItem{
+				"ChangedSetting":   setting.RawItemOf(int8(1)),
+				"UnchangedSetting": setting.RawItemOf(int8(42)),
+			},
+			wantChanged:   []setting.Key{"ChangedSetting"},
+			wantUnchanged: []setting.Key{"UnchangedSetting"},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Build a PolicyChange directly from two snapshots rather than
+			// going through a policy reload.
+			old := setting.NewSnapshot(tt.old)
+			new := setting.NewSnapshot(tt.new)
+			change := PolicyChange{Change[*setting.Snapshot]{old, new}}
+			for _, wantChanged := range tt.wantChanged {
+				if !change.HasChanged(wantChanged) {
+					t.Errorf("%q changed: got false; want true", wantChanged)
+				}
+			}
+			for _, wantUnchanged := range tt.wantUnchanged {
+				if change.HasChanged(wantUnchanged) {
+					t.Errorf("%q unchanged: got true; want false", wantUnchanged)
+				}
+			}
+		})
+	}
+}
+
+// TestChangePolicySetting verifies that changing a policy setting in a
+// registered store triggers exactly one change callback per reload window,
+// that the callback fires no sooner than [policyReloadMinDelay] and no later
+// than [policyReloadMaxDelay], and that a store notification with no actual
+// setting change does not invoke callbacks at all.
+func TestChangePolicySetting(t *testing.T) {
+	// Register policy settings used in this test.
+	settingA := setting.NewDefinition("TestSettingA", setting.DeviceSetting, setting.StringValue)
+	settingB := setting.NewDefinition("TestSettingB", setting.DeviceSetting, setting.StringValue)
+	if err := setting.SetDefinitionsForTest(t, settingA, settingB); err != nil {
+		t.Fatalf("SetDefinitionsForTest failed: %v", err)
+	}
+
+	// Register a test policy store and create an effective policy that reads the policy settings from it.
+	store := source.NewTestStoreOf[string](t)
+	if _, err := RegisterStoreForTest(t, "TestSource", setting.DeviceScope, store); err != nil {
+		t.Fatalf("Failed to register policy store: %v", err)
+	}
+
+	// Use deterministic, test-sized reload delays so timing assertions below are meaningful.
+	setForTest(t, &policyReloadMinDelay, 100*time.Millisecond)
+	setForTest(t, &policyReloadMaxDelay, 500*time.Millisecond)
+
+	policy, err := policyForTest(t, setting.DeviceScope)
+	if err != nil {
+		t.Fatalf("Failed to get effective policy: %v", err)
+	}
+
+	// The policy setting is not configured yet.
+	if _, ok := policy.Get().GetSetting(settingA.Key()); ok {
+		t.Fatalf("Policy setting %q unexpectedly exists", settingA.Key())
+	}
+
+	// Subscribe to the policy change callback...
+	policyChanged := make(chan *PolicyChange)
+	unregister := policy.RegisterChangeCallback(func(pc *PolicyChange) { policyChanged <- pc })
+	t.Cleanup(unregister)
+
+	// ...make the change, and measure the time between initiating the change
+	// and receiving the callback.
+	start := time.Now()
+	const wantValueA = "TestValueA"
+	store.SetStrings(source.TestSettingOf(settingA.Key(), wantValueA))
+	change := <-policyChanged
+	gotDelay := time.Since(start)
+
+	// Ensure there is at least a [policyReloadMinDelay] delay between
+	// a change and the policy reload along with the callback invocation.
+	// This prevents reloading policy settings too frequently
+	// when multiple settings change within a short period of time.
+	if gotDelay < policyReloadMinDelay {
+		t.Errorf("Delay: got %v; want >= %v", gotDelay, policyReloadMinDelay)
+	}
+
+	// Verify that the [PolicyChange] passed to the policy change callback
+	// contains the correct information regarding the policy setting changes.
+	if !change.HasChanged(settingA.Key()) {
+		t.Errorf("Policy setting %q has not changed", settingA.Key())
+	}
+	if change.HasChanged(settingB.Key()) {
+		t.Errorf("Policy setting %q was unexpectedly changed", settingB.Key())
+	}
+	if _, ok := change.Old().GetSetting(settingA.Key()); ok {
+		t.Fatalf("Policy setting %q unexpectedly exists", settingA.Key())
+	}
+	if gotValue := change.New().Get(settingA.Key()); gotValue != wantValueA {
+		t.Errorf("Policy setting %q: got %q; want %q", settingA.Key(), gotValue, wantValueA)
+	}
+
+	// And also verify that the current (most recent) [setting.Snapshot]
+	// includes the change we just made.
+	if gotValue := policy.Get().Get(settingA.Key()); gotValue != wantValueA {
+		t.Errorf("Policy setting %q: got %q; want %q", settingA.Key(), gotValue, wantValueA)
+	}
+
+	// Now, let's change another policy setting value N times.
+	const N = 10
+	wantValueB := strconv.Itoa(N)
+	start = time.Now()
+	for i := range N {
+		store.SetStrings(source.TestSettingOf(settingB.Key(), strconv.Itoa(i+1)))
+	}
+
+	// The callback should be invoked only once, even though the policy setting
+	// has changed N times.
+	change = <-policyChanged
+	gotDelay = time.Since(start)
+	gotCallbacks := 1
+drain:
+	// Drain any extra callbacks until the reload window has quietly elapsed.
+	for {
+		select {
+		case <-policyChanged:
+			gotCallbacks++
+		case <-time.After(policyReloadMaxDelay):
+			break drain
+		}
+	}
+	if wantCallbacks := 1; gotCallbacks > wantCallbacks {
+		t.Errorf("Callbacks: got %d; want %d", gotCallbacks, wantCallbacks)
+	}
+
+	// Additionally, the policy change callback should be received no sooner
+	// than [policyReloadMinDelay] and no later than [policyReloadMaxDelay].
+	if gotDelay < policyReloadMinDelay || gotDelay > policyReloadMaxDelay {
+		t.Errorf("Delay: got %v; want >= %v && <= %v", gotDelay, policyReloadMinDelay, policyReloadMaxDelay)
+	}
+
+	// Verify that the [PolicyChange] received via the callback
+	// contains the final policy setting value.
+	if !change.HasChanged(settingB.Key()) {
+		t.Errorf("Policy setting %q has not changed", settingB.Key())
+	}
+	if change.HasChanged(settingA.Key()) {
+		t.Errorf("Policy setting %q was unexpectedly changed", settingA.Key())
+	}
+	if _, ok := change.Old().GetSetting(settingB.Key()); ok {
+		t.Fatalf("Policy setting %q unexpectedly exists", settingB.Key())
+	}
+	if gotValue := change.New().Get(settingB.Key()); gotValue != wantValueB {
+		t.Errorf("Policy setting %q: got %q; want %q", settingB.Key(), gotValue, wantValueB)
+	}
+
+	// Lastly, if a policy store issues a change notification, but the effective policy
+	// remains unchanged, the [Policy] should ignore it without invoking the change callbacks.
+	store.NotifyPolicyChanged()
+	select {
+	case <-policyChanged:
+		t.Fatal("Unexpected policy changed notification")
+	case <-time.After(policyReloadMaxDelay):
+	}
+}
+
+// TestClosePolicySource verifies that abruptly closing a policy store (without
+// unregistering it first) closes the dependent [Policy], while the last
+// effective snapshot remains valid and further [Policy.Reload] calls fail
+// with [ErrPolicyClosed].
+func TestClosePolicySource(t *testing.T) {
+	testSetting := setting.NewDefinition("TestSetting", setting.DeviceSetting, setting.StringValue)
+	if err := setting.SetDefinitionsForTest(t, testSetting); err != nil {
+		t.Fatalf("SetDefinitionsForTest failed: %v", err)
+	}
+
+	wantSettingValue := "TestValue"
+	store := source.NewTestStoreOf(t, source.TestSettingOf(testSetting.Key(), wantSettingValue))
+	if _, err := RegisterStoreForTest(t, "TestSource", setting.DeviceScope, store); err != nil {
+		t.Fatalf("Failed to register policy store: %v", err)
+	}
+	policy, err := policyForTest(t, setting.DeviceScope)
+	if err != nil {
+		t.Fatalf("Failed to get effective policy: %v", err)
+	}
+
+	initialSnapshot, err := policy.Reload()
+	if err != nil {
+		t.Fatalf("Failed to reload policy: %v", err)
+	}
+	if gotSettingValue, err := initialSnapshot.GetErr(testSetting.Key()); err != nil {
+		t.Fatalf("Failed to get %q setting value: %v", testSetting.Key(), err)
+	} else if gotSettingValue != wantSettingValue {
+		t.Fatalf("Setting %q: got %q; want %q", testSetting.Key(), gotSettingValue, wantSettingValue)
+	}
+
+	store.Close()
+
+	// Closing a policy source abruptly without removing it first should invalidate and close the policy.
+	<-policy.Done()
+	if policy.IsValid() {
+		t.Fatal("The policy was not properly closed")
+	}
+
+	// The resulting policy snapshot should remain valid and unchanged.
+	finalSnapshot := policy.Get()
+	if !finalSnapshot.Equal(initialSnapshot) {
+		t.Fatal("Policy snapshot has changed")
+	}
+	if gotSettingValue, err := finalSnapshot.GetErr(testSetting.Key()); err != nil {
+		t.Fatalf("Failed to get final %q setting value: %v", testSetting.Key(), err)
+	} else if gotSettingValue != wantSettingValue {
+		t.Fatalf("Setting %q: got %q; want %q", testSetting.Key(), gotSettingValue, wantSettingValue)
+	}
+
+	// However, any further requests to reload the policy should fail.
+	if _, err := policy.Reload(); err == nil || !errors.Is(err, ErrPolicyClosed) {
+		t.Fatalf("Reload: gotErr: %v; wantErr: %v", err, ErrPolicyClosed)
+	}
+}
+
+// TestRemovePolicySource verifies that unregistering a policy store keeps the
+// effective [Policy] valid while removing the store's settings from it, and
+// that a [Policy] with no remaining sources is valid but empty.
+func TestRemovePolicySource(t *testing.T) {
+	// Register policy settings used in this test.
+	settingA := setting.NewDefinition("TestSettingA", setting.DeviceSetting, setting.StringValue)
+	settingB := setting.NewDefinition("TestSettingB", setting.DeviceSetting, setting.StringValue)
+	if err := setting.SetDefinitionsForTest(t, settingA, settingB); err != nil {
+		t.Fatalf("SetDefinitionsForTest failed: %v", err)
+	}
+
+	// Register two policy stores.
+	storeA := source.NewTestStoreOf(t, source.TestSettingOf(settingA.Key(), "A"))
+	storeRegA, err := RegisterStoreForTest(t, "TestSourceA", setting.DeviceScope, storeA)
+	if err != nil {
+		t.Fatalf("Failed to register policy store A: %v", err)
+	}
+	storeB := source.NewTestStoreOf(t, source.TestSettingOf(settingB.Key(), "B"))
+	storeRegB, err := RegisterStoreForTest(t, "TestSourceB", setting.DeviceScope, storeB)
+	if err != nil {
+		t.Fatalf("Failed to register policy store B: %v", err)
+	}
+
+	// Create an effective [Policy] that reads policy settings from the two stores.
+	policy, err := policyForTest(t, setting.DeviceScope)
+	if err != nil {
+		t.Fatalf("Failed to get effective policy: %v", err)
+	}
+
+	// Verify that the [Policy] uses both stores and includes policy settings from each.
+	if gotSources, wantSources := len(policy.sources), 2; gotSources != wantSources {
+		t.Fatalf("Policy Sources: got %v; want %v", gotSources, wantSources)
+	}
+	if got, want := policy.Get().Get(settingA.Key()), "A"; got != want {
+		t.Fatalf("Setting %q: got %q; want %q", settingA.Key(), got, want)
+	}
+	if got, want := policy.Get().Get(settingB.Key()), "B"; got != want {
+		t.Fatalf("Setting %q: got %q; want %q", settingB.Key(), got, want)
+	}
+
+	// Unregister Store A and verify that the effective policy remains valid.
+	// It should no longer use the removed store or include any policy settings from it.
+	if err := storeRegA.Unregister(); err != nil {
+		t.Fatalf("Failed to unregister Store A: %v", err)
+	}
+	if !policy.IsValid() {
+		t.Fatalf("Policy was unexpectedly closed")
+	}
+	if gotSources, wantSources := len(policy.sources), 1; gotSources != wantSources {
+		t.Fatalf("Policy Sources: got %v; want %v", gotSources, wantSources)
+	}
+	if got, want := policy.Get().Get(settingA.Key()), any(nil); got != want {
+		t.Fatalf("Setting %q: got %q; want %q", settingA.Key(), got, want)
+	}
+	if got, want := policy.Get().Get(settingB.Key()), "B"; got != want {
+		t.Fatalf("Setting %q: got %q; want %q", settingB.Key(), got, want)
+	}
+
+	// Unregister Store B and verify that the effective policy is still valid.
+	// However, it should be empty since there are no associated policy sources.
+	if err := storeRegB.Unregister(); err != nil {
+		t.Fatalf("Failed to unregister Store B: %v", err)
+	}
+	if !policy.IsValid() {
+		t.Fatalf("Policy was unexpectedly closed")
+	}
+	if gotSources, wantSources := len(policy.sources), 0; gotSources != wantSources {
+		t.Fatalf("Policy Sources: got %v; want %v", gotSources, wantSources)
+	}
+	if got := policy.Get(); got.Len() != 0 {
+		t.Fatalf("Settings: got %v; want {Empty}", got)
+	}
+}
+
+// TestReplacePolicySource verifies that [StoreRegistration.ReplaceStore]
+// atomically swaps a policy store: a value change triggers a change callback,
+// while replacing a store with an identical one does not.
+func TestReplacePolicySource(t *testing.T) {
+	setForTest(t, &policyReloadMinDelay, 100*time.Millisecond)
+	setForTest(t, &policyReloadMaxDelay, 500*time.Millisecond)
+
+	// Register policy settings used in this test.
+	testSetting := setting.NewDefinition("TestSettingA", setting.DeviceSetting, setting.StringValue)
+	if err := setting.SetDefinitionsForTest(t, testSetting); err != nil {
+		t.Fatalf("SetDefinitionsForTest failed: %v", err)
+	}
+
+	// Create two policy stores.
+	initialStore := source.NewTestStoreOf(t, source.TestSettingOf(testSetting.Key(), "InitialValue"))
+	newStore := source.NewTestStoreOf(t, source.TestSettingOf(testSetting.Key(), "NewValue"))
+	unchangedStore := source.NewTestStoreOf(t, source.TestSettingOf(testSetting.Key(), "NewValue"))
+
+	// Register the initial store and create an effective [Policy] that reads policy settings from it.
+	reg, err := RegisterStoreForTest(t, "TestStore", setting.DeviceScope, initialStore)
+	if err != nil {
+		t.Fatalf("Failed to register the initial store: %v", err)
+	}
+	policy, err := policyForTest(t, setting.DeviceScope)
+	if err != nil {
+		t.Fatalf("Failed to get effective policy: %v", err)
+	}
+
+	// Verify that the test setting has its initial value.
+	if got, want := policy.Get().Get(testSetting.Key()), "InitialValue"; got != want {
+		t.Fatalf("Setting %q: got %q; want %q", testSetting.Key(), got, want)
+	}
+
+	// Subscribe to the policy change callback.
+	policyChanged := make(chan *PolicyChange, 1)
+	unregister := policy.RegisterChangeCallback(func(pc *PolicyChange) { policyChanged <- pc })
+	t.Cleanup(unregister)
+
+	// Now, let's replace the initial store with the new store.
+	reg, err = reg.ReplaceStore(newStore)
+	if err != nil {
+		t.Fatalf("Failed to replace the policy store: %v", err)
+	}
+	t.Cleanup(func() { reg.Unregister() })
+
+	// We should receive a policy change notification as the setting value has changed.
+	<-policyChanged
+
+	// Verify that the test setting has the new value.
+	if got, want := policy.Get().Get(testSetting.Key()), "NewValue"; got != want {
+		t.Fatalf("Setting %q: got %q; want %q", testSetting.Key(), got, want)
+	}
+
+	// Replacing a policy store with an identical one containing the same
+	// values for the same settings should not be considered a policy change.
+	reg, err = reg.ReplaceStore(unchangedStore)
+	if err != nil {
+		t.Fatalf("Failed to replace the policy store: %v", err)
+	}
+	t.Cleanup(func() { reg.Unregister() })
+
+	// Wait out the full reload window before declaring success; a non-blocking
+	// check with a default branch would run before any reload could happen and
+	// would miss a late (unexpected) notification.
+	select {
+	case <-policyChanged:
+		t.Fatal("Unexpected policy changed notification")
+	case <-time.After(policyReloadMaxDelay):
+	}
+}
+
+// TestAddClosedPolicySource verifies that creating an effective policy fails
+// with [source.ErrStoreClosed] when a registered store has already been closed.
+func TestAddClosedPolicySource(t *testing.T) {
+	store := source.NewTestStoreOf[string](t)
+	if _, err := RegisterStoreForTest(t, "TestSource", setting.DeviceScope, store); err != nil {
+		t.Fatalf("Failed to register policy store: %v", err)
+	}
+	store.Close()
+
+	_, err := policyForTest(t, setting.DeviceScope)
+	if err == nil || !errors.Is(err, source.ErrStoreClosed) {
+		t.Fatalf("got: %v; want: %v", err, source.ErrStoreClosed)
+	}
+}
+
+// TestClosePolicyMoreThanOnce verifies that concurrently closing the same
+// [Policy] from many goroutines is safe, regardless of how many sources
+// the policy has.
+func TestClosePolicyMoreThanOnce(t *testing.T) {
+	tests := []struct {
+		name       string
+		numSources int
+	}{
+		{
+			name:       "NoSources",
+			numSources: 0,
+		},
+		{
+			name:       "OneSource",
+			numSources: 1,
+		},
+		{
+			name:       "ManySources",
+			numSources: 10,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			for i := range tt.numSources {
+				store := source.NewTestStoreOf[string](t)
+				if _, err := RegisterStoreForTest(t, "TestSource #"+strconv.Itoa(i), setting.DeviceScope, store); err != nil {
+					t.Fatalf("Failed to register policy store: %v", err)
+				}
+			}
+
+			policy, err := policyForTest(t, setting.DeviceScope)
+			if err != nil {
+				t.Fatalf("failed to get effective policy: %v", err)
+			}
+
+			const N = 10000
+			var wg sync.WaitGroup
+			for range N {
+				wg.Add(1)
+				go func() {
+					// Defer Done so wg.Wait() actually waits for every
+					// Close call to complete before the subtest ends;
+					// signaling Done up front would let goroutines outlive
+					// the test.
+					defer wg.Done()
+					policy.Close()
+					<-policy.Done()
+				}()
+			}
+			wg.Wait()
+		})
+	}
+}
+
+// checkPolicySources fails the test if gotPolicy's sources do not match
+// wantSources. Sources are compared by name and scope only, and wantSources
+// is sorted in place into the policy's canonical order before comparison.
+func checkPolicySources(tb testing.TB, gotPolicy *Policy, wantSources []*source.Source) {
+	tb.Helper()
+	sort.SliceStable(wantSources, func(i, j int) bool {
+		return wantSources[i].Compare(wantSources[j]) < 0
+	})
+	gotSources := make([]*source.Source, len(gotPolicy.sources))
+	for i := range gotPolicy.sources {
+		gotSources[i] = gotPolicy.sources[i].Source
+	}
+	// Compare sources by (name, scope) summaries so the diff output is readable.
+	type sourceSummary struct{ Name, Scope string }
+	toSourceSummary := cmp.Transformer("source", func(s *source.Source) sourceSummary { return sourceSummary{s.Name(), s.Scope().String()} })
+	if diff := cmp.Diff(wantSources, gotSources, toSourceSummary, cmpopts.EquateEmpty()); diff != "" {
+		tb.Errorf("Policy Sources mismatch: %v", diff)
+	}
+}
+
+// policyForTest is like [PolicyFor], but it deletes the policy
+// when tb and all its subtests complete.
+func policyForTest(tb testing.TB, target setting.PolicyScope) (*Policy, error) {
+	tb.Helper()
+
+	policy, err := PolicyFor(target)
+	if err != nil {
+		return nil, err
+	}
+	tb.Cleanup(func() {
+		// Close the policy, wait for the close to finish, then remove it
+		// from the package-level caches so later tests start fresh.
+		policy.Close()
+		<-policy.Done()
+		deletePolicy(policy)
+	})
+	return policy, nil
+}
diff --git a/util/syspolicy/rsop/rsop.go b/util/syspolicy/rsop/rsop.go
new file mode 100644
index 0000000000000..429b9b10121b3
--- /dev/null
+++ b/util/syspolicy/rsop/rsop.go
@@ -0,0 +1,174 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Package rsop facilitates [source.Store] registration via [RegisterStore]
+// and provides access to the effective policy merged from all registered sources
+// via [PolicyFor].
+package rsop
+
+import (
+ "errors"
+ "fmt"
+ "slices"
+ "sync"
+
+ "tailscale.com/syncs"
+ "tailscale.com/util/slicesx"
+ "tailscale.com/util/syspolicy/internal"
+ "tailscale.com/util/syspolicy/setting"
+ "tailscale.com/util/syspolicy/source"
+)
+
+var (
+	policyMu          sync.Mutex       // protects [policySources] and [effectivePolicies]
+	policySources     []*source.Source // all registered policy sources, in registration order
+	effectivePolicies []*Policy        // all active (non-closed) effective policies returned by [PolicyFor]
+
+	// effectivePolicyLRU is an LRU cache of [Policy] by [setting.Scope].
+	// Although there could be multiple [setting.PolicyScope] instances with the same [setting.Scope],
+	// such as two user scopes for different users, there is only one [setting.DeviceScope], only one
+	// [setting.CurrentProfileScope], and in most cases, only one active user scope.
+	// Therefore, cache misses that require falling back to [effectivePolicies] are extremely rare.
+	// It's a fixed-size array of atomic values and can be accessed without [policyMu] held.
+	effectivePolicyLRU [setting.NumScopes]syncs.AtomicValue[*Policy]
+)
+
+// PolicyFor returns the [Policy] for the specified scope,
+// creating it from the registered [source.Store]s if it doesn't already exist.
+func PolicyFor(scope setting.PolicyScope) (*Policy, error) {
+	if err := internal.Init.Do(); err != nil {
+		return nil, err
+	}
+	// Fast path: serve from the per-scope-kind LRU cache when the cached
+	// policy matches this exact scope and hasn't been closed.
+	policy := effectivePolicyLRU[scope.Kind()].Load()
+	if policy != nil && policy.Scope() == scope && policy.IsValid() {
+		return policy, nil
+	}
+	return policyForSlow(scope)
+}
+
+// policyForSlow is the slow path of [PolicyFor]: it looks up (or creates)
+// the effective policy for scope under [policyMu] and refreshes the LRU cache.
+func policyForSlow(scope setting.PolicyScope) (policy *Policy, err error) {
+	defer func() {
+		// Always update the LRU cache on exit if we found (or created)
+		// a policy for the specified scope.
+		if policy != nil {
+			effectivePolicyLRU[scope.Kind()].Store(policy)
+		}
+	}()
+
+	policyMu.Lock()
+	defer policyMu.Unlock()
+	// Re-check under the lock: another goroutine may have created the
+	// policy between the LRU miss and acquiring policyMu.
+	if policy, ok := findPolicyByScopeLocked(scope); ok {
+		return policy, nil
+	}
+
+	// If there is no existing effective policy for the specified scope,
+	// we need to create one using the policy sources registered for that scope.
+	sources := slicesx.Filter(nil, policySources, func(source *source.Source) bool {
+		return source.Scope().Contains(scope)
+	})
+	policy, err = newPolicy(scope, sources...)
+	if err != nil {
+		return nil, err
+	}
+	effectivePolicies = append(effectivePolicies, policy)
+	return policy, nil
+}
+
+// findPolicyByScopeLocked returns a policy with the specified scope and true if
+// one exists in the [effectivePolicies] list, otherwise it returns nil, false.
+// Closed (invalid) policies are skipped.
+// [policyMu] must be held.
+func findPolicyByScopeLocked(target setting.PolicyScope) (policy *Policy, ok bool) {
+	for _, policy := range effectivePolicies {
+		if policy.Scope() == target && policy.IsValid() {
+			return policy, true
+		}
+	}
+	return nil, false
+}
+
+// deletePolicy deletes the specified effective policy from [effectivePolicies]
+// and [effectivePolicyLRU].
+func deletePolicy(policy *Policy) {
+	policyMu.Lock()
+	defer policyMu.Unlock()
+	if i := slices.Index(effectivePolicies, policy); i != -1 {
+		effectivePolicies = slices.Delete(effectivePolicies, i, i+1)
+	}
+	// Clear the LRU slot only if it still refers to this policy;
+	// a newer policy for the same scope kind may have been cached since.
+	effectivePolicyLRU[policy.Scope().Kind()].CompareAndSwap(policy, nil)
+}
+
+// registerSource registers the specified [source.Source] to be used by the package.
+// It updates existing [Policy]s returned by [PolicyFor] to use this source if
+// they are within the source's [setting.PolicyScope].
+// Registering an already-registered source is a no-op.
+func registerSource(source *source.Source) error {
+	policyMu.Lock()
+	defer policyMu.Unlock()
+	if slices.Contains(policySources, source) {
+		// already registered
+		return nil
+	}
+	policySources = append(policySources, source)
+	return forEachEffectivePolicyLocked(func(policy *Policy) error {
+		if !source.Scope().Contains(policy.Scope()) {
+			// Policy settings in the specified source do not apply
+			// to the scope of this effective policy.
+			// For example, a user policy source is being registered
+			// while the effective policy is for the device (or another user).
+			return nil
+		}
+		return policy.addSource(source)
+	})
+}
+
+// replaceSource is like [unregisterSource](old) followed by [registerSource](new),
+// but performed atomically: the effective policy will contain settings
+// either from the old source or the new source, never both and never neither.
+// It fails if old is not currently registered.
+func replaceSource(old, new *source.Source) error {
+	policyMu.Lock()
+	defer policyMu.Unlock()
+	oldIndex := slices.Index(policySources, old)
+	if oldIndex == -1 {
+		return fmt.Errorf("the source is not registered: %v", old)
+	}
+	// Swap in place so the registration order of sources is preserved.
+	policySources[oldIndex] = new
+	return forEachEffectivePolicyLocked(func(policy *Policy) error {
+		// Only update policies that both the old and new source apply to.
+		if !old.Scope().Contains(policy.Scope()) || !new.Scope().Contains(policy.Scope()) {
+			return nil
+		}
+		return policy.replaceSource(old, new)
+	})
+}
+
+// unregisterSource unregisters the specified [source.Source],
+// so that it won't be used by any new or existing [Policy].
+// Unregistering a source that is not registered is a no-op.
+func unregisterSource(source *source.Source) error {
+	policyMu.Lock()
+	defer policyMu.Unlock()
+	index := slices.Index(policySources, source)
+	if index == -1 {
+		return nil
+	}
+	policySources = slices.Delete(policySources, index, index+1)
+	return forEachEffectivePolicyLocked(func(policy *Policy) error {
+		if !source.Scope().Contains(policy.Scope()) {
+			return nil
+		}
+		return policy.removeSource(source)
+	})
+}
+
+// forEachEffectivePolicyLocked calls fn for every non-closed [Policy] in [effectivePolicies].
+// It accumulates the returned errors and returns an error that wraps all errors returned by fn.
+// Errors wrapping [ErrPolicyClosed] are ignored: a policy that closed
+// concurrently no longer needs updating.
+// The [policyMu] mutex must be held while this function is executed.
+func forEachEffectivePolicyLocked(fn func(p *Policy) error) error {
+	var errs []error
+	for _, policy := range effectivePolicies {
+		if policy.IsValid() {
+			err := fn(policy)
+			if err != nil && !errors.Is(err, ErrPolicyClosed) {
+				errs = append(errs, err)
+			}
+		}
+	}
+	return errors.Join(errs...)
+}
diff --git a/util/syspolicy/rsop/store_registration.go b/util/syspolicy/rsop/store_registration.go
new file mode 100644
index 0000000000000..f9836846e18ee
--- /dev/null
+++ b/util/syspolicy/rsop/store_registration.go
@@ -0,0 +1,98 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package rsop
+
+import (
+ "errors"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "tailscale.com/util/syspolicy/internal"
+ "tailscale.com/util/syspolicy/setting"
+ "tailscale.com/util/syspolicy/source"
+)
+
+// ErrAlreadyConsumed is the error returned when [StoreRegistration.ReplaceStore]
+// or [StoreRegistration.Unregister] is called more than once.
+var ErrAlreadyConsumed = errors.New("the store registration is no longer valid")
+
+// StoreRegistration is a [source.Store] registered for use in the specified scope.
+// It can be used to unregister the store, or replace it with another one.
+// A registration is single-use: the first successful ReplaceStore or
+// Unregister consumes it.
+type StoreRegistration struct {
+	source   *source.Source
+	m        sync.Mutex  // protects the [StoreRegistration.consumeSlow] path
+	consumed atomic.Bool // can be read without holding m, but must be written with m held
+}
+
+// RegisterStore registers a new policy [source.Store] with the specified name and [setting.PolicyScope].
+// It returns a [StoreRegistration] that can later unregister or replace the store.
+func RegisterStore(name string, scope setting.PolicyScope, store source.Store) (*StoreRegistration, error) {
+	return newStoreRegistration(name, scope, store)
+}
+
+// RegisterStoreForTest is like [RegisterStore], but unregisters the store when
+// tb and all its subtests complete. It also shortens the policy reload delays
+// for the duration of the test so reload-dependent tests run quickly.
+func RegisterStoreForTest(tb internal.TB, name string, scope setting.PolicyScope, store source.Store) (*StoreRegistration, error) {
+	setForTest(tb, &policyReloadMinDelay, 10*time.Millisecond)
+	setForTest(tb, &policyReloadMaxDelay, 500*time.Millisecond)
+
+	reg, err := RegisterStore(name, scope, store)
+	if err == nil {
+		tb.Cleanup(func() {
+			// The registration may already have been consumed by the test
+			// (e.g. via ReplaceStore or Unregister); that's not a failure.
+			if err := reg.Unregister(); err != nil && !errors.Is(err, ErrAlreadyConsumed) {
+				tb.Fatalf("Unregister failed: %v", err)
+			}
+		})
+	}
+	return reg, err // may be nil or non-nil
+}
+
+// newStoreRegistration wraps store in a new [source.Source], registers it with
+// the package, and returns a [StoreRegistration] for it.
+func newStoreRegistration(name string, scope setting.PolicyScope, store source.Store) (*StoreRegistration, error) {
+	source := source.NewSource(name, scope, store)
+	if err := registerSource(source); err != nil {
+		return nil, err
+	}
+	return &StoreRegistration{source: source}, nil
+}
+
+// ReplaceStore replaces the registered store with the new one,
+// returning a new [StoreRegistration] or an error.
+// The new source keeps the original registration's name and scope.
+// On success the receiver is consumed and must not be used again.
+func (r *StoreRegistration) ReplaceStore(new source.Store) (*StoreRegistration, error) {
+	var res *StoreRegistration
+	err := r.consume(func() error {
+		newSource := source.NewSource(r.source.Name(), r.source.Scope(), new)
+		if err := replaceSource(r.source, newSource); err != nil {
+			return err
+		}
+		res = &StoreRegistration{source: newSource}
+		return nil
+	})
+	return res, err
+}
+
+// Unregister reverts the registration, consuming r on success.
+// Subsequent calls return [ErrAlreadyConsumed].
+func (r *StoreRegistration) Unregister() error {
+	return r.consume(func() error { return unregisterSource(r.source) })
+}
+
+// consume invokes fn, consuming r if no error is returned.
+// It returns [ErrAlreadyConsumed] on subsequent calls after the first successful call.
+// The lock-free check here is the fast half of a double-checked pattern;
+// [StoreRegistration.consumeSlow] re-checks under the mutex.
+func (r *StoreRegistration) consume(fn func() error) (err error) {
+	if r.consumed.Load() {
+		return ErrAlreadyConsumed
+	}
+	return r.consumeSlow(fn)
+}
+
+// consumeSlow is the locked half of [StoreRegistration.consume]: it re-checks
+// the consumed flag under the mutex, runs fn, and marks the registration
+// consumed only if fn succeeds.
+func (r *StoreRegistration) consumeSlow(fn func() error) (err error) {
+	r.m.Lock()
+	defer r.m.Unlock()
+	if r.consumed.Load() {
+		return ErrAlreadyConsumed
+	}
+	if err = fn(); err == nil {
+		r.consumed.Store(true)
+	}
+	return err // may be nil or non-nil
+}
diff --git a/util/syspolicy/setting/key.go b/util/syspolicy/setting/key.go
index 406fde1321cc2..aa7606d36324a 100644
--- a/util/syspolicy/setting/key.go
+++ b/util/syspolicy/setting/key.go
@@ -10,4 +10,4 @@ package setting
type Key string
// KeyPathSeparator allows logical grouping of policy settings into categories.
-const KeyPathSeparator = "/"
+const KeyPathSeparator = '/'
diff --git a/util/syspolicy/setting/policy_scope.go b/util/syspolicy/setting/policy_scope.go
index 55fa339e7e813..c2039fdda15b8 100644
--- a/util/syspolicy/setting/policy_scope.go
+++ b/util/syspolicy/setting/policy_scope.go
@@ -8,6 +8,7 @@ import (
"strings"
"tailscale.com/types/lazy"
+ "tailscale.com/util/syspolicy/internal"
)
var (
@@ -35,6 +36,8 @@ type PolicyScope struct {
// when querying policy settings.
// It returns [DeviceScope], unless explicitly changed with [SetDefaultScope].
func DefaultScope() PolicyScope {
+ // Allow deferred package init functions to override the default scope.
+ internal.Init.Do()
return lazyDefaultScope.Get(func() PolicyScope { return DeviceScope })
}
diff --git a/util/syspolicy/setting/raw_item.go b/util/syspolicy/setting/raw_item.go
index 30480d8923f71..cf46e54b76217 100644
--- a/util/syspolicy/setting/raw_item.go
+++ b/util/syspolicy/setting/raw_item.go
@@ -5,7 +5,11 @@ package setting
import (
"fmt"
+ "reflect"
+ jsonv2 "github.com/go-json-experiment/json"
+ "github.com/go-json-experiment/json/jsontext"
+ "tailscale.com/types/opt"
"tailscale.com/types/structs"
)
@@ -17,10 +21,15 @@ import (
// or converted from strings, these setting types predate the typed policy
// hierarchies, and must be supported at this layer.
type RawItem struct {
- _ structs.Incomparable
- value any
- err *ErrorText
- origin *Origin // or nil
+ _ structs.Incomparable
+ data rawItemJSON
+}
+
+// rawItemJSON holds JSON-marshallable data for [RawItem].
+type rawItemJSON struct {
+ Value RawValue `json:",omitzero"`
+ Error *ErrorText `json:",omitzero"` // or nil
+ Origin *Origin `json:",omitzero"` // or nil
}
// RawItemOf returns a [RawItem] with the specified value.
@@ -30,20 +39,20 @@ func RawItemOf(value any) RawItem {
// RawItemWith returns a [RawItem] with the specified value, error and origin.
func RawItemWith(value any, err *ErrorText, origin *Origin) RawItem {
- return RawItem{value: value, err: err, origin: origin}
+ return RawItem{data: rawItemJSON{Value: RawValue{opt.ValueOf(value)}, Error: err, Origin: origin}}
}
// Value returns the value of the policy setting, or nil if the policy setting
// is not configured, or an error occurred while reading it.
func (i RawItem) Value() any {
- return i.value
+ return i.data.Value.Get()
}
// Error returns the error that occurred when reading the policy setting,
// or nil if no error occurred.
func (i RawItem) Error() error {
- if i.err != nil {
- return i.err
+ if i.data.Error != nil {
+ return i.data.Error
}
return nil
}
@@ -51,17 +60,103 @@ func (i RawItem) Error() error {
// Origin returns an optional [Origin] indicating where the policy setting is
// configured.
func (i RawItem) Origin() *Origin {
- return i.origin
+ return i.data.Origin
}
// String implements [fmt.Stringer].
func (i RawItem) String() string {
var suffix string
- if i.origin != nil {
- suffix = fmt.Sprintf(" - {%v}", i.origin)
+ if i.data.Origin != nil {
+ suffix = fmt.Sprintf(" - {%v}", i.data.Origin)
+ }
+ if i.data.Error != nil {
+ return fmt.Sprintf("Error{%q}%s", i.data.Error.Error(), suffix)
+ }
+ return fmt.Sprintf("%v%s", i.data.Value.Value, suffix)
+}
+
+// MarshalJSONV2 implements [jsonv2.MarshalerV2].
+func (i RawItem) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error {
+ return jsonv2.MarshalEncode(out, &i.data, opts)
+}
+
+// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2].
+func (i *RawItem) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error {
+ return jsonv2.UnmarshalDecode(in, &i.data, opts)
+}
+
+// MarshalJSON implements [json.Marshaler].
+func (i RawItem) MarshalJSON() ([]byte, error) {
+ return jsonv2.Marshal(i) // uses MarshalJSONV2
+}
+
+// UnmarshalJSON implements [json.Unmarshaler].
+func (i *RawItem) UnmarshalJSON(b []byte) error {
+ return jsonv2.Unmarshal(b, i) // uses UnmarshalJSONV2
+}
+
+// RawValue represents a raw policy setting value read from a policy store.
+// It is JSON-marshallable and facilitates unmarshalling of JSON values
+// into corresponding policy setting types, with special handling for JSON numbers
+// (unmarshalled as uint64) and JSON string arrays (unmarshalled as []string).
+// See also [RawValue.UnmarshalJSONV2].
+type RawValue struct {
+ opt.Value[any]
+}
+
+// RawValueType is a constraint that permits raw setting value types.
+type RawValueType interface {
+ bool | uint64 | string | []string
+}
+
+// RawValueOf returns a new [RawValue] holding the specified value.
+func RawValueOf[T RawValueType](v T) RawValue {
+ return RawValue{opt.ValueOf[any](v)}
+}
+
+// MarshalJSONV2 implements [jsonv2.MarshalerV2].
+func (v RawValue) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error {
+ return jsonv2.MarshalEncode(out, v.Value, opts)
+}
+
+// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2] by attempting to unmarshal
+// a JSON value as one of the supported policy setting value types (bool, string, uint64, or []string),
+// based on the JSON value type. It fails if the JSON value is an object, if it's a JSON number that
+// cannot be represented as a uint64, or if a JSON array contains anything other than strings.
+func (v *RawValue) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error {
+ var valPtr any
+ switch k := in.PeekKind(); k {
+ case 't', 'f':
+ valPtr = new(bool)
+ case '"':
+ valPtr = new(string)
+ case '0':
+ valPtr = new(uint64) // unmarshal JSON numbers as uint64
+ case '[', 'n':
+ valPtr = new([]string) // unmarshal arrays as string slices
+ case '{':
+ return fmt.Errorf("unexpected token: %v", k)
+ default:
+ panic("unreachable")
}
- if i.err != nil {
- return fmt.Sprintf("Error{%q}%s", i.err.Error(), suffix)
+ if err := jsonv2.UnmarshalDecode(in, valPtr, opts); err != nil {
+ v.Value.Clear()
+ return err
}
- return fmt.Sprintf("%v%s", i.value, suffix)
+ value := reflect.ValueOf(valPtr).Elem().Interface()
+ v.Value = opt.ValueOf(value)
+ return nil
+}
+
+// MarshalJSON implements [json.Marshaler].
+func (v RawValue) MarshalJSON() ([]byte, error) {
+ return jsonv2.Marshal(v) // uses MarshalJSONV2
}
+
+// UnmarshalJSON implements [json.Unmarshaler].
+func (v *RawValue) UnmarshalJSON(b []byte) error {
+ return jsonv2.Unmarshal(b, v) // uses UnmarshalJSONV2
+}
+
+// RawValues is a map of keyed setting values that can be read from JSON.
+type RawValues map[Key]RawValue
diff --git a/util/syspolicy/setting/raw_item_test.go b/util/syspolicy/setting/raw_item_test.go
new file mode 100644
index 0000000000000..05562d78c41f3
--- /dev/null
+++ b/util/syspolicy/setting/raw_item_test.go
@@ -0,0 +1,101 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package setting
+
+import (
+ "math"
+ "reflect"
+ "strconv"
+ "testing"
+
+ jsonv2 "github.com/go-json-experiment/json"
+)
+
+func TestMarshalUnmarshalRawValue(t *testing.T) {
+ tests := []struct {
+ name string
+ json string
+ want RawValue
+ wantErr bool
+ }{
+ {
+ name: "Bool/True",
+ json: `true`,
+ want: RawValueOf(true),
+ },
+ {
+ name: "Bool/False",
+ json: `false`,
+ want: RawValueOf(false),
+ },
+ {
+ name: "String/Empty",
+ json: `""`,
+ want: RawValueOf(""),
+ },
+ {
+ name: "String/NonEmpty",
+ json: `"Test"`,
+ want: RawValueOf("Test"),
+ },
+ {
+ name: "StringSlice/Null",
+ json: `null`,
+ want: RawValueOf([]string(nil)),
+ },
+ {
+ name: "StringSlice/Empty",
+ json: `[]`,
+ want: RawValueOf([]string{}),
+ },
+ {
+ name: "StringSlice/NonEmpty",
+ json: `["A", "B", "C"]`,
+ want: RawValueOf([]string{"A", "B", "C"}),
+ },
+ {
+ name: "StringSlice/NonStrings",
+ json: `[1, 2, 3]`,
+ wantErr: true,
+ },
+ {
+ name: "Number/Integer/0",
+ json: `0`,
+ want: RawValueOf(uint64(0)),
+ },
+ {
+ name: "Number/Integer/1",
+ json: `1`,
+ want: RawValueOf(uint64(1)),
+ },
+ {
+ name: "Number/Integer/MaxUInt64",
+ json: strconv.FormatUint(math.MaxUint64, 10),
+ want: RawValueOf(uint64(math.MaxUint64)),
+ },
+ {
+ name: "Number/Integer/Negative",
+ json: `-1`,
+ wantErr: true,
+ },
+ {
+ name: "Object",
+ json: `{}`,
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var got RawValue
+ gotErr := jsonv2.Unmarshal([]byte(tt.json), &got)
+ if (gotErr != nil) != tt.wantErr {
+ t.Fatalf("Error: got %v; want %v", gotErr, tt.wantErr)
+ }
+
+ if !tt.wantErr && !reflect.DeepEqual(got, tt.want) {
+ t.Fatalf("Value: got %v; want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/util/syspolicy/setting/setting.go b/util/syspolicy/setting/setting.go
index 93be287b11e86..70fb0a931e250 100644
--- a/util/syspolicy/setting/setting.go
+++ b/util/syspolicy/setting/setting.go
@@ -243,6 +243,9 @@ func registerLocked(d *Definition) {
func settingDefinitions() (DefinitionMap, error) {
return definitions.GetErr(func() (DefinitionMap, error) {
+ if err := internal.Init.Do(); err != nil {
+ return nil, err
+ }
definitionsMu.Lock()
defer definitionsMu.Unlock()
definitionsUsed = true
diff --git a/util/syspolicy/setting/snapshot.go b/util/syspolicy/setting/snapshot.go
index 512bc487c5b98..0af2bae0f480a 100644
--- a/util/syspolicy/setting/snapshot.go
+++ b/util/syspolicy/setting/snapshot.go
@@ -4,11 +4,14 @@
package setting
import (
+ "errors"
"iter"
"maps"
"slices"
"strings"
+ jsonv2 "github.com/go-json-experiment/json"
+ "github.com/go-json-experiment/json/jsontext"
xmaps "golang.org/x/exp/maps"
"tailscale.com/util/deephash"
)
@@ -65,6 +68,9 @@ func (s *Snapshot) GetSetting(k Key) (setting RawItem, ok bool) {
// Equal reports whether s and s2 are equal.
func (s *Snapshot) Equal(s2 *Snapshot) bool {
+ if s == s2 {
+ return true
+ }
if !s.EqualItems(s2) {
return false
}
@@ -135,6 +141,45 @@ func (s *Snapshot) String() string {
return sb.String()
}
+// snapshotJSON holds JSON-marshallable data for [Snapshot].
+type snapshotJSON struct {
+ Summary Summary `json:",omitzero"`
+ Settings map[Key]RawItem `json:",omitempty"`
+}
+
+// MarshalJSONV2 implements [jsonv2.MarshalerV2].
+func (s *Snapshot) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error {
+ data := &snapshotJSON{}
+ if s != nil {
+ data.Summary = s.summary
+ data.Settings = s.m
+ }
+ return jsonv2.MarshalEncode(out, data, opts)
+}
+
+// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2].
+func (s *Snapshot) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error {
+ if s == nil {
+ return errors.New("s must not be nil")
+ }
+ data := &snapshotJSON{}
+ if err := jsonv2.UnmarshalDecode(in, data, opts); err != nil {
+ return err
+ }
+ *s = Snapshot{m: data.Settings, sig: deephash.Hash(&data.Settings), summary: data.Summary}
+ return nil
+}
+
+// MarshalJSON implements [json.Marshaler].
+func (s *Snapshot) MarshalJSON() ([]byte, error) {
+ return jsonv2.Marshal(s) // uses MarshalJSONV2
+}
+
+// UnmarshalJSON implements [json.Unmarshaler].
+func (s *Snapshot) UnmarshalJSON(b []byte) error {
+ return jsonv2.Unmarshal(b, s) // uses UnmarshalJSONV2
+}
+
// MergeSnapshots returns a [Snapshot] that contains all [RawItem]s
// from snapshot1 and snapshot2 and the [Summary] with the narrower [PolicyScope].
// If there's a conflict between policy settings in the two snapshots,
diff --git a/util/syspolicy/setting/snapshot_test.go b/util/syspolicy/setting/snapshot_test.go
index e198d4a58bfdb..d41b362f06976 100644
--- a/util/syspolicy/setting/snapshot_test.go
+++ b/util/syspolicy/setting/snapshot_test.go
@@ -4,8 +4,13 @@
package setting
import (
+ "cmp"
+ "encoding/json"
"testing"
"time"
+
+ jsonv2 "github.com/go-json-experiment/json"
+ "tailscale.com/util/syspolicy/internal"
)
func TestMergeSnapshots(t *testing.T) {
@@ -30,134 +35,134 @@ func TestMergeSnapshots(t *testing.T) {
name: "first-nil",
s1: nil,
s2: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: true},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(true),
}),
want: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: true},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(true),
}),
},
{
name: "first-empty",
s1: NewSnapshot(map[Key]RawItem{}),
s2: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: false},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(false),
}),
want: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: false},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(false),
}),
},
{
name: "second-nil",
s1: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: true},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(true),
}),
s2: nil,
want: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: true},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(true),
}),
},
{
name: "second-empty",
s1: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: false},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(false),
}),
s2: NewSnapshot(map[Key]RawItem{}),
want: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: false},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(false),
}),
},
{
name: "no-conflicts",
s1: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: false},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(false),
}),
s2: NewSnapshot(map[Key]RawItem{
- "Setting4": {value: 2 * time.Hour},
- "Setting5": {value: VisibleByPolicy},
- "Setting6": {value: ShowChoiceByPolicy},
+ "Setting4": RawItemOf(2 * time.Hour),
+ "Setting5": RawItemOf(VisibleByPolicy),
+ "Setting6": RawItemOf(ShowChoiceByPolicy),
}),
want: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: false},
- "Setting4": {value: 2 * time.Hour},
- "Setting5": {value: VisibleByPolicy},
- "Setting6": {value: ShowChoiceByPolicy},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(false),
+ "Setting4": RawItemOf(2 * time.Hour),
+ "Setting5": RawItemOf(VisibleByPolicy),
+ "Setting6": RawItemOf(ShowChoiceByPolicy),
}),
},
{
name: "with-conflicts",
s1: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: true},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(true),
}),
s2: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 456},
- "Setting3": {value: false},
- "Setting4": {value: 2 * time.Hour},
+ "Setting1": RawItemOf(456),
+ "Setting3": RawItemOf(false),
+ "Setting4": RawItemOf(2 * time.Hour),
}),
want: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 456},
- "Setting2": {value: "String"},
- "Setting3": {value: false},
- "Setting4": {value: 2 * time.Hour},
+ "Setting1": RawItemOf(456),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(false),
+ "Setting4": RawItemOf(2 * time.Hour),
}),
},
{
name: "with-scope-first-wins",
s1: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: true},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(true),
}, DeviceScope),
s2: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 456},
- "Setting3": {value: false},
- "Setting4": {value: 2 * time.Hour},
+ "Setting1": RawItemOf(456),
+ "Setting3": RawItemOf(false),
+ "Setting4": RawItemOf(2 * time.Hour),
}, CurrentUserScope),
want: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: true},
- "Setting4": {value: 2 * time.Hour},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(true),
+ "Setting4": RawItemOf(2 * time.Hour),
}, CurrentUserScope),
},
{
name: "with-scope-second-wins",
s1: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: true},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(true),
}, CurrentUserScope),
s2: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 456},
- "Setting3": {value: false},
- "Setting4": {value: 2 * time.Hour},
+ "Setting1": RawItemOf(456),
+ "Setting3": RawItemOf(false),
+ "Setting4": RawItemOf(2 * time.Hour),
}, DeviceScope),
want: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 456},
- "Setting2": {value: "String"},
- "Setting3": {value: false},
- "Setting4": {value: 2 * time.Hour},
+ "Setting1": RawItemOf(456),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(false),
+ "Setting4": RawItemOf(2 * time.Hour),
}, CurrentUserScope),
},
{
@@ -170,28 +175,27 @@ func TestMergeSnapshots(t *testing.T) {
name: "with-scope-first-empty",
s1: NewSnapshot(map[Key]RawItem{}, CurrentUserScope),
s2: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: true}},
- DeviceScope, NewNamedOrigin("TestPolicy", DeviceScope)),
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(true)}, DeviceScope, NewNamedOrigin("TestPolicy", DeviceScope)),
want: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: true},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(true),
}, CurrentUserScope, NewNamedOrigin("TestPolicy", DeviceScope)),
},
{
name: "with-scope-second-empty",
s1: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: true},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(true),
}, CurrentUserScope),
s2: NewSnapshot(map[Key]RawItem{}),
want: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: true},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(true),
}, CurrentUserScope),
},
}
@@ -244,9 +248,9 @@ func TestSnapshotEqual(t *testing.T) {
name: "first-nil",
s1: nil,
s2: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: false},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(false),
}),
wantEqual: false,
wantEqualItems: false,
@@ -255,9 +259,9 @@ func TestSnapshotEqual(t *testing.T) {
name: "first-empty",
s1: NewSnapshot(map[Key]RawItem{}),
s2: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: false},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(false),
}),
wantEqual: false,
wantEqualItems: false,
@@ -265,9 +269,9 @@ func TestSnapshotEqual(t *testing.T) {
{
name: "second-nil",
s1: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: true},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(true),
}),
s2: nil,
wantEqual: false,
@@ -276,9 +280,9 @@ func TestSnapshotEqual(t *testing.T) {
{
name: "second-empty",
s1: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: false},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(false),
}),
s2: NewSnapshot(map[Key]RawItem{}),
wantEqual: false,
@@ -287,14 +291,14 @@ func TestSnapshotEqual(t *testing.T) {
{
name: "same-items-same-order-no-scope",
s1: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: false},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(false),
}),
s2: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: false},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(false),
}),
wantEqual: true,
wantEqualItems: true,
@@ -302,14 +306,14 @@ func TestSnapshotEqual(t *testing.T) {
{
name: "same-items-same-order-same-scope",
s1: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: false},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(false),
}, DeviceScope),
s2: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: false},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(false),
}, DeviceScope),
wantEqual: true,
wantEqualItems: true,
@@ -317,14 +321,14 @@ func TestSnapshotEqual(t *testing.T) {
{
name: "same-items-different-order-same-scope",
s1: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: false},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(false),
}, DeviceScope),
s2: NewSnapshot(map[Key]RawItem{
- "Setting3": {value: false},
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
+ "Setting3": RawItemOf(false),
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
}, DeviceScope),
wantEqual: true,
wantEqualItems: true,
@@ -332,14 +336,14 @@ func TestSnapshotEqual(t *testing.T) {
{
name: "same-items-same-order-different-scope",
s1: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: false},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(false),
}, DeviceScope),
s2: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: false},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(false),
}, CurrentUserScope),
wantEqual: false,
wantEqualItems: true,
@@ -347,14 +351,14 @@ func TestSnapshotEqual(t *testing.T) {
{
name: "different-items-same-scope",
s1: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 123},
- "Setting2": {value: "String"},
- "Setting3": {value: false},
+ "Setting1": RawItemOf(123),
+ "Setting2": RawItemOf("String"),
+ "Setting3": RawItemOf(false),
}, DeviceScope),
s2: NewSnapshot(map[Key]RawItem{
- "Setting4": {value: 2 * time.Hour},
- "Setting5": {value: VisibleByPolicy},
- "Setting6": {value: ShowChoiceByPolicy},
+ "Setting4": RawItemOf(2 * time.Hour),
+ "Setting5": RawItemOf(VisibleByPolicy),
+ "Setting6": RawItemOf(ShowChoiceByPolicy),
}, DeviceScope),
wantEqual: false,
wantEqualItems: false,
@@ -401,9 +405,9 @@ func TestSnapshotString(t *testing.T) {
{
name: "non-empty",
snapshot: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 2 * time.Hour},
- "Setting2": {value: VisibleByPolicy},
- "Setting3": {value: ShowChoiceByPolicy},
+ "Setting1": RawItemOf(2 * time.Hour),
+ "Setting2": RawItemOf(VisibleByPolicy),
+ "Setting3": RawItemOf(ShowChoiceByPolicy),
}, NewNamedOrigin("Test Policy", DeviceScope)),
wantString: `{Test Policy (Device)}
Setting1 = 2h0m0s
@@ -413,14 +417,14 @@ Setting3 = user-decides`,
{
name: "non-empty-with-item-origin",
snapshot: NewSnapshot(map[Key]RawItem{
- "Setting1": {value: 42, origin: NewNamedOrigin("Test Policy", DeviceScope)},
+ "Setting1": RawItemWith(42, nil, NewNamedOrigin("Test Policy", DeviceScope)),
}),
wantString: `Setting1 = 42 - {Test Policy (Device)}`,
},
{
name: "non-empty-with-item-error",
snapshot: NewSnapshot(map[Key]RawItem{
- "Setting1": {err: NewErrorText("bang!")},
+ "Setting1": RawItemWith(nil, NewErrorText("bang!"), nil),
}),
wantString: `Setting1 = Error{"bang!"}`,
},
@@ -433,3 +437,133 @@ Setting3 = user-decides`,
})
}
}
+
+func TestMarshalUnmarshalSnapshot(t *testing.T) {
+ tests := []struct {
+ name string
+ snapshot *Snapshot
+ wantJSON string
+ wantBack *Snapshot
+ }{
+ {
+ name: "Nil",
+ snapshot: (*Snapshot)(nil),
+ wantJSON: "null",
+ wantBack: NewSnapshot(nil),
+ },
+ {
+ name: "Zero",
+ snapshot: &Snapshot{},
+ wantJSON: "{}",
+ },
+ {
+ name: "Bool/True",
+ snapshot: NewSnapshot(map[Key]RawItem{"BoolPolicy": RawItemOf(true)}),
+ wantJSON: `{"Settings": {"BoolPolicy": {"Value": true}}}`,
+ },
+ {
+ name: "Bool/False",
+ snapshot: NewSnapshot(map[Key]RawItem{"BoolPolicy": RawItemOf(false)}),
+ wantJSON: `{"Settings": {"BoolPolicy": {"Value": false}}}`,
+ },
+ {
+ name: "String/Non-Empty",
+ snapshot: NewSnapshot(map[Key]RawItem{"StringPolicy": RawItemOf("StringValue")}),
+ wantJSON: `{"Settings": {"StringPolicy": {"Value": "StringValue"}}}`,
+ },
+ {
+ name: "String/Empty",
+ snapshot: NewSnapshot(map[Key]RawItem{"StringPolicy": RawItemOf("")}),
+ wantJSON: `{"Settings": {"StringPolicy": {"Value": ""}}}`,
+ },
+ {
+ name: "Integer/NonZero",
+ snapshot: NewSnapshot(map[Key]RawItem{"IntPolicy": RawItemOf(uint64(42))}),
+ wantJSON: `{"Settings": {"IntPolicy": {"Value": 42}}}`,
+ },
+ {
+ name: "Integer/Zero",
+ snapshot: NewSnapshot(map[Key]RawItem{"IntPolicy": RawItemOf(uint64(0))}),
+ wantJSON: `{"Settings": {"IntPolicy": {"Value": 0}}}`,
+ },
+ {
+ name: "String-List",
+ snapshot: NewSnapshot(map[Key]RawItem{"ListPolicy": RawItemOf([]string{"Value1", "Value2"})}),
+ wantJSON: `{"Settings": {"ListPolicy": {"Value": ["Value1", "Value2"]}}}`,
+ },
+ {
+ name: "Empty/With-Summary",
+ snapshot: NewSnapshot(
+ map[Key]RawItem{},
+ SummaryWith(CurrentUserScope, NewNamedOrigin("TestSource", DeviceScope)),
+ ),
+ wantJSON: `{"Summary": {"Origin": {"Name": "TestSource", "Scope": "Device"}, "Scope": "User"}}`,
+ },
+ {
+ name: "Setting/With-Summary",
+ snapshot: NewSnapshot(
+ map[Key]RawItem{"PolicySetting": RawItemOf(uint64(42))},
+ SummaryWith(CurrentUserScope, NewNamedOrigin("TestSource", DeviceScope)),
+ ),
+ wantJSON: `{
+ "Summary": {"Origin": {"Name": "TestSource", "Scope": "Device"}, "Scope": "User"},
+ "Settings": {"PolicySetting": {"Value": 42}}
+ }`,
+ },
+ {
+ name: "Settings/With-Origins",
+ snapshot: NewSnapshot(
+ map[Key]RawItem{
+ "SettingA": RawItemWith(uint64(42), nil, NewNamedOrigin("SourceA", DeviceScope)),
+ "SettingB": RawItemWith("B", nil, NewNamedOrigin("SourceB", CurrentProfileScope)),
+ "SettingC": RawItemWith(true, nil, NewNamedOrigin("SourceC", CurrentUserScope)),
+ },
+ ),
+ wantJSON: `{
+ "Settings": {
+ "SettingA": {"Value": 42, "Origin": {"Name": "SourceA", "Scope": "Device"}},
+ "SettingB": {"Value": "B", "Origin": {"Name": "SourceB", "Scope": "Profile"}},
+ "SettingC": {"Value": true, "Origin": {"Name": "SourceC", "Scope": "User"}}
+ }
+ }`,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ doTest := func(t *testing.T, useJSONv2 bool) {
+ var gotJSON []byte
+ var err error
+ if useJSONv2 {
+ gotJSON, err = jsonv2.Marshal(tt.snapshot)
+ } else {
+ gotJSON, err = json.Marshal(tt.snapshot)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if got, want, equal := internal.EqualJSONForTest(t, gotJSON, []byte(tt.wantJSON)); !equal {
+ t.Errorf("JSON: got %s; want %s", got, want)
+ }
+
+ gotBack := &Snapshot{}
+ if useJSONv2 {
+ err = jsonv2.Unmarshal(gotJSON, &gotBack)
+ } else {
+ err = json.Unmarshal(gotJSON, &gotBack)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if wantBack := cmp.Or(tt.wantBack, tt.snapshot); !gotBack.Equal(wantBack) {
+ t.Errorf("Snapshot: got %+v; want %+v", gotBack, wantBack)
+ }
+ }
+
+ t.Run("json", func(t *testing.T) { doTest(t, false) })
+ t.Run("jsonv2", func(t *testing.T) { doTest(t, true) })
+ })
+ }
+}
diff --git a/util/syspolicy/source/env_policy_store.go b/util/syspolicy/source/env_policy_store.go
new file mode 100644
index 0000000000000..299132b4e11b3
--- /dev/null
+++ b/util/syspolicy/source/env_policy_store.go
@@ -0,0 +1,159 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package source
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ "tailscale.com/util/syspolicy/setting"
+)
+
+var lookupEnv = os.LookupEnv // test hook
+
+var _ Store = (*EnvPolicyStore)(nil)
+
+// EnvPolicyStore is a [Store] that reads policy settings from environment variables.
+type EnvPolicyStore struct{}
+
+// ReadString implements [Store].
+func (s *EnvPolicyStore) ReadString(key setting.Key) (string, error) {
+ _, str, err := s.lookupSettingVariable(key)
+ if err != nil {
+ return "", err
+ }
+ return str, nil
+}
+
+// ReadUInt64 implements [Store].
+func (s *EnvPolicyStore) ReadUInt64(key setting.Key) (uint64, error) {
+ name, str, err := s.lookupSettingVariable(key)
+ if err != nil {
+ return 0, err
+ }
+ if str == "" {
+ return 0, setting.ErrNotConfigured
+ }
+ value, err := strconv.ParseUint(str, 0, 64)
+ if err != nil {
+ return 0, fmt.Errorf("%s: %w: %q is not a valid uint64", name, setting.ErrTypeMismatch, str)
+ }
+ return value, nil
+}
+
+// ReadBoolean implements [Store].
+func (s *EnvPolicyStore) ReadBoolean(key setting.Key) (bool, error) {
+ name, str, err := s.lookupSettingVariable(key)
+ if err != nil {
+ return false, err
+ }
+ if str == "" {
+ return false, setting.ErrNotConfigured
+ }
+ value, err := strconv.ParseBool(str)
+ if err != nil {
+ return false, fmt.Errorf("%s: %w: %q is not a valid bool", name, setting.ErrTypeMismatch, str)
+ }
+ return value, nil
+}
+
+// ReadStringArray implements [Store].
+func (s *EnvPolicyStore) ReadStringArray(key setting.Key) ([]string, error) {
+ _, str, err := s.lookupSettingVariable(key)
+ if err != nil || str == "" {
+ return nil, err
+ }
+ var dst int
+ res := strings.Split(str, ",")
+ for src := range res {
+ res[dst] = strings.TrimSpace(res[src])
+ if res[dst] != "" {
+ dst++
+ }
+ }
+ return res[0:dst], nil
+}
+
+func (s *EnvPolicyStore) lookupSettingVariable(key setting.Key) (name, value string, err error) {
+ name, err = keyToEnvVarName(key)
+ if err != nil {
+ return "", "", err
+ }
+ value, ok := lookupEnv(name)
+ if !ok {
+ return name, "", setting.ErrNotConfigured
+ }
+ return name, value, nil
+}
+
+var (
+ errEmptyKey = errors.New("key must not be empty")
+ errInvalidKey = errors.New("key must consist of alphanumeric characters and slashes")
+)
+
+// keyToEnvVarName returns the environment variable name for a given policy
+// setting key, or an error if the key is invalid. It converts CamelCase keys into
+// underscore-separated words and prepends the variable name with the TS_DEBUGSYSPOLICY prefix.
+// For example: AuthKey => TS_DEBUGSYSPOLICY_AUTH_KEY, ExitNodeAllowLANAccess => TS_DEBUGSYSPOLICY_EXIT_NODE_ALLOW_LAN_ACCESS, etc.
+//
+// It's fine to use this in [EnvPolicyStore] without caching variable names since it's not a hot path.
+// [EnvPolicyStore] is not a [Changeable] policy store, so the conversion will only happen once.
+func keyToEnvVarName(key setting.Key) (string, error) {
+ if len(key) == 0 {
+ return "", errEmptyKey
+ }
+
+ isLower := func(c byte) bool { return 'a' <= c && c <= 'z' }
+ isUpper := func(c byte) bool { return 'A' <= c && c <= 'Z' }
+ isLetter := func(c byte) bool { return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') }
+ isDigit := func(c byte) bool { return '0' <= c && c <= '9' }
+
+ words := make([]string, 0, 8)
+ words = append(words, "TS_DEBUGSYSPOLICY")
+ var currentWord strings.Builder
+ for i := 0; i < len(key); i++ {
+ c := key[i]
+ if c >= utf8.RuneSelf {
+ return "", errInvalidKey
+ }
+
+ var split bool
+ switch {
+ case isLower(c):
+ c -= 'a' - 'A' // make upper
+ split = currentWord.Len() > 0 && !isLetter(key[i-1])
+ case isUpper(c):
+ if currentWord.Len() > 0 {
+ prevUpper := isUpper(key[i-1])
+ nextLower := i < len(key)-1 && isLower(key[i+1])
+ split = !prevUpper || nextLower // split on case transition
+ }
+ case isDigit(c):
+ split = currentWord.Len() > 0 && !isDigit(key[i-1])
+ case c == setting.KeyPathSeparator:
+ words = append(words, currentWord.String())
+ currentWord.Reset()
+ continue
+ default:
+ return "", errInvalidKey
+ }
+
+ if split {
+ words = append(words, currentWord.String())
+ currentWord.Reset()
+ }
+
+ currentWord.WriteByte(c)
+ }
+
+ if currentWord.Len() > 0 {
+ words = append(words, currentWord.String())
+ }
+
+ return strings.Join(words, "_"), nil
+}
diff --git a/util/syspolicy/source/env_policy_store_test.go b/util/syspolicy/source/env_policy_store_test.go
new file mode 100644
index 0000000000000..9eacf6378b450
--- /dev/null
+++ b/util/syspolicy/source/env_policy_store_test.go
@@ -0,0 +1,359 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package source
+
+import (
+ "cmp"
+ "errors"
+ "math"
+ "reflect"
+ "strconv"
+ "testing"
+
+ "tailscale.com/util/syspolicy/setting"
+)
+
+func TestKeyToEnvVarName(t *testing.T) {
+ tests := []struct {
+ name string
+ key setting.Key
+ want string // suffix after "TS_DEBUGSYSPOLICY_"
+ wantErr error
+ }{
+ {
+ name: "empty",
+ key: "",
+ wantErr: errEmptyKey,
+ },
+ {
+ name: "lowercase",
+ key: "tailnet",
+ want: "TAILNET",
+ },
+ {
+ name: "CamelCase",
+ key: "AuthKey",
+ want: "AUTH_KEY",
+ },
+ {
+ name: "LongerCamelCase",
+ key: "ManagedByOrganizationName",
+ want: "MANAGED_BY_ORGANIZATION_NAME",
+ },
+ {
+ name: "UPPERCASE",
+ key: "UPPERCASE",
+ want: "UPPERCASE",
+ },
+ {
+ name: "WithAbbrev/Front",
+ key: "DNSServer",
+ want: "DNS_SERVER",
+ },
+ {
+ name: "WithAbbrev/Middle",
+ key: "ExitNodeAllowLANAccess",
+ want: "EXIT_NODE_ALLOW_LAN_ACCESS",
+ },
+ {
+ name: "WithAbbrev/Back",
+ key: "ExitNodeID",
+ want: "EXIT_NODE_ID",
+ },
+ {
+ name: "WithDigits/Single/Front",
+ key: "0TestKey",
+ want: "0_TEST_KEY",
+ },
+ {
+ name: "WithDigits/Multi/Front",
+ key: "64TestKey",
+ want: "64_TEST_KEY",
+ },
+ {
+ name: "WithDigits/Single/Middle",
+ key: "Test0Key",
+ want: "TEST_0_KEY",
+ },
+ {
+ name: "WithDigits/Multi/Middle",
+ key: "Test64Key",
+ want: "TEST_64_KEY",
+ },
+ {
+ name: "WithDigits/Single/Back",
+ key: "TestKey0",
+ want: "TEST_KEY_0",
+ },
+ {
+ name: "WithDigits/Multi/Back",
+ key: "TestKey64",
+ want: "TEST_KEY_64",
+ },
+		{
+			name: "WithDigits/Mixed",
+			key:  "Test0Key64",
+			want: "TEST_0_KEY_64",
+		},
+ {
+ name: "WithPathSeparators/Single",
+ key: "Key/Subkey",
+ want: "KEY_SUBKEY",
+ },
+ {
+ name: "WithPathSeparators/Multi",
+ key: "Root/Level1/Level2",
+ want: "ROOT_LEVEL_1_LEVEL_2",
+ },
+ {
+ name: "Mixed",
+ key: "Network/DNSServer/IPAddress",
+ want: "NETWORK_DNS_SERVER_IP_ADDRESS",
+ },
+ {
+ name: "Non-Alphanumeric/NonASCII/1",
+ key: "ж",
+ wantErr: errInvalidKey,
+ },
+ {
+ name: "Non-Alphanumeric/NonASCII/2",
+ key: "KeyжName",
+ wantErr: errInvalidKey,
+ },
+ {
+ name: "Non-Alphanumeric/Space",
+ key: "Key Name",
+ wantErr: errInvalidKey,
+ },
+ {
+ name: "Non-Alphanumeric/Punct",
+ key: "Key!Name",
+ wantErr: errInvalidKey,
+ },
+ {
+ name: "Non-Alphanumeric/Backslash",
+ key: `Key\Name`,
+ wantErr: errInvalidKey,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(cmp.Or(tt.name, string(tt.key)), func(t *testing.T) {
+ got, err := keyToEnvVarName(tt.key)
+ checkError(t, err, tt.wantErr, true)
+
+ want := tt.want
+ if want != "" {
+ want = "TS_DEBUGSYSPOLICY_" + want
+ }
+ if got != want {
+ t.Fatalf("got %q; want %q", got, want)
+ }
+ })
+ }
+}
+
+func TestEnvPolicyStore(t *testing.T) {
+ blankEnv := func(string) (string, bool) { return "", false }
+ makeEnv := func(wantName, value string) func(string) (string, bool) {
+ wantName = "TS_DEBUGSYSPOLICY_" + wantName
+ return func(gotName string) (string, bool) {
+ if gotName != wantName {
+ return "", false
+ }
+ return value, true
+ }
+ }
+ tests := []struct {
+ name string
+ key setting.Key
+ lookup func(string) (string, bool)
+ want any
+ wantErr error
+ }{
+ {
+ name: "NotConfigured/String",
+ key: "AuthKey",
+ lookup: blankEnv,
+ wantErr: setting.ErrNotConfigured,
+ want: "",
+ },
+ {
+ name: "Configured/String/Empty",
+ key: "AuthKey",
+ lookup: makeEnv("AUTH_KEY", ""),
+ want: "",
+ },
+ {
+ name: "Configured/String/NonEmpty",
+ key: "AuthKey",
+ lookup: makeEnv("AUTH_KEY", "ABC123"),
+ want: "ABC123",
+ },
+ {
+ name: "NotConfigured/UInt64",
+ key: "IntegerSetting",
+ lookup: blankEnv,
+ wantErr: setting.ErrNotConfigured,
+ want: uint64(0),
+ },
+ {
+ name: "Configured/UInt64/Empty",
+ key: "IntegerSetting",
+ lookup: makeEnv("INTEGER_SETTING", ""),
+ wantErr: setting.ErrNotConfigured,
+ want: uint64(0),
+ },
+ {
+ name: "Configured/UInt64/Zero",
+ key: "IntegerSetting",
+ lookup: makeEnv("INTEGER_SETTING", "0"),
+ want: uint64(0),
+ },
+ {
+ name: "Configured/UInt64/NonZero",
+ key: "IntegerSetting",
+ lookup: makeEnv("INTEGER_SETTING", "12345"),
+ want: uint64(12345),
+ },
+ {
+ name: "Configured/UInt64/MaxUInt64",
+ key: "IntegerSetting",
+ lookup: makeEnv("INTEGER_SETTING", strconv.FormatUint(math.MaxUint64, 10)),
+ want: uint64(math.MaxUint64),
+ },
+ {
+ name: "Configured/UInt64/Negative",
+ key: "IntegerSetting",
+ lookup: makeEnv("INTEGER_SETTING", "-1"),
+ wantErr: setting.ErrTypeMismatch,
+ want: uint64(0),
+ },
+ {
+ name: "Configured/UInt64/Hex",
+ key: "IntegerSetting",
+ lookup: makeEnv("INTEGER_SETTING", "0xDEADBEEF"),
+ want: uint64(0xDEADBEEF),
+ },
+ {
+ name: "NotConfigured/Bool",
+ key: "LogSCMInteractions",
+ lookup: blankEnv,
+ wantErr: setting.ErrNotConfigured,
+ want: false,
+ },
+ {
+ name: "Configured/Bool/Empty",
+ key: "LogSCMInteractions",
+ lookup: makeEnv("LOG_SCM_INTERACTIONS", ""),
+ wantErr: setting.ErrNotConfigured,
+ want: false,
+ },
+ {
+ name: "Configured/Bool/True",
+ key: "LogSCMInteractions",
+ lookup: makeEnv("LOG_SCM_INTERACTIONS", "true"),
+ want: true,
+ },
+ {
+ name: "Configured/Bool/False",
+ key: "LogSCMInteractions",
+ lookup: makeEnv("LOG_SCM_INTERACTIONS", "False"),
+ want: false,
+ },
+ {
+ name: "Configured/Bool/1",
+ key: "LogSCMInteractions",
+ lookup: makeEnv("LOG_SCM_INTERACTIONS", "1"),
+ want: true,
+ },
+ {
+ name: "Configured/Bool/0",
+ key: "LogSCMInteractions",
+ lookup: makeEnv("LOG_SCM_INTERACTIONS", "0"),
+ want: false,
+ },
+ {
+ name: "Configured/Bool/Invalid",
+ key: "IntegerSetting",
+ lookup: makeEnv("INTEGER_SETTING", "NotABool"),
+ wantErr: setting.ErrTypeMismatch,
+ want: false,
+ },
+ {
+ name: "NotConfigured/StringArray",
+ key: "AllowedSuggestedExitNodes",
+ lookup: blankEnv,
+ wantErr: setting.ErrNotConfigured,
+ want: []string(nil),
+ },
+ {
+ name: "Configured/StringArray/Empty",
+ key: "AllowedSuggestedExitNodes",
+ lookup: makeEnv("ALLOWED_SUGGESTED_EXIT_NODES", ""),
+ want: []string(nil),
+ },
+ {
+ name: "Configured/StringArray/Spaces",
+ key: "AllowedSuggestedExitNodes",
+ lookup: makeEnv("ALLOWED_SUGGESTED_EXIT_NODES", " \t "),
+ want: []string{},
+ },
+ {
+ name: "Configured/StringArray/Single",
+ key: "AllowedSuggestedExitNodes",
+ lookup: makeEnv("ALLOWED_SUGGESTED_EXIT_NODES", "NodeA"),
+ want: []string{"NodeA"},
+ },
+ {
+ name: "Configured/StringArray/Multi",
+ key: "AllowedSuggestedExitNodes",
+ lookup: makeEnv("ALLOWED_SUGGESTED_EXIT_NODES", "NodeA,NodeB,NodeC"),
+ want: []string{"NodeA", "NodeB", "NodeC"},
+ },
+ {
+ name: "Configured/StringArray/WithBlank",
+ key: "AllowedSuggestedExitNodes",
+ lookup: makeEnv("ALLOWED_SUGGESTED_EXIT_NODES", "NodeA,\t,, ,NodeB"),
+ want: []string{"NodeA", "NodeB"},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(cmp.Or(tt.name, string(tt.key)), func(t *testing.T) {
+ oldLookupEnv := lookupEnv
+ t.Cleanup(func() { lookupEnv = oldLookupEnv })
+ lookupEnv = tt.lookup
+
+ var got any
+ var err error
+ var store EnvPolicyStore
+ switch tt.want.(type) {
+ case string:
+ got, err = store.ReadString(tt.key)
+ case uint64:
+ got, err = store.ReadUInt64(tt.key)
+ case bool:
+ got, err = store.ReadBoolean(tt.key)
+ case []string:
+ got, err = store.ReadStringArray(tt.key)
+ }
+ checkError(t, err, tt.wantErr, false)
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("got %v; want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func checkError(tb testing.TB, got, want error, fatal bool) {
+ tb.Helper()
+ f := tb.Errorf
+ if fatal {
+ f = tb.Fatalf
+ }
+ if (want == nil && got != nil) ||
+ (want != nil && got == nil) ||
+ (want != nil && got != nil && !errors.Is(got, want) && want.Error() != got.Error()) {
+ f("gotErr: %v; wantErr: %v", got, want)
+ }
+}
diff --git a/util/syspolicy/source/policy_store_windows.go b/util/syspolicy/source/policy_store_windows.go
index f526b4ce1c666..86e2254e0a381 100644
--- a/util/syspolicy/source/policy_store_windows.go
+++ b/util/syspolicy/source/policy_store_windows.go
@@ -319,9 +319,9 @@ func (ps *PlatformPolicyStore) ReadStringArray(key setting.Key) ([]string, error
// If there are no [setting.KeyPathSeparator]s in the key, the policy setting value
// is meant to be stored directly under {HKLM,HKCU}\Software\Policies\Tailscale.
func splitSettingKey(key setting.Key) (path, valueName string) {
- if idx := strings.LastIndex(string(key), setting.KeyPathSeparator); idx != -1 {
- path = strings.ReplaceAll(string(key[:idx]), setting.KeyPathSeparator, `\`)
- valueName = string(key[idx+len(setting.KeyPathSeparator):])
+ if idx := strings.LastIndexByte(string(key), setting.KeyPathSeparator); idx != -1 {
+ path = strings.ReplaceAll(string(key[:idx]), string(setting.KeyPathSeparator), `\`)
+ valueName = string(key[idx+1:])
return path, valueName
}
return "", string(key)
diff --git a/util/syspolicy/source/test_store.go b/util/syspolicy/source/test_store.go
index bb8e164fb414a..1f19bbb4386b9 100644
--- a/util/syspolicy/source/test_store.go
+++ b/util/syspolicy/source/test_store.go
@@ -89,6 +89,7 @@ type TestStore struct {
suspendCount int // change callback are suspended if > 0
mr, mw map[setting.Key]any // maps for reading and writing; they're the same unless the store is suspended.
cbs set.HandleSet[func()]
+ closed bool
readsMu sync.Mutex
reads map[testReadOperation]int // how many times a policy setting was read
@@ -98,24 +99,20 @@ type TestStore struct {
// The tb will be used to report coding errors detected by the [TestStore].
func NewTestStore(tb internal.TB) *TestStore {
m := make(map[setting.Key]any)
- return &TestStore{
+ store := &TestStore{
tb: tb,
done: make(chan struct{}),
mr: m,
mw: m,
}
+ tb.Cleanup(store.Close)
+ return store
}
// NewTestStoreOf is a shorthand for [NewTestStore] followed by [TestStore.SetBooleans],
// [TestStore.SetUInt64s], [TestStore.SetStrings] or [TestStore.SetStringLists].
func NewTestStoreOf[T TestValueType](tb internal.TB, settings ...TestSetting[T]) *TestStore {
- m := make(map[setting.Key]any)
- store := &TestStore{
- tb: tb,
- done: make(chan struct{}),
- mr: m,
- mw: m,
- }
+ store := NewTestStore(tb)
switch settings := any(settings).(type) {
case []TestSetting[bool]:
store.SetBooleans(settings...)
@@ -308,7 +305,7 @@ func (s *TestStore) Resume() {
s.mr = s.mw
s.mu.Unlock()
s.storeLock.Unlock()
- s.notifyPolicyChanged()
+ s.NotifyPolicyChanged()
case s.suspendCount < 0:
s.tb.Fatal("negative suspendCount")
default:
@@ -333,7 +330,7 @@ func (s *TestStore) SetBooleans(settings ...TestSetting[bool]) {
s.mu.Unlock()
}
s.storeLock.Unlock()
- s.notifyPolicyChanged()
+ s.NotifyPolicyChanged()
}
// SetUInt64s sets the specified integer settings in s.
@@ -352,7 +349,7 @@ func (s *TestStore) SetUInt64s(settings ...TestSetting[uint64]) {
s.mu.Unlock()
}
s.storeLock.Unlock()
- s.notifyPolicyChanged()
+ s.NotifyPolicyChanged()
}
// SetStrings sets the specified string settings in s.
@@ -371,7 +368,7 @@ func (s *TestStore) SetStrings(settings ...TestSetting[string]) {
s.mu.Unlock()
}
s.storeLock.Unlock()
- s.notifyPolicyChanged()
+ s.NotifyPolicyChanged()
}
// SetStrings sets the specified string list settings in s.
@@ -390,7 +387,7 @@ func (s *TestStore) SetStringLists(settings ...TestSetting[[]string]) {
s.mu.Unlock()
}
s.storeLock.Unlock()
- s.notifyPolicyChanged()
+ s.NotifyPolicyChanged()
}
// Delete deletes the specified settings from s.
@@ -402,7 +399,7 @@ func (s *TestStore) Delete(keys ...setting.Key) {
s.mu.Unlock()
}
s.storeLock.Unlock()
- s.notifyPolicyChanged()
+ s.NotifyPolicyChanged()
}
// Clear deletes all settings from s.
@@ -412,10 +409,10 @@ func (s *TestStore) Clear() {
clear(s.mw)
s.mu.Unlock()
s.storeLock.Unlock()
- s.notifyPolicyChanged()
+ s.NotifyPolicyChanged()
}
-func (s *TestStore) notifyPolicyChanged() {
+func (s *TestStore) NotifyPolicyChanged() {
s.mu.RLock()
if s.suspendCount != 0 {
s.mu.RUnlock()
@@ -439,9 +436,9 @@ func (s *TestStore) notifyPolicyChanged() {
func (s *TestStore) Close() {
s.mu.Lock()
defer s.mu.Unlock()
- if s.done != nil {
+ if !s.closed {
close(s.done)
- s.done = nil
+ s.closed = true
}
}
diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go
index abe42ed90f8c7..d925731c38b3a 100644
--- a/util/syspolicy/syspolicy.go
+++ b/util/syspolicy/syspolicy.go
@@ -1,51 +1,82 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
-// Package syspolicy provides functions to retrieve system settings of a device.
+// Package syspolicy facilitates retrieval of the current policy settings
+// applied to the device or user and receiving notifications when the policy
+// changes.
+//
+// It provides functions that return specific policy settings by their unique
+// [setting.Key]s, such as [GetBoolean], [GetUint64], [GetString],
+// [GetStringArray], [GetPreferenceOption], [GetVisibility] and [GetDuration].
package syspolicy
import (
"errors"
+ "fmt"
+ "reflect"
"time"
"tailscale.com/util/syspolicy/internal/loggerx"
+ "tailscale.com/util/syspolicy/rsop"
"tailscale.com/util/syspolicy/setting"
+ "tailscale.com/util/syspolicy/source"
)
-func GetString(key Key, defaultValue string) (string, error) {
- markHandlerInUse()
- v, err := handler.ReadString(string(key))
- if errors.Is(err, ErrNoSuchKey) {
- return defaultValue, nil
+var (
+ // ErrNotConfigured is returned when the requested policy setting is not configured.
+ ErrNotConfigured = setting.ErrNotConfigured
+ // ErrTypeMismatch is returned when there's a type mismatch between the actual type
+ // of the setting value and the expected type.
+ ErrTypeMismatch = setting.ErrTypeMismatch
+ // ErrNoSuchKey is returned by [setting.DefinitionOf] when no policy setting
+ // has been registered with the specified key.
+ //
+ // This error is also returned by a (now deprecated) [Handler] when the specified
+ // key does not have a value set. While the package maintains compatibility with this
+ // usage of ErrNoSuchKey, it is recommended to return [ErrNotConfigured] from newer
+ // [source.Store] implementations.
+ ErrNoSuchKey = setting.ErrNoSuchKey
+)
+
+// RegisterStore registers a new policy [source.Store] with the specified name and [setting.PolicyScope].
+//
+// It is a shorthand for [rsop.RegisterStore].
+func RegisterStore(name string, scope setting.PolicyScope, store source.Store) (*rsop.StoreRegistration, error) {
+ return rsop.RegisterStore(name, scope, store)
+}
+
+// MustRegisterStoreForTest is like [rsop.RegisterStoreForTest], but it fails the test if the store could not be registered.
+func MustRegisterStoreForTest(tb TB, name string, scope setting.PolicyScope, store source.Store) *rsop.StoreRegistration {
+ tb.Helper()
+ reg, err := rsop.RegisterStoreForTest(tb, name, scope, store)
+ if err != nil {
+ tb.Fatalf("Failed to register policy store %q as a %v policy source: %v", name, scope, err)
}
- return v, err
+ return reg
+}
+
+// GetString returns a string policy setting with the specified key,
+// or defaultValue if it does not exist.
+func GetString(key Key, defaultValue string) (string, error) {
+ return getCurrentPolicySettingValue(key, defaultValue)
}
+// GetUint64 returns a numeric policy setting with the specified key,
+// or defaultValue if it does not exist.
func GetUint64(key Key, defaultValue uint64) (uint64, error) {
- markHandlerInUse()
- v, err := handler.ReadUInt64(string(key))
- if errors.Is(err, ErrNoSuchKey) {
- return defaultValue, nil
- }
- return v, err
+ return getCurrentPolicySettingValue(key, defaultValue)
}
+// GetBoolean returns a boolean policy setting with the specified key,
+// or defaultValue if it does not exist.
func GetBoolean(key Key, defaultValue bool) (bool, error) {
- markHandlerInUse()
- v, err := handler.ReadBoolean(string(key))
- if errors.Is(err, ErrNoSuchKey) {
- return defaultValue, nil
- }
- return v, err
+ return getCurrentPolicySettingValue(key, defaultValue)
}
+// GetStringArray returns a multi-string policy setting with the specified key,
+// or defaultValue if it does not exist.
func GetStringArray(key Key, defaultValue []string) ([]string, error) {
- markHandlerInUse()
- v, err := handler.ReadStringArray(string(key))
- if errors.Is(err, ErrNoSuchKey) {
- return defaultValue, nil
- }
- return v, err
+ return getCurrentPolicySettingValue(key, defaultValue)
}
// GetPreferenceOption loads a policy from the registry that can be
@@ -55,13 +86,7 @@ func GetStringArray(key Key, defaultValue []string) ([]string, error) {
// "always" and "never" remove the user's ability to make a selection. If not
// present or set to a different value, "user-decides" is the default.
func GetPreferenceOption(name Key) (setting.PreferenceOption, error) {
- s, err := GetString(name, "user-decides")
- if err != nil {
- return setting.ShowChoiceByPolicy, err
- }
- var opt setting.PreferenceOption
- err = opt.UnmarshalText([]byte(s))
- return opt, err
+ return getCurrentPolicySettingValue(name, setting.ShowChoiceByPolicy)
}
// GetVisibility loads a policy from the registry that can be managed
@@ -70,13 +95,7 @@ func GetPreferenceOption(name Key) (setting.PreferenceOption, error) {
// true) or "hide" (return true). If not present or set to a different value,
// "show" (return false) is the default.
func GetVisibility(name Key) (setting.Visibility, error) {
- s, err := GetString(name, "show")
- if err != nil {
- return setting.VisibleByPolicy, err
- }
- var visibility setting.Visibility
- visibility.UnmarshalText([]byte(s))
- return visibility, nil
+ return getCurrentPolicySettingValue(name, setting.VisibleByPolicy)
}
// GetDuration loads a policy from the registry that can be managed
@@ -85,15 +104,58 @@ func GetVisibility(name Key) (setting.Visibility, error) {
// understands. If the registry value is "" or can not be processed,
// defaultValue is returned instead.
func GetDuration(name Key, defaultValue time.Duration) (time.Duration, error) {
- opt, err := GetString(name, "")
- if opt == "" || err != nil {
- return defaultValue, err
+ d, err := getCurrentPolicySettingValue(name, defaultValue)
+ if err != nil {
+ return d, err
}
- v, err := time.ParseDuration(opt)
- if err != nil || v < 0 {
+ if d < 0 {
return defaultValue, nil
}
- return v, nil
+ return d, nil
+}
+
+// RegisterChangeCallback adds a function that will be called whenever the effective policy
+// for the default scope changes. The returned function can be used to unregister the callback.
+func RegisterChangeCallback(cb rsop.PolicyChangeCallback) (unregister func(), err error) {
+ effective, err := rsop.PolicyFor(setting.DefaultScope())
+ if err != nil {
+ return nil, err
+ }
+ return effective.RegisterChangeCallback(cb), nil
+}
+
+// getCurrentPolicySettingValue returns the value of the policy setting
+// specified by its key from the [rsop.Policy] of the [setting.DefaultScope]. It
+// returns def if the policy setting is not configured, or an error if it has
+// an error or could not be converted to the specified type T.
+func getCurrentPolicySettingValue[T setting.ValueType](key Key, def T) (T, error) {
+ effective, err := rsop.PolicyFor(setting.DefaultScope())
+ if err != nil {
+ return def, err
+ }
+ value, err := effective.Get().GetErr(key)
+ if err != nil {
+ if errors.Is(err, setting.ErrNotConfigured) || errors.Is(err, setting.ErrNoSuchKey) {
+ return def, nil
+ }
+ return def, err
+ }
+ if res, ok := value.(T); ok {
+ return res, nil
+ }
+ return convertPolicySettingValueTo(value, def)
+}
+
+func convertPolicySettingValueTo[T setting.ValueType](value any, def T) (T, error) {
+ // Convert [PreferenceOption], [Visibility], or [time.Duration] back to a string
+ // if someone requests a string instead of the actual setting's value.
+ // TODO(nickkhyl): check if this behavior is relied upon anywhere besides the old tests.
+ if reflect.TypeFor[T]().Kind() == reflect.String {
+ if str, ok := value.(fmt.Stringer); ok {
+ return any(str.String()).(T), nil
+ }
+ }
+ return def, fmt.Errorf("%w: got %T, want %T", setting.ErrTypeMismatch, value, def)
}
// SelectControlURL returns the ControlURL to use based on a value in
diff --git a/util/syspolicy/syspolicy_test.go b/util/syspolicy/syspolicy_test.go
index 8280aa1dfbdac..a70a49d395c22 100644
--- a/util/syspolicy/syspolicy_test.go
+++ b/util/syspolicy/syspolicy_test.go
@@ -9,57 +9,15 @@ import (
"testing"
"time"
+ "tailscale.com/types/logger"
+ "tailscale.com/util/syspolicy/internal/loggerx"
+ "tailscale.com/util/syspolicy/internal/metrics"
"tailscale.com/util/syspolicy/setting"
+ "tailscale.com/util/syspolicy/source"
)
-// testHandler encompasses all data types returned when testing any of the syspolicy
-// methods that involve getting a policy value.
-// For keys and the corresponding values, check policy_keys.go.
-type testHandler struct {
- t *testing.T
- key Key
- s string
- u64 uint64
- b bool
- sArr []string
- err error
- calls int // used for testing reads from cache vs. handler
-}
-
var someOtherError = errors.New("error other than not found")
-func (th *testHandler) ReadString(key string) (string, error) {
- if key != string(th.key) {
- th.t.Errorf("ReadString(%q) want %q", key, th.key)
- }
- th.calls++
- return th.s, th.err
-}
-
-func (th *testHandler) ReadUInt64(key string) (uint64, error) {
- if key != string(th.key) {
- th.t.Errorf("ReadUint64(%q) want %q", key, th.key)
- }
- th.calls++
- return th.u64, th.err
-}
-
-func (th *testHandler) ReadBoolean(key string) (bool, error) {
- if key != string(th.key) {
- th.t.Errorf("ReadBool(%q) want %q", key, th.key)
- }
- th.calls++
- return th.b, th.err
-}
-
-func (th *testHandler) ReadStringArray(key string) ([]string, error) {
- if key != string(th.key) {
- th.t.Errorf("ReadStringArray(%q) want %q", key, th.key)
- }
- th.calls++
- return th.sArr, th.err
-}
-
func TestGetString(t *testing.T) {
tests := []struct {
name string
@@ -69,23 +27,28 @@ func TestGetString(t *testing.T) {
defaultValue string
wantValue string
wantError error
+ wantMetrics []metrics.TestState
}{
{
name: "read existing value",
key: AdminConsoleVisibility,
handlerValue: "hide",
wantValue: "hide",
+ wantMetrics: []metrics.TestState{
+ {Name: "$os_syspolicy_any", Value: 1},
+ {Name: "$os_syspolicy_AdminConsole", Value: 1},
+ },
},
{
name: "read non-existing value",
key: EnableServerMode,
- handlerError: ErrNoSuchKey,
+ handlerError: ErrNotConfigured,
wantError: nil,
},
{
name: "read non-existing value, non-blank default",
key: EnableServerMode,
- handlerError: ErrNoSuchKey,
+ handlerError: ErrNotConfigured,
defaultValue: "test",
wantValue: "test",
wantError: nil,
@@ -95,24 +58,43 @@ func TestGetString(t *testing.T) {
key: NetworkDevicesVisibility,
handlerError: someOtherError,
wantError: someOtherError,
+ wantMetrics: []metrics.TestState{
+ {Name: "$os_syspolicy_errors", Value: 1},
+ {Name: "$os_syspolicy_NetworkDevices_error", Value: 1},
+ },
},
}
+ RegisterWellKnownSettingsForTest(t)
+
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- SetHandlerForTest(t, &testHandler{
- t: t,
- key: tt.key,
- s: tt.handlerValue,
- err: tt.handlerError,
- })
+ h := metrics.NewTestHandler(t)
+ metrics.SetHooksForTest(t, h.AddMetric, h.SetMetric)
+
+ s := source.TestSetting[string]{
+ Key: tt.key,
+ Value: tt.handlerValue,
+ Error: tt.handlerError,
+ }
+ registerSingleSettingStoreForTest(t, s)
+
value, err := GetString(tt.key, tt.defaultValue)
- if err != tt.wantError {
+ if !errorsMatchForTest(err, tt.wantError) {
t.Errorf("err=%q, want %q", err, tt.wantError)
}
if value != tt.wantValue {
t.Errorf("value=%v, want %v", value, tt.wantValue)
}
+ wantMetrics := tt.wantMetrics
+ if !metrics.ShouldReport() {
+ // Check that metrics are not reported on platforms
+ // where they shouldn't be reported.
+ // As of 2024-09-04, syspolicy only reports metrics
+ // on Windows and Android.
+ wantMetrics = nil
+ }
+ h.MustEqual(wantMetrics...)
})
}
}
@@ -129,7 +111,7 @@ func TestGetUint64(t *testing.T) {
}{
{
name: "read existing value",
- key: KeyExpirationNoticeTime,
+ key: LogSCMInteractions,
handlerValue: 1,
wantValue: 1,
},
@@ -137,14 +119,14 @@ func TestGetUint64(t *testing.T) {
name: "read non-existing value",
key: LogSCMInteractions,
handlerValue: 0,
- handlerError: ErrNoSuchKey,
+ handlerError: ErrNotConfigured,
wantValue: 0,
},
{
name: "read non-existing value, non-zero default",
key: LogSCMInteractions,
defaultValue: 2,
- handlerError: ErrNoSuchKey,
+ handlerError: ErrNotConfigured,
wantValue: 2,
},
{
@@ -157,14 +139,23 @@ func TestGetUint64(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- SetHandlerForTest(t, &testHandler{
- t: t,
- key: tt.key,
- u64: tt.handlerValue,
- err: tt.handlerError,
- })
+ // None of the policy settings tested here are integers.
+ // In fact, we don't have any integer policies as of 2024-10-08.
+ // However, we can register each of them as an integer policy setting
+ // for the duration of the test, providing us with something to test against.
+ if err := setting.SetDefinitionsForTest(t, setting.NewDefinition(tt.key, setting.DeviceSetting, setting.IntegerValue)); err != nil {
+ t.Fatalf("SetDefinitionsForTest failed: %v", err)
+ }
+
+ s := source.TestSetting[uint64]{
+ Key: tt.key,
+ Value: tt.handlerValue,
+ Error: tt.handlerError,
+ }
+ registerSingleSettingStoreForTest(t, s)
+
value, err := GetUint64(tt.key, tt.defaultValue)
- if err != tt.wantError {
+ if !errorsMatchForTest(err, tt.wantError) {
t.Errorf("err=%q, want %q", err, tt.wantError)
}
if value != tt.wantValue {
@@ -183,45 +174,69 @@ func TestGetBoolean(t *testing.T) {
defaultValue bool
wantValue bool
wantError error
+ wantMetrics []metrics.TestState
}{
{
name: "read existing value",
key: FlushDNSOnSessionUnlock,
handlerValue: true,
wantValue: true,
+ wantMetrics: []metrics.TestState{
+ {Name: "$os_syspolicy_any", Value: 1},
+ {Name: "$os_syspolicy_FlushDNSOnSessionUnlock", Value: 1},
+ },
},
{
name: "read non-existing value",
key: LogSCMInteractions,
handlerValue: false,
- handlerError: ErrNoSuchKey,
+ handlerError: ErrNotConfigured,
wantValue: false,
},
{
name: "reading value returns other error",
key: FlushDNSOnSessionUnlock,
handlerError: someOtherError,
- wantError: someOtherError,
+ wantError: someOtherError, // expect error...
defaultValue: true,
- wantValue: false,
+ wantValue: true, // ...AND default value if the handler fails.
+ wantMetrics: []metrics.TestState{
+ {Name: "$os_syspolicy_errors", Value: 1},
+ {Name: "$os_syspolicy_FlushDNSOnSessionUnlock_error", Value: 1},
+ },
},
}
+ RegisterWellKnownSettingsForTest(t)
+
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- SetHandlerForTest(t, &testHandler{
- t: t,
- key: tt.key,
- b: tt.handlerValue,
- err: tt.handlerError,
- })
+ h := metrics.NewTestHandler(t)
+ metrics.SetHooksForTest(t, h.AddMetric, h.SetMetric)
+
+ s := source.TestSetting[bool]{
+ Key: tt.key,
+ Value: tt.handlerValue,
+ Error: tt.handlerError,
+ }
+ registerSingleSettingStoreForTest(t, s)
+
value, err := GetBoolean(tt.key, tt.defaultValue)
- if err != tt.wantError {
+ if !errorsMatchForTest(err, tt.wantError) {
t.Errorf("err=%q, want %q", err, tt.wantError)
}
if value != tt.wantValue {
t.Errorf("value=%v, want %v", value, tt.wantValue)
}
+ wantMetrics := tt.wantMetrics
+ if !metrics.ShouldReport() {
+ // Check that metrics are not reported on platforms
+ // where they shouldn't be reported.
+ // As of 2024-09-04, syspolicy only reports metrics
+ // on Windows and Android.
+ wantMetrics = nil
+ }
+ h.MustEqual(wantMetrics...)
})
}
}
@@ -234,29 +249,42 @@ func TestGetPreferenceOption(t *testing.T) {
handlerError error
wantValue setting.PreferenceOption
wantError error
+ wantMetrics []metrics.TestState
}{
{
name: "always by policy",
key: EnableIncomingConnections,
handlerValue: "always",
wantValue: setting.AlwaysByPolicy,
+ wantMetrics: []metrics.TestState{
+ {Name: "$os_syspolicy_any", Value: 1},
+ {Name: "$os_syspolicy_AllowIncomingConnections", Value: 1},
+ },
},
{
name: "never by policy",
key: EnableIncomingConnections,
handlerValue: "never",
wantValue: setting.NeverByPolicy,
+ wantMetrics: []metrics.TestState{
+ {Name: "$os_syspolicy_any", Value: 1},
+ {Name: "$os_syspolicy_AllowIncomingConnections", Value: 1},
+ },
},
{
name: "use default",
key: EnableIncomingConnections,
handlerValue: "",
wantValue: setting.ShowChoiceByPolicy,
+ wantMetrics: []metrics.TestState{
+ {Name: "$os_syspolicy_any", Value: 1},
+ {Name: "$os_syspolicy_AllowIncomingConnections", Value: 1},
+ },
},
{
name: "read non-existing value",
key: EnableIncomingConnections,
- handlerError: ErrNoSuchKey,
+ handlerError: ErrNotConfigured,
wantValue: setting.ShowChoiceByPolicy,
},
{
@@ -265,24 +293,43 @@ func TestGetPreferenceOption(t *testing.T) {
handlerError: someOtherError,
wantValue: setting.ShowChoiceByPolicy,
wantError: someOtherError,
+ wantMetrics: []metrics.TestState{
+ {Name: "$os_syspolicy_errors", Value: 1},
+ {Name: "$os_syspolicy_AllowIncomingConnections_error", Value: 1},
+ },
},
}
+ RegisterWellKnownSettingsForTest(t)
+
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- SetHandlerForTest(t, &testHandler{
- t: t,
- key: tt.key,
- s: tt.handlerValue,
- err: tt.handlerError,
- })
+ h := metrics.NewTestHandler(t)
+ metrics.SetHooksForTest(t, h.AddMetric, h.SetMetric)
+
+ s := source.TestSetting[string]{
+ Key: tt.key,
+ Value: tt.handlerValue,
+ Error: tt.handlerError,
+ }
+ registerSingleSettingStoreForTest(t, s)
+
option, err := GetPreferenceOption(tt.key)
- if err != tt.wantError {
+ if !errorsMatchForTest(err, tt.wantError) {
t.Errorf("err=%q, want %q", err, tt.wantError)
}
if option != tt.wantValue {
t.Errorf("option=%v, want %v", option, tt.wantValue)
}
+ wantMetrics := tt.wantMetrics
+ if !metrics.ShouldReport() {
+ // Check that metrics are not reported on platforms
+ // where they shouldn't be reported.
+ // As of 2024-09-04, syspolicy only reports metrics
+ // on Windows and Android.
+ wantMetrics = nil
+ }
+ h.MustEqual(wantMetrics...)
})
}
}
@@ -295,24 +342,33 @@ func TestGetVisibility(t *testing.T) {
handlerError error
wantValue setting.Visibility
wantError error
+ wantMetrics []metrics.TestState
}{
{
name: "hidden by policy",
key: AdminConsoleVisibility,
handlerValue: "hide",
wantValue: setting.HiddenByPolicy,
+ wantMetrics: []metrics.TestState{
+ {Name: "$os_syspolicy_any", Value: 1},
+ {Name: "$os_syspolicy_AdminConsole", Value: 1},
+ },
},
{
name: "visibility default",
key: AdminConsoleVisibility,
handlerValue: "show",
wantValue: setting.VisibleByPolicy,
+ wantMetrics: []metrics.TestState{
+ {Name: "$os_syspolicy_any", Value: 1},
+ {Name: "$os_syspolicy_AdminConsole", Value: 1},
+ },
},
{
name: "read non-existing value",
key: AdminConsoleVisibility,
handlerValue: "show",
- handlerError: ErrNoSuchKey,
+ handlerError: ErrNotConfigured,
wantValue: setting.VisibleByPolicy,
},
{
@@ -322,24 +378,43 @@ func TestGetVisibility(t *testing.T) {
handlerError: someOtherError,
wantValue: setting.VisibleByPolicy,
wantError: someOtherError,
+ wantMetrics: []metrics.TestState{
+ {Name: "$os_syspolicy_errors", Value: 1},
+ {Name: "$os_syspolicy_AdminConsole_error", Value: 1},
+ },
},
}
+ RegisterWellKnownSettingsForTest(t)
+
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- SetHandlerForTest(t, &testHandler{
- t: t,
- key: tt.key,
- s: tt.handlerValue,
- err: tt.handlerError,
- })
+ h := metrics.NewTestHandler(t)
+ metrics.SetHooksForTest(t, h.AddMetric, h.SetMetric)
+
+ s := source.TestSetting[string]{
+ Key: tt.key,
+ Value: tt.handlerValue,
+ Error: tt.handlerError,
+ }
+ registerSingleSettingStoreForTest(t, s)
+
visibility, err := GetVisibility(tt.key)
- if err != tt.wantError {
+ if !errorsMatchForTest(err, tt.wantError) {
t.Errorf("err=%q, want %q", err, tt.wantError)
}
if visibility != tt.wantValue {
t.Errorf("visibility=%v, want %v", visibility, tt.wantValue)
}
+ wantMetrics := tt.wantMetrics
+ if !metrics.ShouldReport() {
+ // Check that metrics are not reported on platforms
+ // where they shouldn't be reported.
+ // As of 2024-09-04, syspolicy only reports metrics
+ // on Windows and Android.
+ wantMetrics = nil
+ }
+ h.MustEqual(wantMetrics...)
})
}
}
@@ -353,6 +428,7 @@ func TestGetDuration(t *testing.T) {
defaultValue time.Duration
wantValue time.Duration
wantError error
+ wantMetrics []metrics.TestState
}{
{
name: "read existing value",
@@ -360,25 +436,34 @@ func TestGetDuration(t *testing.T) {
handlerValue: "2h",
wantValue: 2 * time.Hour,
defaultValue: 24 * time.Hour,
+ wantMetrics: []metrics.TestState{
+ {Name: "$os_syspolicy_any", Value: 1},
+ {Name: "$os_syspolicy_KeyExpirationNotice", Value: 1},
+ },
},
{
name: "invalid duration value",
key: KeyExpirationNoticeTime,
handlerValue: "-20",
wantValue: 24 * time.Hour,
+ wantError: errors.New(`time: missing unit in duration "-20"`),
defaultValue: 24 * time.Hour,
+ wantMetrics: []metrics.TestState{
+ {Name: "$os_syspolicy_errors", Value: 1},
+ {Name: "$os_syspolicy_KeyExpirationNotice_error", Value: 1},
+ },
},
{
name: "read non-existing value",
key: KeyExpirationNoticeTime,
- handlerError: ErrNoSuchKey,
+ handlerError: ErrNotConfigured,
wantValue: 24 * time.Hour,
defaultValue: 24 * time.Hour,
},
{
name: "read non-existing value different default",
key: KeyExpirationNoticeTime,
- handlerError: ErrNoSuchKey,
+ handlerError: ErrNotConfigured,
wantValue: 0 * time.Second,
defaultValue: 0 * time.Second,
},
@@ -389,24 +474,43 @@ func TestGetDuration(t *testing.T) {
wantValue: 24 * time.Hour,
wantError: someOtherError,
defaultValue: 24 * time.Hour,
+ wantMetrics: []metrics.TestState{
+ {Name: "$os_syspolicy_errors", Value: 1},
+ {Name: "$os_syspolicy_KeyExpirationNotice_error", Value: 1},
+ },
},
}
+ RegisterWellKnownSettingsForTest(t)
+
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- SetHandlerForTest(t, &testHandler{
- t: t,
- key: tt.key,
- s: tt.handlerValue,
- err: tt.handlerError,
- })
+ h := metrics.NewTestHandler(t)
+ metrics.SetHooksForTest(t, h.AddMetric, h.SetMetric)
+
+ s := source.TestSetting[string]{
+ Key: tt.key,
+ Value: tt.handlerValue,
+ Error: tt.handlerError,
+ }
+ registerSingleSettingStoreForTest(t, s)
+
duration, err := GetDuration(tt.key, tt.defaultValue)
- if err != tt.wantError {
+ if !errorsMatchForTest(err, tt.wantError) {
t.Errorf("err=%q, want %q", err, tt.wantError)
}
if duration != tt.wantValue {
t.Errorf("duration=%v, want %v", duration, tt.wantValue)
}
+ wantMetrics := tt.wantMetrics
+ if !metrics.ShouldReport() {
+ // Check that metrics are not reported on platforms
+ // where they shouldn't be reported.
+ // As of 2024-09-04, syspolicy only reports metrics
+ // on Windows and Android.
+ wantMetrics = nil
+ }
+ h.MustEqual(wantMetrics...)
})
}
}
@@ -420,23 +524,28 @@ func TestGetStringArray(t *testing.T) {
defaultValue []string
wantValue []string
wantError error
+ wantMetrics []metrics.TestState
}{
{
name: "read existing value",
key: AllowedSuggestedExitNodes,
handlerValue: []string{"foo", "bar"},
wantValue: []string{"foo", "bar"},
+ wantMetrics: []metrics.TestState{
+ {Name: "$os_syspolicy_any", Value: 1},
+ {Name: "$os_syspolicy_AllowedSuggestedExitNodes", Value: 1},
+ },
},
{
name: "read non-existing value",
key: AllowedSuggestedExitNodes,
- handlerError: ErrNoSuchKey,
+ handlerError: ErrNotConfigured,
wantError: nil,
},
{
name: "read non-existing value, non nil default",
key: AllowedSuggestedExitNodes,
- handlerError: ErrNoSuchKey,
+ handlerError: ErrNotConfigured,
defaultValue: []string{"foo", "bar"},
wantValue: []string{"foo", "bar"},
wantError: nil,
@@ -446,28 +555,68 @@ func TestGetStringArray(t *testing.T) {
key: AllowedSuggestedExitNodes,
handlerError: someOtherError,
wantError: someOtherError,
+ wantMetrics: []metrics.TestState{
+ {Name: "$os_syspolicy_errors", Value: 1},
+ {Name: "$os_syspolicy_AllowedSuggestedExitNodes_error", Value: 1},
+ },
},
}
+ RegisterWellKnownSettingsForTest(t)
+
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- SetHandlerForTest(t, &testHandler{
- t: t,
- key: tt.key,
- sArr: tt.handlerValue,
- err: tt.handlerError,
- })
+ h := metrics.NewTestHandler(t)
+ metrics.SetHooksForTest(t, h.AddMetric, h.SetMetric)
+
+ s := source.TestSetting[[]string]{
+ Key: tt.key,
+ Value: tt.handlerValue,
+ Error: tt.handlerError,
+ }
+ registerSingleSettingStoreForTest(t, s)
+
value, err := GetStringArray(tt.key, tt.defaultValue)
- if err != tt.wantError {
+ if !errorsMatchForTest(err, tt.wantError) {
t.Errorf("err=%q, want %q", err, tt.wantError)
}
if !slices.Equal(tt.wantValue, value) {
t.Errorf("value=%v, want %v", value, tt.wantValue)
}
+ wantMetrics := tt.wantMetrics
+ if !metrics.ShouldReport() {
+ // Check that metrics are not reported on platforms
+ // where they shouldn't be reported.
+ // As of 2024-09-04, syspolicy only reports metrics
+ // on Windows and Android.
+ wantMetrics = nil
+ }
+ h.MustEqual(wantMetrics...)
})
}
}
+func registerSingleSettingStoreForTest[T source.TestValueType](tb TB, s source.TestSetting[T]) {
+ policyStore := source.NewTestStoreOf(tb, s)
+ MustRegisterStoreForTest(tb, "TestStore", setting.DeviceScope, policyStore)
+}
+
+func BenchmarkGetString(b *testing.B) {
+ loggerx.SetForTest(b, logger.Discard, logger.Discard)
+ RegisterWellKnownSettingsForTest(b)
+
+ wantControlURL := "https://login.tailscale.com"
+ registerSingleSettingStoreForTest(b, source.TestSettingOf(ControlURL, wantControlURL))
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ gotControlURL, _ := GetString(ControlURL, "https://controlplane.tailscale.com")
+ if gotControlURL != wantControlURL {
+ b.Fatalf("got %v; want %v", gotControlURL, wantControlURL)
+ }
+ }
+}
+
func TestSelectControlURL(t *testing.T) {
tests := []struct {
reg, disk, want string
@@ -499,3 +648,13 @@ func TestSelectControlURL(t *testing.T) {
}
}
}
+
+func errorsMatchForTest(got, want error) bool {
+ if got == nil && want == nil {
+ return true
+ }
+ if got == nil || want == nil {
+ return false
+ }
+ return errors.Is(got, want) || got.Error() == want.Error()
+}
diff --git a/util/syspolicy/syspolicy_windows.go b/util/syspolicy/syspolicy_windows.go
new file mode 100644
index 0000000000000..9d57e249e55e3
--- /dev/null
+++ b/util/syspolicy/syspolicy_windows.go
@@ -0,0 +1,92 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package syspolicy
+
+import (
+ "errors"
+ "fmt"
+ "os/user"
+
+ "tailscale.com/util/syspolicy/internal"
+ "tailscale.com/util/syspolicy/rsop"
+ "tailscale.com/util/syspolicy/setting"
+ "tailscale.com/util/syspolicy/source"
+ "tailscale.com/util/testenv"
+)
+
+func init() {
+ // On Windows, we should automatically register the Registry-based policy
+ // store for the device. If we are running in a user's security context
+ // (e.g., we're the GUI), we should also register the Registry policy store for
+ // the user. In the future, we should register (and unregister) user policy
+ // stores whenever a user connects to (or disconnects from) the local backend.
+ // This ensures the backend is aware of the user's policy settings and can send
+ // them to the GUI/CLI/Web clients on demand or whenever they change.
+ //
+ // Other platforms, such as macOS, iOS and Android, should register their
+ // platform-specific policy stores via [RegisterStore]
+ // (or [RegisterHandler] until they implement the [source.Store] interface).
+ //
+ // External code, such as the ipnlocal package, may choose to register
+ // additional policy stores, such as config files and policies received from
+ // the control plane.
+ internal.Init.MustDefer(func() error {
+ // Do not register or use default policy stores during tests.
+ // Each test should set up its own necessary configurations.
+ if testenv.InTest() {
+ return nil
+ }
+ return configureSyspolicy(nil)
+ })
+}
+
+// configureSyspolicy configures syspolicy for use on Windows,
+// either in test or regular builds depending on whether tb has a non-nil value.
+func configureSyspolicy(tb internal.TB) error {
+ const localSystemSID = "S-1-5-18"
+ // Always create and register a machine policy store that reads
+ // policy settings from the HKEY_LOCAL_MACHINE registry hive.
+ machineStore, err := source.NewMachinePlatformPolicyStore()
+ if err != nil {
+ return fmt.Errorf("failed to create the machine policy store: %v", err)
+ }
+ if tb == nil {
+ _, err = rsop.RegisterStore("Platform", setting.DeviceScope, machineStore)
+ } else {
+ _, err = rsop.RegisterStoreForTest(tb, "Platform", setting.DeviceScope, machineStore)
+ }
+ if err != nil {
+ return err
+ }
+ // Check whether the current process is running as Local System or not.
+ u, err := user.Current()
+ if err != nil {
+ return err
+ }
+ if u.Uid == localSystemSID {
+ return nil
+ }
+ // If it's not a Local System's process (e.g., it's the GUI rather than the tailscaled service),
+ // we should create and use a policy store for the current user that reads
+ // policy settings from that user's registry hive (HKEY_CURRENT_USER).
+ userStore, err := source.NewUserPlatformPolicyStore(0)
+ if err != nil {
+ return fmt.Errorf("failed to create the current user's policy store: %v", err)
+ }
+ if tb == nil {
+ _, err = rsop.RegisterStore("Platform", setting.CurrentUserScope, userStore)
+ } else {
+ _, err = rsop.RegisterStoreForTest(tb, "Platform", setting.CurrentUserScope, userStore)
+ }
+ if err != nil {
+ return err
+ }
+ // And also set [setting.CurrentUserScope] as the [setting.DefaultScope], so [GetString],
+ // [GetVisibility] and similar functions would be returning a merged result
+ // of the machine's and user's policies.
+ if !setting.SetDefaultScope(setting.CurrentUserScope) {
+ return errors.New("current scope already set")
+ }
+ return nil
+}
diff --git a/util/usermetric/metrics.go b/util/usermetric/metrics.go
new file mode 100644
index 0000000000000..7f85989ff062a
--- /dev/null
+++ b/util/usermetric/metrics.go
@@ -0,0 +1,69 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+// This file contains user-facing metrics that are used by multiple packages.
+// Use it to define more common metrics. Any changes to the registry and
+// metric types should be in usermetric.go.
+
+package usermetric
+
+import (
+ "sync"
+
+ "tailscale.com/metrics"
+)
+
+// Metrics contains user-facing metrics that are used by multiple packages.
+type Metrics struct {
+ initOnce sync.Once
+
+ droppedPacketsInbound *metrics.MultiLabelMap[DropLabels]
+ droppedPacketsOutbound *metrics.MultiLabelMap[DropLabels]
+}
+
+// DropReason is the reason why a packet was dropped.
+type DropReason string
+
+const (
+ // ReasonACL means that the packet was not permitted by ACL.
+ ReasonACL DropReason = "acl"
+
+ // ReasonError means that the packet was dropped because of an error.
+ ReasonError DropReason = "error"
+)
+
+// DropLabels contains common label(s) for dropped packet counters.
+type DropLabels struct {
+ Reason DropReason
+}
+
+// initOnce initializes the common metrics.
+func (r *Registry) initOnce() {
+ r.m.initOnce.Do(func() {
+ r.m.droppedPacketsInbound = NewMultiLabelMapWithRegistry[DropLabels](
+ r,
+ "tailscaled_inbound_dropped_packets_total",
+ "counter",
+ "Counts the number of dropped packets received by the node from other peers",
+ )
+ r.m.droppedPacketsOutbound = NewMultiLabelMapWithRegistry[DropLabels](
+ r,
+ "tailscaled_outbound_dropped_packets_total",
+ "counter",
+ "Counts the number of packets dropped while being sent to other peers",
+ )
+ })
+}
+
+// DroppedPacketsOutbound returns the outbound dropped packet metric, creating it
+// if necessary.
+func (r *Registry) DroppedPacketsOutbound() *metrics.MultiLabelMap[DropLabels] {
+ r.initOnce()
+ return r.m.droppedPacketsOutbound
+}
+
+// DroppedPacketsInbound returns the inbound dropped packet metric, creating it if necessary.
+func (r *Registry) DroppedPacketsInbound() *metrics.MultiLabelMap[DropLabels] {
+ r.initOnce()
+ return r.m.droppedPacketsInbound
+}
diff --git a/util/usermetric/usermetric.go b/util/usermetric/usermetric.go
index c964e08a76395..74e9447a64bbb 100644
--- a/util/usermetric/usermetric.go
+++ b/util/usermetric/usermetric.go
@@ -14,11 +14,15 @@ import (
"tailscale.com/metrics"
"tailscale.com/tsweb/varz"
+ "tailscale.com/util/set"
)
// Registry tracks user-facing metrics of various Tailscale subsystems.
type Registry struct {
vars expvar.Map
+
+ // m contains common metrics owned by the registry.
+ m Metrics
}
// NewMultiLabelMapWithRegistry creates and register a new
@@ -103,3 +107,13 @@ func (r *Registry) String() string {
return sb.String()
}
+
+// MetricNames returns the names of all the metrics in the registry.
+func (r *Registry) MetricNames() []string {
+ ret := make(set.Set[string])
+ r.vars.Do(func(kv expvar.KeyValue) {
+ ret.Add(kv.Key)
+ })
+
+ return ret.Slice()
+}
diff --git a/util/winutil/s4u/s4u_windows.go b/util/winutil/s4u/s4u_windows.go
index a12b4786a0d06..8926aaedc5071 100644
--- a/util/winutil/s4u/s4u_windows.go
+++ b/util/winutil/s4u/s4u_windows.go
@@ -17,6 +17,7 @@ import (
"slices"
"strconv"
"strings"
+ "sync"
"sync/atomic"
"unsafe"
@@ -128,9 +129,10 @@ func Login(logf logger.Logf, srcName string, u *user.User, capLevel CapabilityLe
if err != nil {
return nil, err
}
+ tokenCloseOnce := sync.OnceFunc(func() { token.Close() })
defer func() {
if err != nil {
- token.Close()
+ tokenCloseOnce()
}
}()
@@ -162,6 +164,7 @@ func Login(logf logger.Logf, srcName string, u *user.User, capLevel CapabilityLe
sessToken.Close()
}
}()
+ tokenCloseOnce()
}
userProfile, err := winutil.LoadUserProfile(sessToken, u)
diff --git a/version/distro/distro.go b/version/distro/distro.go
index 8865a834b97d3..ce61137cf3280 100644
--- a/version/distro/distro.go
+++ b/version/distro/distro.go
@@ -6,13 +6,12 @@ package distro
import (
"bytes"
- "io"
"os"
"runtime"
"strconv"
"tailscale.com/types/lazy"
- "tailscale.com/util/lineread"
+ "tailscale.com/util/lineiter"
)
type Distro string
@@ -132,18 +131,19 @@ func DSMVersion() int {
return v
}
// But when run from the command line, we have to read it from the file:
- lineread.File("/etc/VERSION", func(line []byte) error {
+ for lr := range lineiter.File("/etc/VERSION") {
+ line, err := lr.Value()
+ if err != nil {
+ break // but otherwise ignore
+ }
line = bytes.TrimSpace(line)
if string(line) == `majorversion="7"` {
- v = 7
- return io.EOF
+ return 7
}
if string(line) == `majorversion="6"` {
- v = 6
- return io.EOF
+ return 6
}
- return nil
- })
- return v
+ }
+ return 0
})
}
diff --git a/version/version.go b/version/version.go
index 4b96d15eaa336..5edea22ca6df0 100644
--- a/version/version.go
+++ b/version/version.go
@@ -7,6 +7,7 @@ package version
import (
"fmt"
"runtime/debug"
+ "strconv"
"strings"
tailscaleroot "tailscale.com"
@@ -169,3 +170,42 @@ func majorMinorPatch() string {
ret, _, _ := strings.Cut(Short(), "-")
return ret
}
+
+func isValidLongWithTwoRepos(v string) bool {
+ s := strings.Split(v, "-")
+ if len(s) != 3 {
+ return false
+ }
+ hexChunk := func(s string) bool {
+ if len(s) < 6 {
+ return false
+ }
+ for i := range len(s) {
+ b := s[i]
+ if (b < '0' || b > '9') && (b < 'a' || b > 'f') {
+ return false
+ }
+ }
+ return true
+ }
+
+ v, t, g := s[0], s[1], s[2]
+ if !strings.HasPrefix(t, "t") || !strings.HasPrefix(g, "g") ||
+ !hexChunk(t[1:]) || !hexChunk(g[1:]) {
+ return false
+ }
+ nums := strings.Split(v, ".")
+ if len(nums) != 3 {
+ return false
+ }
+ for i, n := range nums {
+ bits := 8
+ if i == 2 {
+ bits = 16
+ }
+ if _, err := strconv.ParseUint(n, 10, bits); err != nil {
+ return false
+ }
+ }
+ return true
+}
diff --git a/version/version_checkformat.go b/version/version_checkformat.go
new file mode 100644
index 0000000000000..05a97d1912dbe
--- /dev/null
+++ b/version/version_checkformat.go
@@ -0,0 +1,17 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build tailscale_go && android
+
+package version
+
+import "fmt"
+
+func init() {
+ // For official Android builds using the tailscale_go toolchain,
+ // panic if the builder is screwed up and we fail to stamp a valid
+ // version string.
+ if !isValidLongWithTwoRepos(Long()) {
+ panic(fmt.Sprintf("malformed version.Long value %q", Long()))
+ }
+}
diff --git a/version/version_internal_test.go b/version/version_internal_test.go
new file mode 100644
index 0000000000000..19aeab44228bd
--- /dev/null
+++ b/version/version_internal_test.go
@@ -0,0 +1,27 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package version
+
+import "testing"
+
+func TestIsValidLongWithTwoRepos(t *testing.T) {
+ tests := []struct {
+ long string
+ want bool
+ }{
+ {"1.2.3-t01234abcde-g01234abcde", true},
+ {"1.2.259-t01234abcde-g01234abcde", true}, // big patch version
+ {"1.2.3-t01234abcde", false}, // missing repo
+ {"1.2.3-g01234abcde", false}, // missing repo
+ {"-t01234abcde-g01234abcde", false},
+ {"1.2.3", false},
+ {"1.2.3-t01234abcde-g", false},
+ {"1.2.3-t01234abcde-gERRBUILDINFO", false},
+ }
+ for _, tt := range tests {
+ if got := isValidLongWithTwoRepos(tt.long); got != tt.want {
+ t.Errorf("IsValidLongWithTwoRepos(%q) = %v; want %v", tt.long, got, tt.want)
+ }
+ }
+}
diff --git a/wgengine/filter/filter.go b/wgengine/filter/filter.go
index 56224ac5d3fbc..9e5d8a37f2b24 100644
--- a/wgengine/filter/filter.go
+++ b/wgengine/filter/filter.go
@@ -202,16 +202,17 @@ func New(matches []Match, capTest CapTestFunc, localNets, logIPs *netipx.IPSet,
}
f := &Filter{
- logf: logf,
- matches4: matchesFamily(matches, netip.Addr.Is4),
- matches6: matchesFamily(matches, netip.Addr.Is6),
- cap4: capMatchesFunc(matches, netip.Addr.Is4),
- cap6: capMatchesFunc(matches, netip.Addr.Is6),
- local4: ipset.FalseContainsIPFunc(),
- local6: ipset.FalseContainsIPFunc(),
- logIPs4: ipset.FalseContainsIPFunc(),
- logIPs6: ipset.FalseContainsIPFunc(),
- state: state,
+ logf: logf,
+ matches4: matchesFamily(matches, netip.Addr.Is4),
+ matches6: matchesFamily(matches, netip.Addr.Is6),
+ cap4: capMatchesFunc(matches, netip.Addr.Is4),
+ cap6: capMatchesFunc(matches, netip.Addr.Is6),
+ local4: ipset.FalseContainsIPFunc(),
+ local6: ipset.FalseContainsIPFunc(),
+ logIPs4: ipset.FalseContainsIPFunc(),
+ logIPs6: ipset.FalseContainsIPFunc(),
+ state: state,
+ srcIPHasCap: capTest,
}
if localNets != nil {
p := localNets.Prefixes()
diff --git a/wgengine/magicsock/debughttp.go b/wgengine/magicsock/debughttp.go
index 6c07b0d5eaa83..aa109c242e27c 100644
--- a/wgengine/magicsock/debughttp.go
+++ b/wgengine/magicsock/debughttp.go
@@ -102,8 +102,7 @@ func (c *Conn) ServeHTTPDebug(w http.ResponseWriter, r *http.Request) {
sort.Slice(ent, func(i, j int) bool { return ent[i].pub.Less(ent[j].pub) })
peers := map[key.NodePublic]tailcfg.NodeView{}
- for i := range c.peers.Len() {
- p := c.peers.At(i)
+ for _, p := range c.peers.All() {
peers[p.Key()] = p
}
diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go
index 1a42c3b610f84..e9f07086271d5 100644
--- a/wgengine/magicsock/derp.go
+++ b/wgengine/magicsock/derp.go
@@ -649,9 +649,10 @@ func (c *Conn) runDerpReader(ctx context.Context, regionID int, dc *derphttp.Cli
}
type derpWriteRequest struct {
- addr netip.AddrPort
- pubKey key.NodePublic
- b []byte // copied; ownership passed to receiver
+ addr netip.AddrPort
+ pubKey key.NodePublic
+ b []byte // copied; ownership passed to receiver
+ isDisco bool
}
// runDerpWriter runs in a goroutine for the life of a DERP
@@ -673,8 +674,12 @@ func (c *Conn) runDerpWriter(ctx context.Context, dc *derphttp.Client, ch <-chan
if err != nil {
c.logf("magicsock: derp.Send(%v): %v", wr.addr, err)
metricSendDERPError.Add(1)
- } else {
- metricSendDERP.Add(1)
+ if !wr.isDisco {
+ c.metrics.outboundPacketsDroppedErrors.Add(1)
+ }
+ } else if !wr.isDisco {
+ c.metrics.outboundPacketsDERPTotal.Add(1)
+ c.metrics.outboundBytesDERPTotal.Add(int64(len(wr.b)))
}
}
}
@@ -695,7 +700,6 @@ func (c *connBind) receiveDERP(buffs [][]byte, sizes []int, eps []conn.Endpoint)
// No data read occurred. Wait for another packet.
continue
}
- metricRecvDataDERP.Add(1)
sizes[0] = n
eps[0] = ep
return 1, nil
@@ -733,8 +737,11 @@ func (c *Conn) processDERPReadResult(dm derpReadResult, b []byte) (n int, ep *en
ep.noteRecvActivity(ipp, mono.Now())
if stats := c.stats.Load(); stats != nil {
- stats.UpdateRxPhysical(ep.nodeAddr, ipp, dm.n)
+ stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, dm.n)
}
+
+ c.metrics.inboundPacketsDERPTotal.Add(1)
+ c.metrics.inboundBytesDERPTotal.Add(int64(n))
return n, ep
}
diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go
index 53ecb84de833b..bbba3181ce453 100644
--- a/wgengine/magicsock/endpoint.go
+++ b/wgengine/magicsock/endpoint.go
@@ -9,6 +9,7 @@ import (
"encoding/binary"
"errors"
"fmt"
+ "iter"
"math"
"math/rand/v2"
"net"
@@ -960,26 +961,40 @@ func (de *endpoint) send(buffs [][]byte) error {
de.noteBadEndpoint(udpAddr)
}
+ var txBytes int
+ for _, b := range buffs {
+ txBytes += len(b)
+ }
+
+ switch {
+ case udpAddr.Addr().Is4():
+ de.c.metrics.outboundPacketsIPv4Total.Add(int64(len(buffs)))
+ de.c.metrics.outboundBytesIPv4Total.Add(int64(txBytes))
+ case udpAddr.Addr().Is6():
+ de.c.metrics.outboundPacketsIPv6Total.Add(int64(len(buffs)))
+ de.c.metrics.outboundBytesIPv6Total.Add(int64(txBytes))
+ }
+
// TODO(raggi): needs updating for accuracy, as in error conditions we may have partial sends.
if stats := de.c.stats.Load(); err == nil && stats != nil {
- var txBytes int
- for _, b := range buffs {
- txBytes += len(b)
- }
- stats.UpdateTxPhysical(de.nodeAddr, udpAddr, txBytes)
+ stats.UpdateTxPhysical(de.nodeAddr, udpAddr, len(buffs), txBytes)
}
}
if derpAddr.IsValid() {
allOk := true
+ var txBytes int
for _, buff := range buffs {
- ok, _ := de.c.sendAddr(derpAddr, de.publicKey, buff)
- if stats := de.c.stats.Load(); stats != nil {
- stats.UpdateTxPhysical(de.nodeAddr, derpAddr, len(buff))
- }
+ const isDisco = false
+ ok, _ := de.c.sendAddr(derpAddr, de.publicKey, buff, isDisco)
+ txBytes += len(buff)
if !ok {
allOk = false
}
}
+
+ if stats := de.c.stats.Load(); stats != nil {
+ stats.UpdateTxPhysical(de.nodeAddr, derpAddr, len(buffs), txBytes)
+ }
if allOk {
return nil
}
@@ -1370,20 +1385,18 @@ func (de *endpoint) updateFromNode(n tailcfg.NodeView, heartbeatDisabled bool, p
}
func (de *endpoint) setEndpointsLocked(eps interface {
- Len() int
- At(i int) netip.AddrPort
+ All() iter.Seq2[int, netip.AddrPort]
}) {
for _, st := range de.endpointState {
st.index = indexSentinelDeleted // assume deleted until updated in next loop
}
var newIpps []netip.AddrPort
- for i := range eps.Len() {
+ for i, ipp := range eps.All() {
if i > math.MaxInt16 {
// Seems unlikely.
break
}
- ipp := eps.At(i)
if !ipp.IsValid() {
de.c.logf("magicsock: bogus netmap endpoint from %v", eps)
continue
diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go
index 08aff842d77aa..bff905caa5ae4 100644
--- a/wgengine/magicsock/magicsock.go
+++ b/wgengine/magicsock/magicsock.go
@@ -10,6 +10,7 @@ import (
"bytes"
"context"
"errors"
+ "expvar"
"fmt"
"io"
"net"
@@ -80,6 +81,58 @@ const (
socketBufferSize = 7 << 20
)
+// Path is a label indicating the type of path a packet took.
+type Path string
+
+const (
+ PathDirectIPv4 Path = "direct_ipv4"
+ PathDirectIPv6 Path = "direct_ipv6"
+ PathDERP Path = "derp"
+)
+
+type pathLabel struct {
+ // Path indicates the path that the packet took:
+ // - direct_ipv4
+ // - direct_ipv6
+ // - derp
+ Path Path
+}
+
+// metrics in wgengine contains the usermetric counters for magicsock. It
+// is however a bit special: all the metrics are labeled, but looking up
+// the metric every time we need to record it has an overhead, and involves
+// a lock in MultiLabelMap. The metrics are therefore instead created with
+// wgengine and the underlying expvar.Int is stored to be used directly.
+type metrics struct {
+ // inboundPacketsTotal is the total number of inbound packets received,
+ // labeled by the path the packet took.
+ inboundPacketsIPv4Total expvar.Int
+ inboundPacketsIPv6Total expvar.Int
+ inboundPacketsDERPTotal expvar.Int
+
+ // inboundBytesTotal is the total number of inbound bytes received,
+ // labeled by the path the packet took.
+ inboundBytesIPv4Total expvar.Int
+ inboundBytesIPv6Total expvar.Int
+ inboundBytesDERPTotal expvar.Int
+
+ // outboundPacketsTotal is the total number of outbound packets sent,
+ // labeled by the path the packet took.
+ outboundPacketsIPv4Total expvar.Int
+ outboundPacketsIPv6Total expvar.Int
+ outboundPacketsDERPTotal expvar.Int
+
+ // outboundBytesTotal is the total number of outbound bytes sent,
+ // labeled by the path the packet took.
+ outboundBytesIPv4Total expvar.Int
+ outboundBytesIPv6Total expvar.Int
+ outboundBytesDERPTotal expvar.Int
+
+ // outboundPacketsDroppedErrors is the total number of outbound packets
+ // dropped due to errors.
+ outboundPacketsDroppedErrors expvar.Int
+}
+
// A Conn routes UDP packets and actively manages a list of its endpoints.
type Conn struct {
// This block mirrors the contents and field order of the Options
@@ -321,6 +374,9 @@ type Conn struct {
// responsibility to ensure that traffic from these endpoints is routed
// to the node.
staticEndpoints views.Slice[netip.AddrPort]
+
+ // metrics contains the metrics for the magicsock instance.
+ metrics *metrics
}
// SetDebugLoggingEnabled controls whether spammy debug logging is enabled.
@@ -503,6 +559,8 @@ func NewConn(opts Options) (*Conn, error) {
UseDNSCache: true,
}
+ c.metrics = registerMetrics(opts.Metrics)
+
if d4, err := c.listenRawDisco("ip4"); err == nil {
c.logf("[v1] using BPF disco receiver for IPv4")
c.closeDisco4 = d4
@@ -520,6 +578,80 @@ func NewConn(opts Options) (*Conn, error) {
return c, nil
}
+// registerMetrics wires up the metrics for wgengine. Instead of
+// registering the label metrics directly, the underlying expvars are exposed.
+// See metrics for more info.
+func registerMetrics(reg *usermetric.Registry) *metrics {
+ pathDirectV4 := pathLabel{Path: PathDirectIPv4}
+ pathDirectV6 := pathLabel{Path: PathDirectIPv6}
+ pathDERP := pathLabel{Path: PathDERP}
+ inboundPacketsTotal := usermetric.NewMultiLabelMapWithRegistry[pathLabel](
+ reg,
+ "tailscaled_inbound_packets_total",
+ "counter",
+ "Counts the number of packets received from other peers",
+ )
+ inboundBytesTotal := usermetric.NewMultiLabelMapWithRegistry[pathLabel](
+ reg,
+ "tailscaled_inbound_bytes_total",
+ "counter",
+ "Counts the number of bytes received from other peers",
+ )
+ outboundPacketsTotal := usermetric.NewMultiLabelMapWithRegistry[pathLabel](
+ reg,
+ "tailscaled_outbound_packets_total",
+ "counter",
+ "Counts the number of packets sent to other peers",
+ )
+ outboundBytesTotal := usermetric.NewMultiLabelMapWithRegistry[pathLabel](
+ reg,
+ "tailscaled_outbound_bytes_total",
+ "counter",
+ "Counts the number of bytes sent to other peers",
+ )
+ outboundPacketsDroppedErrors := reg.DroppedPacketsOutbound()
+
+ m := new(metrics)
+
+ // Map clientmetrics to the usermetric counters.
+ metricRecvDataPacketsIPv4.Register(&m.inboundPacketsIPv4Total)
+ metricRecvDataPacketsIPv6.Register(&m.inboundPacketsIPv6Total)
+ metricRecvDataPacketsDERP.Register(&m.inboundPacketsDERPTotal)
+ metricSendUDP.Register(&m.outboundPacketsIPv4Total)
+ metricSendUDP.Register(&m.outboundPacketsIPv6Total)
+ metricSendDERP.Register(&m.outboundPacketsDERPTotal)
+
+ inboundPacketsTotal.Set(pathDirectV4, &m.inboundPacketsIPv4Total)
+ inboundPacketsTotal.Set(pathDirectV6, &m.inboundPacketsIPv6Total)
+ inboundPacketsTotal.Set(pathDERP, &m.inboundPacketsDERPTotal)
+
+ inboundBytesTotal.Set(pathDirectV4, &m.inboundBytesIPv4Total)
+ inboundBytesTotal.Set(pathDirectV6, &m.inboundBytesIPv6Total)
+ inboundBytesTotal.Set(pathDERP, &m.inboundBytesDERPTotal)
+
+ outboundPacketsTotal.Set(pathDirectV4, &m.outboundPacketsIPv4Total)
+ outboundPacketsTotal.Set(pathDirectV6, &m.outboundPacketsIPv6Total)
+ outboundPacketsTotal.Set(pathDERP, &m.outboundPacketsDERPTotal)
+
+ outboundBytesTotal.Set(pathDirectV4, &m.outboundBytesIPv4Total)
+ outboundBytesTotal.Set(pathDirectV6, &m.outboundBytesIPv6Total)
+ outboundBytesTotal.Set(pathDERP, &m.outboundBytesDERPTotal)
+
+ outboundPacketsDroppedErrors.Set(usermetric.DropLabels{Reason: usermetric.ReasonError}, &m.outboundPacketsDroppedErrors)
+
+ return m
+}
+
+// deregisterMetrics unregisters the underlying usermetrics expvar counters
+// from clientmetrics.
+func deregisterMetrics(m *metrics) {
+ metricRecvDataPacketsIPv4.UnregisterAll()
+ metricRecvDataPacketsIPv6.UnregisterAll()
+ metricRecvDataPacketsDERP.UnregisterAll()
+ metricSendUDP.UnregisterAll()
+ metricSendDERP.UnregisterAll()
+}
+
// InstallCaptureHook installs a callback which is called to
// log debug information into the pcap stream. This function
// can be called with a nil argument to uninstall the capture
@@ -988,8 +1120,8 @@ func (c *Conn) determineEndpoints(ctx context.Context) ([]tailcfg.Endpoint, erro
// re-run.
eps = c.endpointTracker.update(time.Now(), eps)
- for i := range c.staticEndpoints.Len() {
- addAddr(c.staticEndpoints.At(i), tailcfg.EndpointExplicitConf)
+ for _, ep := range c.staticEndpoints.All() {
+ addAddr(ep, tailcfg.EndpointExplicitConf)
}
if localAddr := c.pconn4.LocalAddr(); localAddr.IP.IsUnspecified() {
@@ -1078,8 +1210,13 @@ func (c *Conn) networkDown() bool { return !c.networkUp.Load() }
// Send implements conn.Bind.
//
// See https://pkg.go.dev/golang.zx2c4.com/wireguard/conn#Bind.Send
-func (c *Conn) Send(buffs [][]byte, ep conn.Endpoint) error {
+func (c *Conn) Send(buffs [][]byte, ep conn.Endpoint) (err error) {
n := int64(len(buffs))
+ defer func() {
+ if err != nil {
+ c.metrics.outboundPacketsDroppedErrors.Add(n)
+ }
+ }()
metricSendData.Add(n)
if c.networkDown() {
metricSendDataNetworkDown.Add(n)
@@ -1130,7 +1267,7 @@ func (c *Conn) sendUDPBatch(addr netip.AddrPort, buffs [][]byte) (sent bool, err
// sendUDP sends UDP packet b to ipp.
// See sendAddr's docs on the return value meanings.
-func (c *Conn) sendUDP(ipp netip.AddrPort, b []byte) (sent bool, err error) {
+func (c *Conn) sendUDP(ipp netip.AddrPort, b []byte, isDisco bool) (sent bool, err error) {
if runtime.GOOS == "js" {
return false, errNoUDP
}
@@ -1139,8 +1276,15 @@ func (c *Conn) sendUDP(ipp netip.AddrPort, b []byte) (sent bool, err error) {
metricSendUDPError.Add(1)
_ = c.maybeRebindOnError(runtime.GOOS, err)
} else {
- if sent {
- metricSendUDP.Add(1)
+ if sent && !isDisco {
+ switch {
+ case ipp.Addr().Is4():
+ c.metrics.outboundPacketsIPv4Total.Add(1)
+ c.metrics.outboundBytesIPv4Total.Add(int64(len(b)))
+ case ipp.Addr().Is6():
+ c.metrics.outboundPacketsIPv6Total.Add(1)
+ c.metrics.outboundBytesIPv6Total.Add(int64(len(b)))
+ }
}
}
return
@@ -1225,9 +1369,9 @@ func (c *Conn) sendUDPStd(addr netip.AddrPort, b []byte) (sent bool, err error)
// An example of when they might be different: sending to an
// IPv6 address when the local machine doesn't have IPv6 support
// returns (false, nil); it's not an error, but nothing was sent.
-func (c *Conn) sendAddr(addr netip.AddrPort, pubKey key.NodePublic, b []byte) (sent bool, err error) {
+func (c *Conn) sendAddr(addr netip.AddrPort, pubKey key.NodePublic, b []byte, isDisco bool) (sent bool, err error) {
if addr.Addr() != tailcfg.DerpMagicIPAddr {
- return c.sendUDP(addr, b)
+ return c.sendUDP(addr, b, isDisco)
}
regionID := int(addr.Port())
@@ -1248,7 +1392,7 @@ func (c *Conn) sendAddr(addr netip.AddrPort, pubKey key.NodePublic, b []byte) (s
case <-c.donec:
metricSendDERPErrorClosed.Add(1)
return false, errConnClosed
- case ch <- derpWriteRequest{addr, pubKey, pkt}:
+ case ch <- derpWriteRequest{addr, pubKey, pkt, isDisco}:
metricSendDERPQueued.Add(1)
return true, nil
default:
@@ -1278,19 +1422,24 @@ func (c *Conn) putReceiveBatch(batch *receiveBatch) {
c.receiveBatchPool.Put(batch)
}
-// receiveIPv4 creates an IPv4 ReceiveFunc reading from c.pconn4.
func (c *Conn) receiveIPv4() conn.ReceiveFunc {
- return c.mkReceiveFunc(&c.pconn4, c.health.ReceiveFuncStats(health.ReceiveIPv4), metricRecvDataIPv4)
+ return c.mkReceiveFunc(&c.pconn4, c.health.ReceiveFuncStats(health.ReceiveIPv4),
+ &c.metrics.inboundPacketsIPv4Total,
+ &c.metrics.inboundBytesIPv4Total,
+ )
}
// receiveIPv6 creates an IPv6 ReceiveFunc reading from c.pconn6.
func (c *Conn) receiveIPv6() conn.ReceiveFunc {
- return c.mkReceiveFunc(&c.pconn6, c.health.ReceiveFuncStats(health.ReceiveIPv6), metricRecvDataIPv6)
+ return c.mkReceiveFunc(&c.pconn6, c.health.ReceiveFuncStats(health.ReceiveIPv6),
+ &c.metrics.inboundPacketsIPv6Total,
+ &c.metrics.inboundBytesIPv6Total,
+ )
}
// mkReceiveFunc creates a ReceiveFunc reading from ruc.
-// The provided healthItem and metric are updated if non-nil.
-func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFuncStats, metric *clientmetric.Metric) conn.ReceiveFunc {
+// The provided healthItem and metrics are updated if non-nil.
+func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFuncStats, packetMetric, bytesMetric *expvar.Int) conn.ReceiveFunc {
// epCache caches an IPPort->endpoint for hot flows.
var epCache ippEndpointCache
@@ -1327,8 +1476,11 @@ func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFu
}
ipp := msg.Addr.(*net.UDPAddr).AddrPort()
if ep, ok := c.receiveIP(msg.Buffers[0][:msg.N], ipp, &epCache); ok {
- if metric != nil {
- metric.Add(1)
+ if packetMetric != nil {
+ packetMetric.Add(1)
+ }
+ if bytesMetric != nil {
+ bytesMetric.Add(int64(msg.N))
}
eps[i] = ep
sizes[i] = msg.N
@@ -1384,7 +1536,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *ippEndpointCache)
ep.lastRecvUDPAny.StoreAtomic(now)
ep.noteRecvActivity(ipp, now)
if stats := c.stats.Load(); stats != nil {
- stats.UpdateRxPhysical(ep.nodeAddr, ipp, len(b))
+ stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, len(b))
}
return ep, true
}
@@ -1438,7 +1590,8 @@ func (c *Conn) sendDiscoMessage(dst netip.AddrPort, dstKey key.NodePublic, dstDi
box := di.sharedKey.Seal(m.AppendMarshal(nil))
pkt = append(pkt, box...)
- sent, err = c.sendAddr(dst, dstKey, pkt)
+ const isDisco = true
+ sent, err = c.sendAddr(dst, dstKey, pkt, isDisco)
if sent {
if logLevel == discoLog || (logLevel == discoVerboseLog && debugDisco()) {
node := "?"
@@ -2207,16 +2360,14 @@ func (c *Conn) logEndpointCreated(n tailcfg.NodeView) {
fmt.Fprintf(w, "derp=%v%s ", regionID, code)
}
- for i := range n.AllowedIPs().Len() {
- a := n.AllowedIPs().At(i)
+ for _, a := range n.AllowedIPs().All() {
if a.IsSingleIP() {
fmt.Fprintf(w, "aip=%v ", a.Addr())
} else {
fmt.Fprintf(w, "aip=%v ", a)
}
}
- for i := range n.Endpoints().Len() {
- ep := n.Endpoints().At(i)
+ for _, ep := range n.Endpoints().All() {
fmt.Fprintf(w, "ep=%v ", ep)
}
}))
@@ -2377,6 +2528,8 @@ func (c *Conn) Close() error {
pinger.Close()
}
+ deregisterMetrics(c.metrics)
+
return nil
}
@@ -2860,6 +3013,14 @@ func (c *Conn) DebugPickNewDERP() error {
return errors.New("too few regions")
}
+func (c *Conn) DebugForcePreferDERP(n int) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ c.logf("magicsock: [debug] force preferred DERP set to: %d", n)
+ c.netChecker.SetForcePreferredDERP(n)
+}
+
// portableTrySetSocketBuffer sets SO_SNDBUF and SO_RECVBUF on pconn to socketBufferSize,
// logging an error if it occurs.
func portableTrySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) {
@@ -2930,17 +3091,17 @@ var (
metricSendDERPErrorChan = clientmetric.NewCounter("magicsock_send_derp_error_chan")
metricSendDERPErrorClosed = clientmetric.NewCounter("magicsock_send_derp_error_closed")
metricSendDERPErrorQueue = clientmetric.NewCounter("magicsock_send_derp_error_queue")
- metricSendUDP = clientmetric.NewCounter("magicsock_send_udp")
+ metricSendUDP = clientmetric.NewAggregateCounter("magicsock_send_udp")
metricSendUDPError = clientmetric.NewCounter("magicsock_send_udp_error")
- metricSendDERP = clientmetric.NewCounter("magicsock_send_derp")
+ metricSendDERP = clientmetric.NewAggregateCounter("magicsock_send_derp")
metricSendDERPError = clientmetric.NewCounter("magicsock_send_derp_error")
// Data packets (non-disco)
metricSendData = clientmetric.NewCounter("magicsock_send_data")
metricSendDataNetworkDown = clientmetric.NewCounter("magicsock_send_data_network_down")
- metricRecvDataDERP = clientmetric.NewCounter("magicsock_recv_data_derp")
- metricRecvDataIPv4 = clientmetric.NewCounter("magicsock_recv_data_ipv4")
- metricRecvDataIPv6 = clientmetric.NewCounter("magicsock_recv_data_ipv6")
+ metricRecvDataPacketsDERP = clientmetric.NewAggregateCounter("magicsock_recv_data_derp")
+ metricRecvDataPacketsIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_ipv4")
+ metricRecvDataPacketsIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_ipv6")
// Disco packets
metricSendDiscoUDP = clientmetric.NewCounter("magicsock_disco_send_udp")
diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go
index 6b2d961b9b6fd..1b3f8ec73c16e 100644
--- a/wgengine/magicsock/magicsock_test.go
+++ b/wgengine/magicsock/magicsock_test.go
@@ -28,6 +28,7 @@ import (
"time"
"unsafe"
+ qt "github.com/frankban/quicktest"
wgconn "github.com/tailscale/wireguard-go/conn"
"github.com/tailscale/wireguard-go/device"
"github.com/tailscale/wireguard-go/tun/tuntest"
@@ -62,6 +63,7 @@ import (
"tailscale.com/types/nettype"
"tailscale.com/types/ptr"
"tailscale.com/util/cibuild"
+ "tailscale.com/util/must"
"tailscale.com/util/racebuild"
"tailscale.com/util/set"
"tailscale.com/util/usermetric"
@@ -175,6 +177,7 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListen
if err != nil {
t.Fatalf("netmon.New: %v", err)
}
+ ht := new(health.Tracker)
var reg usermetric.Registry
epCh := make(chan []tailcfg.Endpoint, 100) // arbitrary
@@ -182,6 +185,7 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListen
NetMon: netMon,
Metrics: &reg,
Logf: logf,
+ HealthTracker: ht,
DisablePortMapper: true,
TestOnlyPacketListener: l,
EndpointsFunc: func(eps []tailcfg.Endpoint) {
@@ -1188,6 +1192,91 @@ func testTwoDevicePing(t *testing.T, d *devices) {
checkStats(t, m1, m1Conns)
checkStats(t, m2, m2Conns)
})
+ t.Run("compare-metrics-stats", func(t *testing.T) {
+ setT(t)
+ defer setT(outerT)
+ m1.conn.resetMetricsForTest()
+ m1.stats.TestExtract()
+ m2.conn.resetMetricsForTest()
+ m2.stats.TestExtract()
+ t.Logf("Metrics before: %s\n", m1.metrics.String())
+ ping1(t)
+ ping2(t)
+ assertConnStatsAndUserMetricsEqual(t, m1)
+ assertConnStatsAndUserMetricsEqual(t, m2)
+ t.Logf("Metrics after: %s\n", m1.metrics.String())
+ })
+}
+
+func (c *Conn) resetMetricsForTest() {
+ c.metrics.inboundBytesIPv4Total.Set(0)
+ c.metrics.inboundPacketsIPv4Total.Set(0)
+ c.metrics.outboundBytesIPv4Total.Set(0)
+ c.metrics.outboundPacketsIPv4Total.Set(0)
+ c.metrics.inboundBytesIPv6Total.Set(0)
+ c.metrics.inboundPacketsIPv6Total.Set(0)
+ c.metrics.outboundBytesIPv6Total.Set(0)
+ c.metrics.outboundPacketsIPv6Total.Set(0)
+ c.metrics.inboundBytesDERPTotal.Set(0)
+ c.metrics.inboundPacketsDERPTotal.Set(0)
+ c.metrics.outboundBytesDERPTotal.Set(0)
+ c.metrics.outboundPacketsDERPTotal.Set(0)
+}
+
+func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) {
+ _, phys := ms.stats.TestExtract()
+
+ physIPv4RxBytes := int64(0)
+ physIPv4TxBytes := int64(0)
+ physDERPRxBytes := int64(0)
+ physDERPTxBytes := int64(0)
+ physIPv4RxPackets := int64(0)
+ physIPv4TxPackets := int64(0)
+ physDERPRxPackets := int64(0)
+ physDERPTxPackets := int64(0)
+ for conn, count := range phys {
+ t.Logf("physconn src: %s, dst: %s", conn.Src.String(), conn.Dst.String())
+ if conn.Dst.String() == "127.3.3.40:1" {
+ physDERPRxBytes += int64(count.RxBytes)
+ physDERPTxBytes += int64(count.TxBytes)
+ physDERPRxPackets += int64(count.RxPackets)
+ physDERPTxPackets += int64(count.TxPackets)
+ } else {
+ physIPv4RxBytes += int64(count.RxBytes)
+ physIPv4TxBytes += int64(count.TxBytes)
+ physIPv4RxPackets += int64(count.RxPackets)
+ physIPv4TxPackets += int64(count.TxPackets)
+ }
+ }
+
+ metricIPv4RxBytes := ms.conn.metrics.inboundBytesIPv4Total.Value()
+ metricIPv4RxPackets := ms.conn.metrics.inboundPacketsIPv4Total.Value()
+ metricIPv4TxBytes := ms.conn.metrics.outboundBytesIPv4Total.Value()
+ metricIPv4TxPackets := ms.conn.metrics.outboundPacketsIPv4Total.Value()
+
+ metricDERPRxBytes := ms.conn.metrics.inboundBytesDERPTotal.Value()
+ metricDERPRxPackets := ms.conn.metrics.inboundPacketsDERPTotal.Value()
+ metricDERPTxBytes := ms.conn.metrics.outboundBytesDERPTotal.Value()
+ metricDERPTxPackets := ms.conn.metrics.outboundPacketsDERPTotal.Value()
+
+ c := qt.New(t)
+ c.Assert(physDERPRxBytes, qt.Equals, metricDERPRxBytes)
+ c.Assert(physDERPTxBytes, qt.Equals, metricDERPTxBytes)
+ c.Assert(physIPv4RxBytes, qt.Equals, metricIPv4RxBytes)
+ c.Assert(physIPv4TxBytes, qt.Equals, metricIPv4TxBytes)
+ c.Assert(physDERPRxPackets, qt.Equals, metricDERPRxPackets)
+ c.Assert(physDERPTxPackets, qt.Equals, metricDERPTxPackets)
+ c.Assert(physIPv4RxPackets, qt.Equals, metricIPv4RxPackets)
+ c.Assert(physIPv4TxPackets, qt.Equals, metricIPv4TxPackets)
+
+ // Validate that the usermetrics and clientmetrics are in sync
+ // Note: the clientmetrics are global, this means that when they are registering with the
+ // wgengine, multiple in-process nodes used by this test will be updating the same metrics. This is why we need to multiply
+ // the metrics by 2 to get the expected value.
+ // TODO(kradalby): https://github.com/tailscale/tailscale/issues/13420
+ c.Assert(metricSendUDP.Value(), qt.Equals, metricIPv4TxPackets*2)
+ c.Assert(metricRecvDataPacketsIPv4.Value(), qt.Equals, metricIPv4RxPackets*2)
+ c.Assert(metricRecvDataPacketsDERP.Value(), qt.Equals, metricDERPRxPackets*2)
}
func TestDiscoMessage(t *testing.T) {
@@ -2995,3 +3084,27 @@ func TestMaybeRebindOnError(t *testing.T) {
}
})
}
+
+func TestNetworkDownSendErrors(t *testing.T) {
+ netMon := must.Get(netmon.New(t.Logf))
+ defer netMon.Close()
+
+ reg := new(usermetric.Registry)
+ conn := must.Get(NewConn(Options{
+ DisablePortMapper: true,
+ Logf: t.Logf,
+ NetMon: netMon,
+ Metrics: reg,
+ }))
+ defer conn.Close()
+
+ conn.SetNetworkUp(false)
+ if err := conn.Send([][]byte{{00}}, &lazyEndpoint{}); err == nil {
+ t.Error("expected error, got nil")
+ }
+ resp := httptest.NewRecorder()
+ reg.Handler(resp, new(http.Request))
+ if !strings.Contains(resp.Body.String(), `tailscaled_outbound_dropped_packets_total{reason="error"} 1`) {
+ t.Errorf("expected NetworkDown to increment packet dropped metric; got %q", resp.Body.String())
+ }
+}
diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go
index 3185c5d556aa9..20eac06e6b8fd 100644
--- a/wgengine/netstack/netstack.go
+++ b/wgengine/netstack/netstack.go
@@ -32,7 +32,6 @@ import (
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
"gvisor.dev/gvisor/pkg/waiter"
- "tailscale.com/drive"
"tailscale.com/envknob"
"tailscale.com/ipn/ipnlocal"
"tailscale.com/metrics"
@@ -174,19 +173,18 @@ type Impl struct {
// It can only be set before calling Start.
ProcessSubnets bool
- ipstack *stack.Stack
- linkEP *linkEndpoint
- tundev *tstun.Wrapper
- e wgengine.Engine
- pm *proxymap.Mapper
- mc *magicsock.Conn
- logf logger.Logf
- dialer *tsdial.Dialer
- ctx context.Context // alive until Close
- ctxCancel context.CancelFunc // called on Close
- lb *ipnlocal.LocalBackend // or nil
- dns *dns.Manager
- driveForLocal drive.FileSystemForLocal // or nil
+ ipstack *stack.Stack
+ linkEP *linkEndpoint
+ tundev *tstun.Wrapper
+ e wgengine.Engine
+ pm *proxymap.Mapper
+ mc *magicsock.Conn
+ logf logger.Logf
+ dialer *tsdial.Dialer
+ ctx context.Context // alive until Close
+ ctxCancel context.CancelFunc // called on Close
+ lb *ipnlocal.LocalBackend // or nil
+ dns *dns.Manager
// loopbackPort, if non-nil, will enable Impl to loop back (dnat to
// :loopbackPort) TCP & UDP flows originally
@@ -288,7 +286,7 @@ func setTCPBufSizes(ipstack *stack.Stack) error {
}
// Create creates and populates a new Impl.
-func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magicsock.Conn, dialer *tsdial.Dialer, dns *dns.Manager, pm *proxymap.Mapper, driveForLocal drive.FileSystemForLocal) (*Impl, error) {
+func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magicsock.Conn, dialer *tsdial.Dialer, dns *dns.Manager, pm *proxymap.Mapper) (*Impl, error) {
if mc == nil {
return nil, errors.New("nil magicsock.Conn")
}
@@ -382,7 +380,6 @@ func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magi
connsInFlightByClient: make(map[netip.Addr]int),
packetsInFlight: make(map[stack.TransportEndpointID]struct{}),
dns: dns,
- driveForLocal: driveForLocal,
}
loopbackPort, ok := envknob.LookupInt("TS_DEBUG_NETSTACK_LOOPBACK_PORT")
if ok && loopbackPort >= 0 && loopbackPort <= math.MaxUint16 {
@@ -646,13 +643,11 @@ func (ns *Impl) UpdateNetstackIPs(nm *netmap.NetworkMap) {
newPfx := make(map[netip.Prefix]bool)
if selfNode.Valid() {
- for i := range selfNode.Addresses().Len() {
- p := selfNode.Addresses().At(i)
+ for _, p := range selfNode.Addresses().All() {
newPfx[p] = true
}
if ns.ProcessSubnets {
- for i := range selfNode.AllowedIPs().Len() {
- p := selfNode.AllowedIPs().At(i)
+ for _, p := range selfNode.AllowedIPs().All() {
newPfx[p] = true
}
}
diff --git a/wgengine/netstack/netstack_test.go b/wgengine/netstack/netstack_test.go
index 1bfc76fef097f..823acee9156b7 100644
--- a/wgengine/netstack/netstack_test.go
+++ b/wgengine/netstack/netstack_test.go
@@ -64,8 +64,9 @@ func TestInjectInboundLeak(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+ t.Cleanup(lb.Shutdown)
- ns, err := Create(logf, tunWrap, eng, sys.MagicSock.Get(), dialer, sys.DNSManager.Get(), sys.ProxyMapper(), nil)
+ ns, err := Create(logf, tunWrap, eng, sys.MagicSock.Get(), dialer, sys.DNSManager.Get(), sys.ProxyMapper())
if err != nil {
t.Fatal(err)
}
@@ -116,7 +117,7 @@ func makeNetstack(tb testing.TB, config func(*Impl)) *Impl {
tb.Cleanup(func() { eng.Close() })
sys.Set(eng)
- ns, err := Create(logf, sys.Tun.Get(), eng, sys.MagicSock.Get(), dialer, sys.DNSManager.Get(), sys.ProxyMapper(), nil)
+ ns, err := Create(logf, sys.Tun.Get(), eng, sys.MagicSock.Get(), dialer, sys.DNSManager.Get(), sys.ProxyMapper())
if err != nil {
tb.Fatal(err)
}
@@ -126,6 +127,7 @@ func makeNetstack(tb testing.TB, config func(*Impl)) *Impl {
if err != nil {
tb.Fatalf("NewLocalBackend: %v", err)
}
+ tb.Cleanup(lb.Shutdown)
ns.atomicIsLocalIPFunc.Store(func(netip.Addr) bool { return true })
if config != nil {
diff --git a/wgengine/pendopen.go b/wgengine/pendopen.go
index 340c7e0f3f7be..7db07c685aa75 100644
--- a/wgengine/pendopen.go
+++ b/wgengine/pendopen.go
@@ -207,8 +207,7 @@ func (e *userspaceEngine) onOpenTimeout(flow flowtrack.Tuple) {
ps, found := e.getPeerStatusLite(n.Key())
if !found {
onlyZeroRoute := true // whether peerForIP returned n only because its /0 route matched
- for i := range n.AllowedIPs().Len() {
- r := n.AllowedIPs().At(i)
+ for _, r := range n.AllowedIPs().All() {
if r.Bits() != 0 && r.Contains(flow.DstAddr()) {
onlyZeroRoute = false
break
diff --git a/wgengine/userspace.go b/wgengine/userspace.go
index fc204736a1da2..81f8000e0d557 100644
--- a/wgengine/userspace.go
+++ b/wgengine/userspace.go
@@ -852,8 +852,7 @@ func (e *userspaceEngine) updateActivityMapsLocked(trackNodes []key.NodePublic,
// hasOverlap checks if there is a IPPrefix which is common amongst the two
// provided slices.
func hasOverlap(aips, rips views.Slice[netip.Prefix]) bool {
- for i := range aips.Len() {
- aip := aips.At(i)
+ for _, aip := range aips.All() {
if views.SliceContains(rips, aip) {
return true
}
@@ -1236,7 +1235,7 @@ func (e *userspaceEngine) linkChange(delta *netmon.ChangeDelta) {
// and Apple platforms.
if changed {
switch runtime.GOOS {
- case "linux", "android", "ios", "darwin":
+ case "linux", "android", "ios", "darwin", "openbsd":
e.wgLock.Lock()
dnsCfg := e.lastDNSConfig
e.wgLock.Unlock()
@@ -1329,9 +1328,9 @@ func (e *userspaceEngine) mySelfIPMatchingFamily(dst netip.Addr) (src netip.Addr
if addrs.Len() == 0 {
return zero, errors.New("no self address in netmap")
}
- for i := range addrs.Len() {
- if a := addrs.At(i); a.IsSingleIP() && a.Addr().BitLen() == dst.BitLen() {
- return a.Addr(), nil
+ for _, p := range addrs.All() {
+ if p.IsSingleIP() && p.Addr().BitLen() == dst.BitLen() {
+ return p.Addr(), nil
}
}
return zero, errors.New("no self address in netmap matching address family")
diff --git a/wgengine/wgcfg/nmcfg/nmcfg.go b/wgengine/wgcfg/nmcfg/nmcfg.go
index d156f7fcb0ef2..e7d5edf150537 100644
--- a/wgengine/wgcfg/nmcfg/nmcfg.go
+++ b/wgengine/wgcfg/nmcfg/nmcfg.go
@@ -40,8 +40,7 @@ func cidrIsSubnet(node tailcfg.NodeView, cidr netip.Prefix) bool {
if !cidr.IsSingleIP() {
return true
}
- for i := range node.Addresses().Len() {
- selfCIDR := node.Addresses().At(i)
+ for _, selfCIDR := range node.Addresses().All() {
if cidr == selfCIDR {
return false
}
@@ -110,8 +109,7 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags,
cpeer.V4MasqAddr = peer.SelfNodeV4MasqAddrForThisPeer()
cpeer.V6MasqAddr = peer.SelfNodeV6MasqAddrForThisPeer()
cpeer.IsJailed = peer.IsJailed()
- for i := range peer.AllowedIPs().Len() {
- allowedIP := peer.AllowedIPs().At(i)
+ for _, allowedIP := range peer.AllowedIPs().All() {
if allowedIP.Bits() == 0 && peer.StableID() != exitNode {
if didExitNodeWarn {
// Don't log about both the IPv4 /0 and IPv6 /0.
diff --git a/words/scales.txt b/words/scales.txt
index f27dfc5c4aa36..2fe849bb9cee1 100644
--- a/words/scales.txt
+++ b/words/scales.txt
@@ -391,3 +391,11 @@ godzilla
sirius
vector
cherimoya
+shilling
+kettle
+kitchen
+fahrenheit
+rankine
+piano
+ruler
+scoville