diff --git a/Dockerfile b/Dockerfile
index 8534563f6cf57..8166000eda6fc 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,17 +1,13 @@
# Copyright (c) Tailscale Inc & AUTHORS
# SPDX-License-Identifier: BSD-3-Clause
-############################################################################
+# Note that this Dockerfile is currently NOT used to build any of the published
+# Tailscale container images and may have drifted from the image build mechanism
+# we use.
+# Tailscale images are currently built using https://github.com/tailscale/mkctr,
+# and the build script can be found in ./build_docker.sh.
#
-# WARNING: Tailscale is not yet officially supported in container
-# environments, such as Docker and Kubernetes. Though it should work, we
-# don't regularly test it, and we know there are some feature limitations.
#
-# See current bugs tagged "containers":
-# https://github.com/tailscale/tailscale/labels/containers
-#
-############################################################################
-
# This Dockerfile includes all the tailscale binaries.
#
# To build the Dockerfile:
@@ -46,7 +42,7 @@ RUN go install \
gvisor.dev/gvisor/pkg/tcpip/stack \
golang.org/x/crypto/ssh \
golang.org/x/crypto/acme \
- nhooyr.io/websocket \
+ github.com/coder/websocket \
github.com/mdlayher/netlink
COPY . .
diff --git a/VERSION.txt b/VERSION.txt
index 832e9afb6c139..0834888f55808 100644
--- a/VERSION.txt
+++ b/VERSION.txt
@@ -1 +1 @@
-1.70.0
+1.72.0
diff --git a/build_docker.sh b/build_docker.sh
index 43665172a60ec..1cbdc4b9ef8e8 100755
--- a/build_docker.sh
+++ b/build_docker.sh
@@ -1,21 +1,11 @@
#!/usr/bin/env sh
-
-#
-# Runs `go build` with flags configured for docker distribution. All
-# it does differently from `go build` is burn git commit and version
-# information into the binaries inside docker, so that we can track down user
-# issues.
-#
-############################################################################
-#
-# WARNING: Tailscale is not yet officially supported in container
-# environments, such as Docker and Kubernetes. Though it should work, we
-# don't regularly test it, and we know there are some feature limitations.
-#
-# See current bugs tagged "containers":
-# https://github.com/tailscale/tailscale/labels/containers
#
-############################################################################
+# This script builds Tailscale container images using
+# github.com/tailscale/mkctr.
+# By default the images will be tagged with the current version and git
+# hash of this repository as produced by ./cmd/mkversion.
+# This is the image build mechanism used to build the official Tailscale
+# container images.
set -eu
@@ -49,7 +39,7 @@ case "$TARGET" in
-X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \
--base="${BASE}" \
--tags="${TAGS}" \
- --gotags="ts_kube" \
+ --gotags="ts_kube,ts_package_container" \
--repos="${REPOS}" \
--push="${PUSH}" \
--target="${PLATFORM}" \
diff --git a/client/tailscale/acl.go b/client/tailscale/acl.go
index 48a030a8bd7b5..fc672ded881b4 100644
--- a/client/tailscale/acl.go
+++ b/client/tailscale/acl.go
@@ -161,7 +161,12 @@ func (c *Client) ACLHuJSON(ctx context.Context) (acl *ACLHuJSON, err error) {
// ACLTestFailureSummary specifies the JSON format sent to the
// JavaScript client to be rendered in the HTML.
type ACLTestFailureSummary struct {
- User string `json:"user,omitempty"`
+ // User is the source ("src") value of the ACL test that failed.
+ // The name "user" is a legacy holdover from the original naming and
+ // is kept for compatibility but it may also contain any value
+	// that's valid in an ACL test "src" field.
+ User string `json:"user,omitempty"`
+
Errors []string `json:"errors,omitempty"`
Warnings []string `json:"warnings,omitempty"`
}
@@ -281,6 +286,9 @@ type UserRuleMatch struct {
Users []string `json:"users"`
Ports []string `json:"ports"`
LineNumber int `json:"lineNumber"`
+ // Via is the list of targets through which Users can access Ports.
+ // See https://tailscale.com/kb/1378/via for more information.
+ Via []string `json:"via,omitempty"`
// Postures is a list of posture policies that are
// associated with this match. The rules can be looked
diff --git a/client/tailscale/localclient.go b/client/tailscale/localclient.go
index 67bd0c5cfaba5..6f27e56b86fc4 100644
--- a/client/tailscale/localclient.go
+++ b/client/tailscale/localclient.go
@@ -69,6 +69,14 @@ type LocalClient struct {
// connecting to the GUI client variants.
UseSocketOnly bool
+ // OmitAuth, if true, omits sending the local Tailscale daemon any
+ // authentication token that might be required by the platform.
+ //
+ // As of 2024-08-12, only macOS uses an authentication token. OmitAuth is
+ // meant for when Dial is set and the LocalAPI is being proxied to a
+ // different operating system, such as in integration tests.
+ OmitAuth bool
+
// tsClient does HTTP requests to the local Tailscale daemon.
// It's lazily initialized on first use.
tsClient *http.Client
@@ -124,8 +132,10 @@ func (lc *LocalClient) DoLocalRequest(req *http.Request) (*http.Response, error)
},
}
})
- if _, token, err := safesocket.LocalTCPPortAndToken(); err == nil {
- req.SetBasicAuth("", token)
+ if !lc.OmitAuth {
+ if _, token, err := safesocket.LocalTCPPortAndToken(); err == nil {
+ req.SetBasicAuth("", token)
+ }
}
return lc.tsClient.Do(req)
}
@@ -933,7 +943,20 @@ func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err e
//
// API maturity: this is considered a stable API.
func (lc *LocalClient) CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) {
- res, err := lc.send(ctx, "GET", "/localapi/v0/cert/"+domain+"?type=pair", 200, nil)
+ return lc.CertPairWithValidity(ctx, domain, 0)
+}
+
+// CertPairWithValidity returns a cert and private key for the provided DNS
+// domain.
+//
+// It returns a cached certificate from disk if it's still valid.
+// When minValidity is non-zero, the returned certificate will be valid for at
+// least the given duration, if permitted by the CA. If the certificate is
+// valid, but for less than minValidity, it will be synchronously renewed.
+//
+// API maturity: this is considered a stable API.
+func (lc *LocalClient) CertPairWithValidity(ctx context.Context, domain string, minValidity time.Duration) (certPEM, keyPEM []byte, err error) {
+ res, err := lc.send(ctx, "GET", fmt.Sprintf("/localapi/v0/cert/%s?type=pair&min_validity=%s", domain, minValidity), 200, nil)
if err != nil {
return nil, nil, err
}
diff --git a/client/web/package.json b/client/web/package.json
index 598ae3e518b1c..4b3afb1df6ef5 100644
--- a/client/web/package.json
+++ b/client/web/package.json
@@ -3,7 +3,7 @@
"version": "0.0.1",
"license": "BSD-3-Clause",
"engines": {
- "node": "18.16.1",
+ "node": "18.20.4",
"yarn": "1.22.19"
},
"type": "module",
diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go
index 630ef0bf6bba2..fdf71c3eae398 100644
--- a/cmd/containerboot/main.go
+++ b/cmd/containerboot/main.go
@@ -52,6 +52,12 @@
// ${TS_CERT_DOMAIN}, it will be replaced with the value of the available FQDN.
// It cannot be used in conjunction with TS_DEST_IP. The file is watched for changes,
// and will be re-applied when it changes.
+// - TS_HEALTHCHECK_ADDR_PORT: if specified, an HTTP health endpoint will be
// served at /healthz at the provided address, which should be in form [<address>]:<port>.
// If not set, no health check will be run. If set to :<port>, addr will default to 0.0.0.0.
+// The health endpoint will return 200 OK if this node has at least one tailnet IP address,
+// otherwise returns 503.
+// NB: the health criteria might change in the future.
// - TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR: if specified, a path to a
// directory that containers tailscaled config in file. The config file needs to be
// named cap-.hujson. If this is set, TS_HOSTNAME,
@@ -95,6 +101,7 @@ import (
"log"
"math"
"net"
+ "net/http"
"net/netip"
"os"
"os/exec"
@@ -158,6 +165,7 @@ func main() {
AllowProxyingClusterTrafficViaIngress: defaultBool("EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS", false),
PodIP: defaultEnv("POD_IP", ""),
EnableForwardingOptimizations: defaultBool("TS_EXPERIMENTAL_ENABLE_FORWARDING_OPTIMIZATIONS", false),
+ HealthCheckAddrPort: defaultEnv("TS_HEALTHCHECK_ADDR_PORT", ""),
}
if err := cfg.validate(); err != nil {
@@ -349,6 +357,9 @@ authLoop:
certDomain = new(atomic.Pointer[string])
certDomainChanged = make(chan bool, 1)
+
+ h = &healthz{} // http server for the healthz endpoint
+ healthzRunner = sync.OnceFunc(func() { runHealthz(cfg.HealthCheckAddrPort, h) })
)
if cfg.ServeConfigPath != "" {
go watchServeConfigChanges(ctx, cfg.ServeConfigPath, certDomainChanged, certDomain, client)
@@ -565,6 +576,13 @@ runLoop:
log.Fatalf("storing device IPs and FQDN in Kubernetes Secret: %v", err)
}
}
+
+ if cfg.HealthCheckAddrPort != "" {
+ h.Lock()
+ h.hasAddrs = len(addrs) != 0
+ h.Unlock()
+ healthzRunner()
+ }
}
if !startupTasksDone {
// For containerboot instances that act as TCP
@@ -1152,7 +1170,8 @@ type settings struct {
// PodIP is the IP of the Pod if running in Kubernetes. This is used
// when setting up rules to proxy cluster traffic to cluster ingress
// target.
- PodIP string
+ PodIP string
+ HealthCheckAddrPort string
}
func (s *settings) validate() error {
@@ -1201,6 +1220,11 @@ func (s *settings) validate() error {
if s.EnableForwardingOptimizations && s.UserspaceMode {
return errors.New("TS_EXPERIMENTAL_ENABLE_FORWARDING_OPTIMIZATIONS is not supported in userspace mode")
}
+ if s.HealthCheckAddrPort != "" {
+ if _, err := netip.ParseAddrPort(s.HealthCheckAddrPort); err != nil {
+		return fmt.Errorf("error parsing TS_HEALTHCHECK_ADDR_PORT value %q: %w", s.HealthCheckAddrPort, err)
+ }
+ }
return nil
}
@@ -1374,3 +1398,41 @@ func tailscaledConfigFilePath() string {
log.Printf("Using tailscaled config file %q for capability version %q", maxCompatVer, tailcfg.CurrentCapabilityVersion)
return path.Join(dir, kubeutils.TailscaledConfigFileNameForCap(maxCompatVer))
}
+
+// healthz is a simple health check server, if enabled it returns 200 OK if
+// this tailscale node currently has at least one tailnet IP address else
+// returns 503.
+type healthz struct {
+ sync.Mutex
+ hasAddrs bool
+}
+
+func (h *healthz) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ h.Lock()
+ defer h.Unlock()
+ if h.hasAddrs {
+ w.Write([]byte("ok"))
+ } else {
+		http.Error(w, "node currently has no tailscale IPs", http.StatusServiceUnavailable)
+ }
+}
+
+// runHealthz runs a simple HTTP health endpoint on /healthz, listening on the
+// provided address. A containerized tailscale instance is considered healthy if
+// it has at least one tailnet IP address.
+func runHealthz(addr string, h *healthz) {
+ lis, err := net.Listen("tcp", addr)
+ if err != nil {
+ log.Fatalf("error listening on the provided health endpoint address %q: %v", addr, err)
+ }
+ mux := http.NewServeMux()
+ mux.Handle("/healthz", h)
+ log.Printf("Running healthcheck endpoint at %s/healthz", addr)
+ hs := &http.Server{Handler: mux}
+
+ go func() {
+ if err := hs.Serve(lis); err != nil {
+ log.Fatalf("failed running health endpoint: %v", err)
+ }
+ }()
+}
diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt
index 825b33facd4c1..1492c4ebb4721 100644
--- a/cmd/derper/depaware.txt
+++ b/cmd/derper/depaware.txt
@@ -7,10 +7,14 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy
github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus
💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus
+ github.com/coder/websocket from tailscale.com/cmd/derper+
+ github.com/coder/websocket/internal/errd from github.com/coder/websocket
+ github.com/coder/websocket/internal/util from github.com/coder/websocket
+ github.com/coder/websocket/internal/xsync from github.com/coder/websocket
L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw
W 💣 github.com/dblohm7/wingoes from tailscale.com/util/winutil
github.com/fxamacker/cbor/v2 from tailscale.com/tka
- github.com/go-json-experiment/json from tailscale.com/types/opt
+ github.com/go-json-experiment/json from tailscale.com/types/opt+
github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+
github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+
github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+
@@ -82,10 +86,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
google.golang.org/protobuf/runtime/protoiface from google.golang.org/protobuf/internal/impl+
google.golang.org/protobuf/runtime/protoimpl from github.com/prometheus/client_model/go+
google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+
- nhooyr.io/websocket from tailscale.com/cmd/derper+
- nhooyr.io/websocket/internal/errd from nhooyr.io/websocket
- nhooyr.io/websocket/internal/util from nhooyr.io/websocket
- nhooyr.io/websocket/internal/xsync from nhooyr.io/websocket
tailscale.com from tailscale.com/version
tailscale.com/atomicfile from tailscale.com/cmd/derper+
tailscale.com/client/tailscale from tailscale.com/derp
@@ -146,9 +146,11 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
tailscale.com/util/cloudenv from tailscale.com/hostinfo+
W tailscale.com/util/cmpver from tailscale.com/net/tshttpproxy
tailscale.com/util/ctxkey from tailscale.com/tsweb+
+ 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting
L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics
tailscale.com/util/dnsname from tailscale.com/hostinfo+
tailscale.com/util/fastuuid from tailscale.com/tsweb
+ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash
tailscale.com/util/httpm from tailscale.com/client/tailscale
tailscale.com/util/lineread from tailscale.com/hostinfo+
L tailscale.com/util/linuxfw from tailscale.com/net/netns
@@ -159,6 +161,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
tailscale.com/util/singleflight from tailscale.com/net/dnscache
tailscale.com/util/slicesx from tailscale.com/cmd/derper+
tailscale.com/util/syspolicy from tailscale.com/ipn
+ tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting
+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy
tailscale.com/util/vizerror from tailscale.com/tailcfg+
W 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+
W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+
@@ -180,6 +184,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box
golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+
W golang.org/x/exp/constraints from tailscale.com/util/winutil
+ golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting
L golang.org/x/net/bpf from github.com/mdlayher/netlink+
golang.org/x/net/dns/dnsmessage from net+
golang.org/x/net/http/httpguts from net/http
diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go
index 76151175c5c13..80c9dc44f138f 100644
--- a/cmd/derper/derper.go
+++ b/cmd/derper/derper.go
@@ -237,7 +237,7 @@ func main() {
tsweb.AddBrowserHeaders(w)
io.WriteString(w, "User-agent: *\nDisallow: /\n")
}))
- mux.Handle("/generate_204", http.HandlerFunc(serveNoContent))
+ mux.Handle("/generate_204", http.HandlerFunc(derphttp.ServeNoContent))
debug := tsweb.Debugger(mux)
debug.KV("TLS hostname", *hostname)
debug.KV("Mesh key", s.HasMeshKey())
@@ -337,7 +337,7 @@ func main() {
if *httpPort > -1 {
go func() {
port80mux := http.NewServeMux()
- port80mux.HandleFunc("/generate_204", serveNoContent)
+ port80mux.HandleFunc("/generate_204", derphttp.ServeNoContent)
port80mux.Handle("/", certManager.HTTPHandler(tsweb.Port80Handler{Main: mux}))
port80srv := &http.Server{
Addr: net.JoinHostPort(listenHost, fmt.Sprintf("%d", *httpPort)),
@@ -378,31 +378,6 @@ func main() {
}
}
-const (
- noContentChallengeHeader = "X-Tailscale-Challenge"
- noContentResponseHeader = "X-Tailscale-Response"
-)
-
-// For captive portal detection
-func serveNoContent(w http.ResponseWriter, r *http.Request) {
- if challenge := r.Header.Get(noContentChallengeHeader); challenge != "" {
- badChar := strings.IndexFunc(challenge, func(r rune) bool {
- return !isChallengeChar(r)
- }) != -1
- if len(challenge) <= 64 && !badChar {
- w.Header().Set(noContentResponseHeader, "response "+challenge)
- }
- }
- w.WriteHeader(http.StatusNoContent)
-}
-
-func isChallengeChar(c rune) bool {
- // Semi-randomly chosen as a limited set of valid characters
- return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') ||
- ('0' <= c && c <= '9') ||
- c == '.' || c == '-' || c == '_'
-}
-
var validProdHostname = regexp.MustCompile(`^derp([^.]*)\.tailscale\.com\.?$`)
func prodAutocertHostPolicy(_ context.Context, host string) error {
diff --git a/cmd/derper/derper_test.go b/cmd/derper/derper_test.go
index 1af7c3abeacf1..553a78f9f6426 100644
--- a/cmd/derper/derper_test.go
+++ b/cmd/derper/derper_test.go
@@ -10,6 +10,7 @@ import (
"strings"
"testing"
+ "tailscale.com/derp/derphttp"
"tailscale.com/tstest/deptest"
)
@@ -76,20 +77,20 @@ func TestNoContent(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
req, _ := http.NewRequest("GET", "https://localhost/generate_204", nil)
if tt.input != "" {
- req.Header.Set(noContentChallengeHeader, tt.input)
+ req.Header.Set(derphttp.NoContentChallengeHeader, tt.input)
}
w := httptest.NewRecorder()
- serveNoContent(w, req)
+ derphttp.ServeNoContent(w, req)
resp := w.Result()
if tt.want == "" {
- if h, found := resp.Header[noContentResponseHeader]; found {
+ if h, found := resp.Header[derphttp.NoContentResponseHeader]; found {
t.Errorf("got %+v; expected no response header", h)
}
return
}
- if got := resp.Header.Get(noContentResponseHeader); got != tt.want {
+ if got := resp.Header.Get(derphttp.NoContentResponseHeader); got != tt.want {
t.Errorf("got %q; want %q", got, tt.want)
}
})
diff --git a/cmd/derper/websocket.go b/cmd/derper/websocket.go
index 83ab388765056..05f40deb816d5 100644
--- a/cmd/derper/websocket.go
+++ b/cmd/derper/websocket.go
@@ -10,7 +10,7 @@ import (
"net/http"
"strings"
- "nhooyr.io/websocket"
+ "github.com/coder/websocket"
"tailscale.com/derp"
"tailscale.com/net/wsconn"
)
diff --git a/cmd/derpprobe/derpprobe.go b/cmd/derpprobe/derpprobe.go
index 117d735820792..1d0ec32c3c064 100644
--- a/cmd/derpprobe/derpprobe.go
+++ b/cmd/derpprobe/derpprobe.go
@@ -7,8 +7,6 @@ package main
import (
"flag"
"fmt"
- "html"
- "io"
"log"
"net/http"
"sort"
@@ -70,8 +68,13 @@ func main() {
}
mux := http.NewServeMux()
- tsweb.Debugger(mux)
- mux.HandleFunc("/", http.HandlerFunc(serveFunc(p)))
+ d := tsweb.Debugger(mux)
+ d.Handle("probe-run", "Run a probe", tsweb.StdHandler(tsweb.ReturnHandlerFunc(p.RunHandler), tsweb.HandlerOptions{Logf: log.Printf}))
+ mux.Handle("/", tsweb.StdHandler(p.StatusHandler(
+ prober.WithTitle("DERP Prober"),
+ prober.WithPageLink("Prober metrics", "/debug/varz"),
+ prober.WithProbeLink("Run Probe", "/debug/probe-run?name={{.Name}}"),
+ ), tsweb.HandlerOptions{Logf: log.Printf}))
log.Printf("Listening on %s", *listen)
log.Fatal(http.ListenAndServe(*listen, mux))
}
@@ -105,26 +108,3 @@ func getOverallStatus(p *prober.Prober) (o overallStatus) {
sort.Strings(o.good)
return
}
-
-func serveFunc(p *prober.Prober) func(w http.ResponseWriter, r *http.Request) {
- return func(w http.ResponseWriter, r *http.Request) {
- st := getOverallStatus(p)
- summary := "All good"
- if (float64(len(st.bad)) / float64(len(st.bad)+len(st.good))) > 0.25 {
- // Returning a 500 allows monitoring this server externally and configuring
- // an alert on HTTP response code.
- w.WriteHeader(500)
- summary = fmt.Sprintf("%d problems", len(st.bad))
- }
-
- io.WriteString(w, "\n")
- fmt.Fprintf(w, "
derp probe
\n%s:
", summary)
- for _, s := range st.bad {
- fmt.Fprintf(w, "
%s
\n", html.EscapeString(s))
- }
- for _, s := range st.good {
- fmt.Fprintf(w, "
- ConnectorStatus describes the status of the Connector. This is set
-and managed by the Tailscale operator.
-
-
false
-
-
-### Connector.spec
-[↩ Parent](#connector)
+_Appears in:_
+- [ConnectorList](#connectorlist)
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | |
+| `kind` _string_ | `Connector` | | |
+| `kind` _string_ | Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | |
+| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[ConnectorSpec](#connectorspec)_ | ConnectorSpec describes the desired Tailscale component. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | | |
+| `status` _[ConnectorStatus](#connectorstatus)_ | ConnectorStatus describes the status of the Connector. This is set and managed by the Tailscale operator. | | |
-ConnectorSpec describes the desired Tailscale component.
-More info:
-https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
exitNode
-
boolean
-
- ExitNode defines whether the Connector node should act as a
-Tailscale exit node. Defaults to false.
-https://tailscale.com/kb/1103/exit-nodes
-
-
false
-
-
hostname
-
string
-
- Hostname is the tailnet hostname that should be assigned to the
-Connector node. If unset, hostname defaults to -connector. Hostname can contain lower case letters, numbers and
-dashes, it must not start or end with a dash and must be between 2
-and 63 characters long.
-
-
false
-
-
proxyClass
-
string
-
- ProxyClass is the name of the ProxyClass custom resource that
-contains configuration options that should be applied to the
-resources created for this Connector. If unset, the operator will
-create resources with the default configuration.
-
- SubnetRouter defines subnet routes that the Connector node should
-expose to tailnet. If unset, none are exposed.
-https://tailscale.com/kb/1019/subnets/
-
-
false
-
-
tags
-
[]string
-
- Tags that the Tailscale node will be tagged with.
-Defaults to [tag:k8s].
-To autoapprove the subnet routes or exit node defined by a Connector,
-you can configure Tailscale ACLs to give these tags the necessary
-permissions.
-See https://tailscale.com/kb/1018/acls/#auto-approvers-for-routes-and-exit-nodes.
-If you specify custom tags here, you must also make the operator an owner of these tags.
-See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
-Tags cannot be changed once a Connector node has been created.
-Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$.
-
-
false
-
-
-
-
-### Connector.spec.subnetRouter
-[↩ Parent](#connectorspec)
-
-
-
-SubnetRouter defines subnet routes that the Connector node should
-expose to tailnet. If unset, none are exposed.
-https://tailscale.com/kb/1019/subnets/
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
advertiseRoutes
-
[]string
-
- AdvertiseRoutes refer to CIDRs that the subnet router should make
-available. Route values must be strings that represent a valid IPv4
-or IPv6 CIDR range. Values can be Tailscale 4via6 subnet routes.
-https://tailscale.com/kb/1201/4via6-subnets/
-
-
true
-
-
-
-
-### Connector.status
-[↩ Parent](#connector)
-
-
-
-ConnectorStatus describes the status of the Connector. This is set
-and managed by the Tailscale operator.
-
-
- List of status conditions to indicate the status of the Connector.
-Known condition types are `ConnectorReady`.
-
-
false
-
-
hostname
-
string
-
- Hostname is the fully qualified domain name of the Connector node.
-If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
-node.
-
-
false
-
-
isExitNode
-
boolean
-
- IsExitNode is set to true if the Connector acts as an exit node.
-
-
false
-
-
subnetRoutes
-
string
-
- SubnetRoutes are the routes currently exposed to tailnet via this
-Connector instance.
-
-
false
-
-
tailnetIPs
-
[]string
-
- TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6)
-assigned to the Connector node.
-
-
false
-
-
-
-
-### Connector.status.conditions[index]
-[↩ Parent](#connectorstatus)
-
-
-
-Condition contains details for one aspect of the current state of this API Resource.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
lastTransitionTime
-
string
-
- lastTransitionTime is the last time the condition transitioned from one status to another.
-This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
-
- Format: date-time
-
-
true
-
-
message
-
string
-
- message is a human readable message indicating details about the transition.
-This may be an empty string.
-
-
true
-
-
reason
-
string
-
- reason contains a programmatic identifier indicating the reason for the condition's last transition.
-Producers of specific condition types may define expected values and meanings for this field,
-and whether the values are considered a guaranteed API.
-The value should be a CamelCase string.
-This field may not be empty.
-
-
true
-
-
status
-
enum
-
- status of the condition, one of True, False, Unknown.
-
- Enum: True, False, Unknown
-
-
true
-
-
type
-
string
-
- type of condition in CamelCase or in foo.example.com/CamelCase.
-
-
true
-
-
observedGeneration
-
integer
-
- observedGeneration represents the .metadata.generation that the condition was set based upon.
-For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
-with respect to the current state of the instance.
-
- Format: int64
- Minimum: 0
-
-
false
-
-
-
-## DNSConfig
-[↩ Parent](#tailscalecomv1alpha1 )
+#### ConnectorList
+
+
+
+
+
+
+
+
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | |
+| `kind` _string_ | `ConnectorList` | | |
+| `kind` _string_ | Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | |
+| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[Connector](#connector) array_ | | | |
+
+
+#### ConnectorSpec
+
+
+
+ConnectorSpec describes a Tailscale node to be deployed in the cluster.
+
+
+
+_Appears in:_
+- [Connector](#connector)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `tags` _[Tags](#tags)_ | Tags that the Tailscale node will be tagged with. Defaults to [tag:k8s]. To autoapprove the subnet routes or exit node defined by a Connector, you can configure Tailscale ACLs to give these tags the necessary permissions. See https://tailscale.com/kb/1018/acls/#auto-approvers-for-routes-and-exit-nodes. If you specify custom tags here, you must also make the operator an owner of these tags. See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator. Tags cannot be changed once a Connector node has been created. Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$` Type: string |
+| `hostname` _[Hostname](#hostname)_ | Hostname is the tailnet hostname that should be assigned to the Connector node. If unset, hostname defaults to <connector name>-connector. Hostname can contain lower case letters, numbers and dashes, it must not start or end with a dash and must be between 2 and 63 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$` Type: string |
+| `proxyClass` _string_ | ProxyClass is the name of the ProxyClass custom resource that contains configuration options that should be applied to the resources created for this Connector. If unset, the operator will create resources with the default configuration. | | |
+| `subnetRouter` _[SubnetRouter](#subnetrouter)_ | SubnetRouter defines subnet routes that the Connector node should expose to tailnet. If unset, none are exposed. https://tailscale.com/kb/1019/subnets/ | | |
+| `exitNode` _boolean_ | ExitNode defines whether the Connector node should act as a Tailscale exit node. Defaults to false. https://tailscale.com/kb/1103/exit-nodes | | |
+
+
+#### ConnectorStatus
+
+
+
+ConnectorStatus defines the observed state of the Connector.
+
+
+
+_Appears in:_
+- [Connector](#connector)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the Connector. Known condition types are `ConnectorReady`. | | |
+| `subnetRoutes` _string_ | SubnetRoutes are the routes currently exposed to tailnet via this Connector instance. | | |
+| `isExitNode` _boolean_ | IsExitNode is set to true if the Connector acts as an exit node. | | |
+| `tailnetIPs` _string array_ | TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) assigned to the Connector node. | | |
+| `hostname` _string_ | Hostname is the fully qualified domain name of the Connector node. If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the node. | | |
+
+
+#### Container
+
+_Appears in:_
+- [Pod](#pod)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `env` _[Env](#env) array_ | List of environment variables to set in the container. https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables Note that environment variables provided here will take precedence over Tailscale-specific environment variables set by the operator, however running proxies with custom values for Tailscale environment variables (i.e TS_USERSPACE) is not recommended and might break in the future. | | |
+| `image` _string_ | Container image name. By default images are pulled from docker.io/tailscale/tailscale, but the official images are also available at ghcr.io/tailscale/tailscale. Specifying image name here will override any proxy image values specified via the Kubernetes operator's Helm chart values or PROXY_IMAGE env var in the operator Deployment. https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | |
+| `imagePullPolicy` _[PullPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#pullpolicy-v1-core)_ | Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always. https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | Enum: [Always Never IfNotPresent] |
+| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#resourcerequirements-v1-core)_ | Container resource requirements. By default Tailscale Kubernetes operator does not apply any resource requirements. The amount of resources required wil depend on the amount of resources the operator needs to parse, usage patterns and cluster size. https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources | | |
+| `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#securitycontext-v1-core)_ | Container security context. Security context specified here will override the security context by the operator. By default the operator: - sets 'privileged: true' for the init container - set NET_ADMIN capability for tailscale container for proxies that are created for Services or Connector. https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context | | |
+
+
+#### DNSConfig
+
+
+
DNSConfig can be deployed to cluster to make a subset of Tailscale MagicDNS
names resolvable by cluster workloads. Use this if: A) you need to refer to
tailnet services, exposed to cluster via Tailscale Kubernetes operator egress
@@ -362,300 +157,216 @@ tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation to
ensure that the proxy created for the Ingress listens on its Pod IP address.
NB: Clusters where Pods get assigned IPv6 addresses only are currently not supported.
-
- Status describes the status of the DNSConfig. This is set
-and managed by the Tailscale operator.
-
-
false
-
-
-### DNSConfig.spec
-[↩ Parent](#dnsconfig)
+_Appears in:_
+- [DNSConfigList](#dnsconfiglist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | |
+| `kind` _string_ | `DNSConfig` | | |
+| `kind` _string_ | Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | |
+| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[DNSConfigSpec](#dnsconfigspec)_ | Spec describes the desired DNS configuration. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | | |
+| `status` _[DNSConfigStatus](#dnsconfigstatus)_ | Status describes the status of the DNSConfig. This is set and managed by the Tailscale operator. | | |
-Spec describes the desired DNS configuration.
-More info:
-https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
-
-
- Configuration for a nameserver that can resolve ts.net DNS names
-associated with in-cluster proxies for Tailscale egress Services and
-Tailscale Ingresses. The operator will always deploy this nameserver
-when a DNSConfig is applied.
-
-
true
-
-
-
-
-### DNSConfig.spec.nameserver
-[↩ Parent](#dnsconfigspec)
-
-
-
-Configuration for a nameserver that can resolve ts.net DNS names
-associated with in-cluster proxies for Tailscale egress Services and
-Tailscale Ingresses. The operator will always deploy this nameserver
-when a DNSConfig is applied.
-
-
-
-
-### DNSConfig.status
-[↩ Parent](#dnsconfig)
-
-
-
-Status describes the status of the DNSConfig. This is set
-and managed by the Tailscale operator.
-
-
- Nameserver describes the status of nameserver cluster resources.
-
-
false
-
-
-
-
-### DNSConfig.status.conditions[index]
-[↩ Parent](#dnsconfigstatus)
-
-
-
-Condition contains details for one aspect of the current state of this API Resource.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
lastTransitionTime
-
string
-
- lastTransitionTime is the last time the condition transitioned from one status to another.
-This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
-
- Format: date-time
-
-
true
-
-
message
-
string
-
- message is a human readable message indicating details about the transition.
-This may be an empty string.
-
-
true
-
-
reason
-
string
-
- reason contains a programmatic identifier indicating the reason for the condition's last transition.
-Producers of specific condition types may define expected values and meanings for this field,
-and whether the values are considered a guaranteed API.
-The value should be a CamelCase string.
-This field may not be empty.
-
-
true
-
-
status
-
enum
-
- status of the condition, one of True, False, Unknown.
-
- Enum: True, False, Unknown
-
-
true
-
-
type
-
string
-
- type of condition in CamelCase or in foo.example.com/CamelCase.
-
-
true
-
-
observedGeneration
-
integer
-
- observedGeneration represents the .metadata.generation that the condition was set based upon.
-For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
-with respect to the current state of the instance.
-
- Format: int64
- Minimum: 0
-
-
false
-
-
-
-
-### DNSConfig.status.nameserver
-[↩ Parent](#dnsconfigstatus)
-
-
-
-Nameserver describes the status of nameserver cluster resources.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
ip
-
string
-
- IP is the ClusterIP of the Service fronting the deployed ts.net nameserver.
-Currently you must manually update your cluster DNS config to add
-this address as a stub nameserver for ts.net for cluster workloads to be
-able to resolve MagicDNS names associated with egress or Ingress
-proxies.
-The IP address will change if you delete and recreate the DNSConfig.
-
-
false
-
-
-
-## ProxyClass
-[↩ Parent](#tailscalecomv1alpha1 )
+#### DNSConfigList
+
+
+
+
+
+
+
+
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | |
+| `kind` _string_ | `DNSConfigList` | | |
+| `kind` _string_ | Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | |
+| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[DNSConfig](#dnsconfig) array_ | | | |
+
+
+#### DNSConfigSpec
+
+
+
+
+
+
+
+_Appears in:_
+- [DNSConfig](#dnsconfig)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `nameserver` _[Nameserver](#nameserver)_ | Configuration for a nameserver that can resolve ts.net DNS names associated with in-cluster proxies for Tailscale egress Services and Tailscale Ingresses. The operator will always deploy this nameserver when a DNSConfig is applied. | | |
+
+
+#### DNSConfigStatus
+
+
+
+
+
+
+
+_Appears in:_
+- [DNSConfig](#dnsconfig)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | | | |
+| `nameserver` _[NameserverStatus](#nameserverstatus)_ | Nameserver describes the status of nameserver cluster resources. | | |
+
+
+#### Env
+
+
+
+
+
+
+
+_Appears in:_
+- [Container](#container)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `name` _[Name](#name)_ | Name of the environment variable. Must be a C_IDENTIFIER. | | Pattern: `^[-._a-zA-Z][-._a-zA-Z0-9]*$` Type: string |
+| `value` _string_ | Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". | | |
+
+
+#### Hostname
+
+_Underlying type:_ _string_
+
+
+
+_Validation:_
+- Pattern: `^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$`
+- Type: string
+
+_Appears in:_
+- [ConnectorSpec](#connectorspec)
+
+
+
+#### Image
+
+
+
+
+
+
+
+_Appears in:_
+- [Nameserver](#nameserver)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `repo` _string_ | Repo defaults to tailscale/k8s-nameserver. | | |
+| `tag` _string_ | Tag defaults to operator's own tag. | | |
+
+
+#### Metrics
+
+
+
+
+
+
+
+_Appears in:_
+- [ProxyClassSpec](#proxyclassspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `enable` _boolean_ | Setting enable to true will make the proxy serve Tailscale metrics at :9001/debug/metrics. Defaults to false. | | |
+
+
+#### Name
+
+_Underlying type:_ _string_
+
+
+
+_Validation:_
+- Pattern: `^[-._a-zA-Z][-._a-zA-Z0-9]*$`
+- Type: string
+
+_Appears in:_
+- [Env](#env)
+
+
+
+#### Nameserver
+
+_Appears in:_
+- [DNSConfigSpec](#dnsconfigspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `image` _[Image](#image)_ | Nameserver image. | | |
+
+
+#### NameserverStatus
+
+
+
+
+
+
+
+_Appears in:_
+- [DNSConfigStatus](#dnsconfigstatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `ip` _string_ | IP is the ClusterIP of the Service fronting the deployed ts.net nameserver. Currently you must manually update your cluster DNS config to add this address as a stub nameserver for ts.net for cluster workloads to be able to resolve MagicDNS names associated with egress or Ingress proxies. The IP address will change if you delete and recreate the DNSConfig. | | |
+
+
+#### Pod
+
+
+
+
+
+
+
+_Appears in:_
+- [StatefulSet](#statefulset)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `labels` _object (keys:string, values:string)_ | Labels that will be added to the proxy Pod. Any labels specified here will be merged with the default labels applied to the Pod by the Tailscale Kubernetes operator. Label keys and values must be valid Kubernetes label keys and values. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set | | |
+| `annotations` _object (keys:string, values:string)_ | Annotations that will be added to the proxy Pod. Any annotations specified here will be merged with the default annotations applied to the Pod by the Tailscale Kubernetes operator. Annotations must be valid Kubernetes annotations. https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set | | |
+| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#affinity-v1-core)_ | Proxy Pod's affinity rules. By default, the Tailscale Kubernetes operator does not apply any affinity rules. https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#affinity | | |
+| `tailscaleContainer` _[Container](#container)_ | Configuration for the proxy container running tailscale. | | |
+| `tailscaleInitContainer` _[Container](#container)_ | Configuration for the proxy init container that enables forwarding. | | |
+| `securityContext` _[PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#podsecuritycontext-v1-core)_ | Proxy Pod's security context. By default Tailscale Kubernetes operator does not apply any Pod security context. https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-2 | | |
+| `imagePullSecrets` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#localobjectreference-v1-core) array_ | Proxy Pod's image pull Secrets. https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec | | |
+| `nodeName` _string_ | Proxy Pod's node name. https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | |
+| `nodeSelector` _object (keys:string, values:string)_ | Proxy Pod's node selector. By default Tailscale Kubernetes operator does not apply any node selector. https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | |
+| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#toleration-v1-core) array_ | Proxy Pod's tolerations. By default Tailscale Kubernetes operator does not apply any tolerations. https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | |
+
+
+#### ProxyClass
+
+
+
ProxyClass describes a set of configuration parameters that can be applied to
proxy resources created by the Tailscale Kubernetes operator.
To apply a given ProxyClass to resources created for a tailscale Ingress or
@@ -666,3749 +377,185 @@ ProxyClass is a cluster scoped resource.
More info:
https://tailscale.com/kb/1236/kubernetes-operator#cluster-resource-customization-using-proxyclass-custom-resource.
-
- Specification of the desired state of the ProxyClass resource.
-https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
-
- Status of the ProxyClass. This is set and managed automatically.
-https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
-
-
false
-
-
-
-
-### ProxyClass.spec
-[↩ Parent](#proxyclass)
-
-
-
-Specification of the desired state of the ProxyClass resource.
-https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
-
-
- Configuration for proxy metrics. Metrics are currently not supported
-for egress proxies and for Ingress proxies that have been configured
-with tailscale.com/experimental-forward-cluster-traffic-via-ingress
-annotation. Note that the metrics are currently considered unstable
-and will likely change in breaking ways in the future - we only
-recommend that you use those for debugging purposes.
-
- Configuration parameters for the proxy's StatefulSet. Tailscale
-Kubernetes operator deploys a StatefulSet for each of the user
-configured proxies (Tailscale Ingress, Tailscale Service, Connector).
-
- TailscaleConfig contains options to configure the tailscale-specific
-parameters of proxies.
-
-
false
-
-
-
-
-### ProxyClass.spec.metrics
-[↩ Parent](#proxyclassspec)
-
-
-
-Configuration for proxy metrics. Metrics are currently not supported
-for egress proxies and for Ingress proxies that have been configured
-with tailscale.com/experimental-forward-cluster-traffic-via-ingress
-annotation. Note that the metrics are currently considered unstable
-and will likely change in breaking ways in the future - we only
-recommend that you use those for debugging purposes.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
enable
-
boolean
-
- Setting enable to true will make the proxy serve Tailscale metrics
-at :9001/debug/metrics.
-Defaults to false.
-
-
true
-
-
-
-
-### ProxyClass.spec.statefulSet
-[↩ Parent](#proxyclassspec)
-
-
-
-Configuration parameters for the proxy's StatefulSet. Tailscale
-Kubernetes operator deploys a StatefulSet for each of the user
-configured proxies (Tailscale Ingress, Tailscale Service, Connector).
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
annotations
-
map[string]string
-
- Annotations that will be added to the StatefulSet created for the proxy.
-Any Annotations specified here will be merged with the default annotations
-applied to the StatefulSet by the Tailscale Kubernetes operator as
-well as any other annotations that might have been applied by other
-actors.
-Annotations must be valid Kubernetes annotations.
-https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set
-
-
false
-
-
labels
-
map[string]string
-
- Labels that will be added to the StatefulSet created for the proxy.
-Any labels specified here will be merged with the default labels
-applied to the StatefulSet by the Tailscale Kubernetes operator as
-well as any other labels that might have been applied by other
-actors.
-Label keys and values must be valid Kubernetes label keys and values.
-https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set
-
- Proxy Pod's affinity rules.
-By default, the Tailscale Kubernetes operator does not apply any affinity rules.
-https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#affinity
-
-
false
-
-
annotations
-
map[string]string
-
- Annotations that will be added to the proxy Pod.
-Any annotations specified here will be merged with the default
-annotations applied to the Pod by the Tailscale Kubernetes operator.
-Annotations must be valid Kubernetes annotations.
-https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set
-
- Labels that will be added to the proxy Pod.
-Any labels specified here will be merged with the default labels
-applied to the Pod by the Tailscale Kubernetes operator.
-Label keys and values must be valid Kubernetes label keys and values.
-https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set
-
- Proxy Pod's security context.
-By default Tailscale Kubernetes operator does not apply any Pod
-security context.
-https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-2
-
- The scheduler will prefer to schedule pods to nodes that satisfy
-the affinity expressions specified by this field, but it may choose
-a node that violates one or more of the expressions. The node that is
-most preferred is the one with the greatest sum of weights, i.e.
-for each node that meets all of the scheduling requirements (resource
-request, requiredDuringScheduling affinity expressions, etc.),
-compute a sum by iterating through the elements of this field and adding
-"weight" to the sum if the node matches the corresponding matchExpressions; the
-node(s) with the highest sum are the most preferred.
-
- If the affinity requirements specified by this field are not met at
-scheduling time, the pod will not be scheduled onto the node.
-If the affinity requirements specified by this field cease to be met
-at some point during pod execution (e.g. due to an update), the system
-may or may not try to eventually evict the pod from its node.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitynodeaffinity)
-
-
-
-An empty preferred scheduling term matches all objects with implicit weight 0
-(i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
-
-
- A list of node selector requirements by node's fields.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index]
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionindexpreference)
-
-
-
-A node selector requirement is a selector that contains values, a key, and an operator
-that relates the key and values.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
key
-
string
-
- The label key that the selector applies to.
-
-
true
-
-
operator
-
string
-
- Represents a key's relationship to a set of values.
-Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
-
-
true
-
-
values
-
[]string
-
- An array of string values. If the operator is In or NotIn,
-the values array must be non-empty. If the operator is Exists or DoesNotExist,
-the values array must be empty. If the operator is Gt or Lt, the values
-array must have a single element, which will be interpreted as an integer.
-This array is replaced during a strategic merge patch.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index]
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionindexpreference)
-
-
-
-A node selector requirement is a selector that contains values, a key, and an operator
-that relates the key and values.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
key
-
string
-
- The label key that the selector applies to.
-
-
true
-
-
operator
-
string
-
- Represents a key's relationship to a set of values.
-Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
-
-
true
-
-
values
-
[]string
-
- An array of string values. If the operator is In or NotIn,
-the values array must be non-empty. If the operator is Exists or DoesNotExist,
-the values array must be empty. If the operator is Gt or Lt, the values
-array must have a single element, which will be interpreted as an integer.
-This array is replaced during a strategic merge patch.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitynodeaffinity)
-
-
-
-If the affinity requirements specified by this field are not met at
-scheduling time, the pod will not be scheduled onto the node.
-If the affinity requirements specified by this field cease to be met
-at some point during pod execution (e.g. due to an update), the system
-may or may not try to eventually evict the pod from its node.
-
-
- Required. A list of node selector terms. The terms are ORed.
-
-
true
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index]
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitynodeaffinityrequiredduringschedulingignoredduringexecution)
-
-
-
-A null or empty node selector term matches no objects. The requirements of
-them are ANDed.
-The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
-
-
- A list of node selector requirements by node's fields.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index]
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsindex)
-
-
-
-A node selector requirement is a selector that contains values, a key, and an operator
-that relates the key and values.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
key
-
string
-
- The label key that the selector applies to.
-
-
true
-
-
operator
-
string
-
- Represents a key's relationship to a set of values.
-Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
-
-
true
-
-
values
-
[]string
-
- An array of string values. If the operator is In or NotIn,
-the values array must be non-empty. If the operator is Exists or DoesNotExist,
-the values array must be empty. If the operator is Gt or Lt, the values
-array must have a single element, which will be interpreted as an integer.
-This array is replaced during a strategic merge patch.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index]
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsindex)
-
-
-
-A node selector requirement is a selector that contains values, a key, and an operator
-that relates the key and values.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
key
-
string
-
- The label key that the selector applies to.
-
-
true
-
-
operator
-
string
-
- Represents a key's relationship to a set of values.
-Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
-
-
true
-
-
values
-
[]string
-
- An array of string values. If the operator is In or NotIn,
-the values array must be non-empty. If the operator is Exists or DoesNotExist,
-the values array must be empty. If the operator is Gt or Lt, the values
-array must have a single element, which will be interpreted as an integer.
-This array is replaced during a strategic merge patch.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAffinity
-[↩ Parent](#proxyclassspecstatefulsetpodaffinity)
-
-
-
-Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
-
-
- The scheduler will prefer to schedule pods to nodes that satisfy
-the affinity expressions specified by this field, but it may choose
-a node that violates one or more of the expressions. The node that is
-most preferred is the one with the greatest sum of weights, i.e.
-for each node that meets all of the scheduling requirements (resource
-request, requiredDuringScheduling affinity expressions, etc.),
-compute a sum by iterating through the elements of this field and adding
-"weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
-node(s) with the highest sum are the most preferred.
-
- If the affinity requirements specified by this field are not met at
-scheduling time, the pod will not be scheduled onto the node.
-If the affinity requirements specified by this field cease to be met
-at some point during pod execution (e.g. due to a pod label update), the
-system may or may not try to eventually evict the pod from its node.
-When there are multiple elements, the lists of nodes corresponding to each
-podAffinityTerm are intersected, i.e. all terms must be satisfied.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinity)
-
-
-
-The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
-
-
- Required. A pod affinity term, associated with the corresponding weight.
-
-
true
-
-
weight
-
integer
-
- weight associated with matching the corresponding podAffinityTerm,
-in the range 1-100.
-
- Format: int32
-
-
true
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinitypreferredduringschedulingignoredduringexecutionindex)
-
-
-
-Required. A pod affinity term, associated with the corresponding weight.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
topologyKey
-
string
-
- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
-the labelSelector in the specified namespaces, where co-located is defined as running on a node
-whose value of the label with key topologyKey matches that of any node on which any of the
-selected pods is running.
-Empty topologyKey is not allowed.
-
- A label query over a set of resources, in this case pods.
-If it's null, this PodAffinityTerm matches with no Pods.
-
-
false
-
-
matchLabelKeys
-
[]string
-
- MatchLabelKeys is a set of pod label keys to select which pods will
-be taken into consideration. The keys are used to lookup values from the
-incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
-to select the group of existing pods which pods will be taken into consideration
-for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
-pod labels will be ignored. The default value is empty.
-The same key is forbidden to exist in both matchLabelKeys and labelSelector.
-Also, matchLabelKeys cannot be set when labelSelector isn't set.
-This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
-
-
false
-
-
mismatchLabelKeys
-
[]string
-
- MismatchLabelKeys is a set of pod label keys to select which pods will
-be taken into consideration. The keys are used to lookup values from the
-incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
-to select the group of existing pods which pods will be taken into consideration
-for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
-pod labels will be ignored. The default value is empty.
-The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
-Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
-This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
-
- A label query over the set of namespaces that the term applies to.
-The term is applied to the union of the namespaces selected by this field
-and the ones listed in the namespaces field.
-null selector and null or empty namespaces list means "this pod's namespace".
-An empty selector ({}) matches all namespaces.
-
-
false
-
-
namespaces
-
[]string
-
- namespaces specifies a static list of namespace names that the term applies to.
-The term is applied to the union of the namespaces listed in this field
-and the ones selected by namespaceSelector.
-null or empty namespaces list and null namespaceSelector means "this pod's namespace".
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinitypreferredduringschedulingignoredduringexecutionindexpodaffinityterm)
-
-
-
-A label query over a set of resources, in this case pods.
-If it's null, this PodAffinityTerm matches with no Pods.
-
-
- matchExpressions is a list of label selector requirements. The requirements are ANDed.
-
-
false
-
-
matchLabels
-
map[string]string
-
- matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
-map is equivalent to an element of matchExpressions, whose key field is "key", the
-operator is "In", and the values array contains only "value". The requirements are ANDed.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index]
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinitypreferredduringschedulingignoredduringexecutionindexpodaffinitytermlabelselector)
-
-
-
-A label selector requirement is a selector that contains values, a key, and an operator that
-relates the key and values.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
key
-
string
-
- key is the label key that the selector applies to.
-
-
true
-
-
operator
-
string
-
- operator represents a key's relationship to a set of values.
-Valid operators are In, NotIn, Exists and DoesNotExist.
-
-
true
-
-
values
-
[]string
-
- values is an array of string values. If the operator is In or NotIn,
-the values array must be non-empty. If the operator is Exists or DoesNotExist,
-the values array must be empty. This array is replaced during a strategic
-merge patch.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinitypreferredduringschedulingignoredduringexecutionindexpodaffinityterm)
-
-
-
-A label query over the set of namespaces that the term applies to.
-The term is applied to the union of the namespaces selected by this field
-and the ones listed in the namespaces field.
-null selector and null or empty namespaces list means "this pod's namespace".
-An empty selector ({}) matches all namespaces.
-
-
- matchExpressions is a list of label selector requirements. The requirements are ANDed.
-
-
false
-
-
matchLabels
-
map[string]string
-
- matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
-map is equivalent to an element of matchExpressions, whose key field is "key", the
-operator is "In", and the values array contains only "value". The requirements are ANDed.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index]
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinitypreferredduringschedulingignoredduringexecutionindexpodaffinitytermnamespaceselector)
-
-
-
-A label selector requirement is a selector that contains values, a key, and an operator that
-relates the key and values.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
key
-
string
-
- key is the label key that the selector applies to.
-
-
true
-
-
operator
-
string
-
- operator represents a key's relationship to a set of values.
-Valid operators are In, NotIn, Exists and DoesNotExist.
-
-
true
-
-
values
-
[]string
-
- values is an array of string values. If the operator is In or NotIn,
-the values array must be non-empty. If the operator is Exists or DoesNotExist,
-the values array must be empty. This array is replaced during a strategic
-merge patch.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index]
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinity)
-
-
-
-Defines a set of pods (namely those matching the labelSelector
-relative to the given namespace(s)) that this pod should be
-co-located (affinity) or not co-located (anti-affinity) with,
-where co-located is defined as running on a node whose value of
-the label with key matches that of any node on which
-a pod of the set of pods is running
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
topologyKey
-
string
-
- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
-the labelSelector in the specified namespaces, where co-located is defined as running on a node
-whose value of the label with key topologyKey matches that of any node on which any of the
-selected pods is running.
-Empty topologyKey is not allowed.
-
- A label query over a set of resources, in this case pods.
-If it's null, this PodAffinityTerm matches with no Pods.
-
-
false
-
-
matchLabelKeys
-
[]string
-
- MatchLabelKeys is a set of pod label keys to select which pods will
-be taken into consideration. The keys are used to lookup values from the
-incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
-to select the group of existing pods which pods will be taken into consideration
-for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
-pod labels will be ignored. The default value is empty.
-The same key is forbidden to exist in both matchLabelKeys and labelSelector.
-Also, matchLabelKeys cannot be set when labelSelector isn't set.
-This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
-
-
false
-
-
mismatchLabelKeys
-
[]string
-
- MismatchLabelKeys is a set of pod label keys to select which pods will
-be taken into consideration. The keys are used to lookup values from the
-incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
-to select the group of existing pods which pods will be taken into consideration
-for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
-pod labels will be ignored. The default value is empty.
-The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
-Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
-This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
-
- A label query over the set of namespaces that the term applies to.
-The term is applied to the union of the namespaces selected by this field
-and the ones listed in the namespaces field.
-null selector and null or empty namespaces list means "this pod's namespace".
-An empty selector ({}) matches all namespaces.
-
-
false
-
-
namespaces
-
[]string
-
- namespaces specifies a static list of namespace names that the term applies to.
-The term is applied to the union of the namespaces listed in this field
-and the ones selected by namespaceSelector.
-null or empty namespaces list and null namespaceSelector means "this pod's namespace".
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinityrequiredduringschedulingignoredduringexecutionindex)
-
-
-
-A label query over a set of resources, in this case pods.
-If it's null, this PodAffinityTerm matches with no Pods.
-
-
- matchExpressions is a list of label selector requirements. The requirements are ANDed.
-
-
false
-
-
matchLabels
-
map[string]string
-
- matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
-map is equivalent to an element of matchExpressions, whose key field is "key", the
-operator is "In", and the values array contains only "value". The requirements are ANDed.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index]
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinityrequiredduringschedulingignoredduringexecutionindexlabelselector)
-
-
-
-A label selector requirement is a selector that contains values, a key, and an operator that
-relates the key and values.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
key
-
string
-
- key is the label key that the selector applies to.
-
-
true
-
-
operator
-
string
-
- operator represents a key's relationship to a set of values.
-Valid operators are In, NotIn, Exists and DoesNotExist.
-
-
true
-
-
values
-
[]string
-
- values is an array of string values. If the operator is In or NotIn,
-the values array must be non-empty. If the operator is Exists or DoesNotExist,
-the values array must be empty. This array is replaced during a strategic
-merge patch.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinityrequiredduringschedulingignoredduringexecutionindex)
-
-
-
-A label query over the set of namespaces that the term applies to.
-The term is applied to the union of the namespaces selected by this field
-and the ones listed in the namespaces field.
-null selector and null or empty namespaces list means "this pod's namespace".
-An empty selector ({}) matches all namespaces.
-
-
- matchExpressions is a list of label selector requirements. The requirements are ANDed.
-
-
false
-
-
matchLabels
-
map[string]string
-
- matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
-map is equivalent to an element of matchExpressions, whose key field is "key", the
-operator is "In", and the values array contains only "value". The requirements are ANDed.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index]
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinityrequiredduringschedulingignoredduringexecutionindexnamespaceselector)
-
-
-
-A label selector requirement is a selector that contains values, a key, and an operator that
-relates the key and values.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
key
-
string
-
- key is the label key that the selector applies to.
-
-
true
-
-
operator
-
string
-
- operator represents a key's relationship to a set of values.
-Valid operators are In, NotIn, Exists and DoesNotExist.
-
-
true
-
-
values
-
[]string
-
- values is an array of string values. If the operator is In or NotIn,
-the values array must be non-empty. If the operator is Exists or DoesNotExist,
-the values array must be empty. This array is replaced during a strategic
-merge patch.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity
-[↩ Parent](#proxyclassspecstatefulsetpodaffinity)
-
-
-
-Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
-
-
- The scheduler will prefer to schedule pods to nodes that satisfy
-the anti-affinity expressions specified by this field, but it may choose
-a node that violates one or more of the expressions. The node that is
-most preferred is the one with the greatest sum of weights, i.e.
-for each node that meets all of the scheduling requirements (resource
-request, requiredDuringScheduling anti-affinity expressions, etc.),
-compute a sum by iterating through the elements of this field and adding
-"weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
-node(s) with the highest sum are the most preferred.
-
- If the anti-affinity requirements specified by this field are not met at
-scheduling time, the pod will not be scheduled onto the node.
-If the anti-affinity requirements specified by this field cease to be met
-at some point during pod execution (e.g. due to a pod label update), the
-system may or may not try to eventually evict the pod from its node.
-When there are multiple elements, the lists of nodes corresponding to each
-podAffinityTerm are intersected, i.e. all terms must be satisfied.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index]
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinity)
-
-
-
-The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
-
-
- Required. A pod affinity term, associated with the corresponding weight.
-
-
true
-
-
weight
-
integer
-
- weight associated with matching the corresponding podAffinityTerm,
-in the range 1-100.
-
- Format: int32
-
-
true
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionindex)
-
-
-
-Required. A pod affinity term, associated with the corresponding weight.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
topologyKey
-
string
-
- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
-the labelSelector in the specified namespaces, where co-located is defined as running on a node
-whose value of the label with key topologyKey matches that of any node on which any of the
-selected pods is running.
-Empty topologyKey is not allowed.
-
- A label query over a set of resources, in this case pods.
-If it's null, this PodAffinityTerm matches with no Pods.
-
-
false
-
-
matchLabelKeys
-
[]string
-
- MatchLabelKeys is a set of pod label keys to select which pods will
-be taken into consideration. The keys are used to lookup values from the
-incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
-to select the group of existing pods which pods will be taken into consideration
-for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
-pod labels will be ignored. The default value is empty.
-The same key is forbidden to exist in both matchLabelKeys and labelSelector.
-Also, matchLabelKeys cannot be set when labelSelector isn't set.
-This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
-
-
false
-
-
mismatchLabelKeys
-
[]string
-
- MismatchLabelKeys is a set of pod label keys to select which pods will
-be taken into consideration. The keys are used to lookup values from the
-incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
-to select the group of existing pods which pods will be taken into consideration
-for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
-pod labels will be ignored. The default value is empty.
-The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
-Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
-This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
-
- A label query over the set of namespaces that the term applies to.
-The term is applied to the union of the namespaces selected by this field
-and the ones listed in the namespaces field.
-null selector and null or empty namespaces list means "this pod's namespace".
-An empty selector ({}) matches all namespaces.
-
-
false
-
-
namespaces
-
[]string
-
- namespaces specifies a static list of namespace names that the term applies to.
-The term is applied to the union of the namespaces listed in this field
-and the ones selected by namespaceSelector.
-null or empty namespaces list and null namespaceSelector means "this pod's namespace".
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionindexpodaffinityterm)
-
-
-
-A label query over a set of resources, in this case pods.
-If it's null, this PodAffinityTerm matches with no Pods.
-
-
- matchExpressions is a list of label selector requirements. The requirements are ANDed.
-
-
false
-
-
matchLabels
-
map[string]string
-
- matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
-map is equivalent to an element of matchExpressions, whose key field is "key", the
-operator is "In", and the values array contains only "value". The requirements are ANDed.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index]
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionindexpodaffinitytermlabelselector)
-
-
-
-A label selector requirement is a selector that contains values, a key, and an operator that
-relates the key and values.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
key
-
string
-
- key is the label key that the selector applies to.
-
-
true
-
-
operator
-
string
-
- operator represents a key's relationship to a set of values.
-Valid operators are In, NotIn, Exists and DoesNotExist.
-
-
true
-
-
values
-
[]string
-
- values is an array of string values. If the operator is In or NotIn,
-the values array must be non-empty. If the operator is Exists or DoesNotExist,
-the values array must be empty. This array is replaced during a strategic
-merge patch.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionindexpodaffinityterm)
-
-
-
-A label query over the set of namespaces that the term applies to.
-The term is applied to the union of the namespaces selected by this field
-and the ones listed in the namespaces field.
-null selector and null or empty namespaces list means "this pod's namespace".
-An empty selector ({}) matches all namespaces.
-
-
- matchExpressions is a list of label selector requirements. The requirements are ANDed.
-
-
false
-
-
matchLabels
-
map[string]string
-
- matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
-map is equivalent to an element of matchExpressions, whose key field is "key", the
-operator is "In", and the values array contains only "value". The requirements are ANDed.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index]
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionindexpodaffinitytermnamespaceselector)
-
-
-
-A label selector requirement is a selector that contains values, a key, and an operator that
-relates the key and values.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
key
-
string
-
- key is the label key that the selector applies to.
-
-
true
-
-
operator
-
string
-
- operator represents a key's relationship to a set of values.
-Valid operators are In, NotIn, Exists and DoesNotExist.
-
-
true
-
-
values
-
[]string
-
- values is an array of string values. If the operator is In or NotIn,
-the values array must be non-empty. If the operator is Exists or DoesNotExist,
-the values array must be empty. This array is replaced during a strategic
-merge patch.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index]
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinity)
-
-
-
-Defines a set of pods (namely those matching the labelSelector
-relative to the given namespace(s)) that this pod should be
-co-located (affinity) or not co-located (anti-affinity) with,
-where co-located is defined as running on a node whose value of
-the label with key matches that of any node on which
-a pod of the set of pods is running
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
topologyKey
-
string
-
- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
-the labelSelector in the specified namespaces, where co-located is defined as running on a node
-whose value of the label with key topologyKey matches that of any node on which any of the
-selected pods is running.
-Empty topologyKey is not allowed.
-
- A label query over a set of resources, in this case pods.
-If it's null, this PodAffinityTerm matches with no Pods.
-
-
false
-
-
matchLabelKeys
-
[]string
-
- MatchLabelKeys is a set of pod label keys to select which pods will
-be taken into consideration. The keys are used to lookup values from the
-incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
-to select the group of existing pods which pods will be taken into consideration
-for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
-pod labels will be ignored. The default value is empty.
-The same key is forbidden to exist in both matchLabelKeys and labelSelector.
-Also, matchLabelKeys cannot be set when labelSelector isn't set.
-This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
-
-
false
-
-
mismatchLabelKeys
-
[]string
-
- MismatchLabelKeys is a set of pod label keys to select which pods will
-be taken into consideration. The keys are used to lookup values from the
-incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
-to select the group of existing pods which pods will be taken into consideration
-for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
-pod labels will be ignored. The default value is empty.
-The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
-Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
-This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
-
- A label query over the set of namespaces that the term applies to.
-The term is applied to the union of the namespaces selected by this field
-and the ones listed in the namespaces field.
-null selector and null or empty namespaces list means "this pod's namespace".
-An empty selector ({}) matches all namespaces.
-
-
false
-
-
namespaces
-
[]string
-
- namespaces specifies a static list of namespace names that the term applies to.
-The term is applied to the union of the namespaces listed in this field
-and the ones selected by namespaceSelector.
-null or empty namespaces list and null namespaceSelector means "this pod's namespace".
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionindex)
-
-
-
-A label query over a set of resources, in this case pods.
-If it's null, this PodAffinityTerm matches with no Pods.
-
-
- matchExpressions is a list of label selector requirements. The requirements are ANDed.
-
-
false
-
-
matchLabels
-
map[string]string
-
- matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
-map is equivalent to an element of matchExpressions, whose key field is "key", the
-operator is "In", and the values array contains only "value". The requirements are ANDed.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index]
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionindexlabelselector)
-
-
-
-A label selector requirement is a selector that contains values, a key, and an operator that
-relates the key and values.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
key
-
string
-
- key is the label key that the selector applies to.
-
-
true
-
-
operator
-
string
-
- operator represents a key's relationship to a set of values.
-Valid operators are In, NotIn, Exists and DoesNotExist.
-
-
true
-
-
values
-
[]string
-
- values is an array of string values. If the operator is In or NotIn,
-the values array must be non-empty. If the operator is Exists or DoesNotExist,
-the values array must be empty. This array is replaced during a strategic
-merge patch.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionindex)
-
-
-
-A label query over the set of namespaces that the term applies to.
-The term is applied to the union of the namespaces selected by this field
-and the ones listed in the namespaces field.
-null selector and null or empty namespaces list means "this pod's namespace".
-An empty selector ({}) matches all namespaces.
-
-
- matchExpressions is a list of label selector requirements. The requirements are ANDed.
-
-
false
-
-
matchLabels
-
map[string]string
-
- matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
-map is equivalent to an element of matchExpressions, whose key field is "key", the
-operator is "In", and the values array contains only "value". The requirements are ANDed.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index]
-[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionindexnamespaceselector)
-
-
-
-A label selector requirement is a selector that contains values, a key, and an operator that
-relates the key and values.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
key
-
string
-
- key is the label key that the selector applies to.
-
-
true
-
-
operator
-
string
-
- operator represents a key's relationship to a set of values.
-Valid operators are In, NotIn, Exists and DoesNotExist.
-
-
true
-
-
values
-
[]string
-
- values is an array of string values. If the operator is In or NotIn,
-the values array must be non-empty. If the operator is Exists or DoesNotExist,
-the values array must be empty. This array is replaced during a strategic
-merge patch.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.imagePullSecrets[index]
-[↩ Parent](#proxyclassspecstatefulsetpod)
-
-
-
-LocalObjectReference contains enough information to let you locate the
-referenced object inside the same namespace.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
name
-
string
-
- Name of the referent.
-This field is effectively required, but due to backwards compatibility is
-allowed to be empty. Instances of this type with an empty value here are
-almost certainly wrong.
-More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-
- Default:
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.securityContext
-[↩ Parent](#proxyclassspecstatefulsetpod)
-
-
-
-Proxy Pod's security context.
-By default Tailscale Kubernetes operator does not apply any Pod
-security context.
-https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-2
-
-
- appArmorProfile is the AppArmor options to use by the containers in this pod.
-Note that this field cannot be set when spec.os.name is windows.
-
-
false
-
-
fsGroup
-
integer
-
- A special supplemental group that applies to all containers in a pod.
-Some volume types allow the Kubelet to change the ownership of that volume
-to be owned by the pod:
-
-1. The owning GID will be the FSGroup
-2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
-3. The permission bits are OR'd with rw-rw----
-
-If unset, the Kubelet will not modify the ownership and permissions of any volume.
-Note that this field cannot be set when spec.os.name is windows.
-
- Format: int64
-
-
false
-
-
fsGroupChangePolicy
-
string
-
- fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
-before being exposed inside Pod. This field will only apply to
-volume types which support fsGroup based ownership(and permissions).
-It will have no effect on ephemeral volume types such as: secret, configmaps
-and emptydir.
-Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
-Note that this field cannot be set when spec.os.name is windows.
-
-
false
-
-
runAsGroup
-
integer
-
- The GID to run the entrypoint of the container process.
-Uses runtime default if unset.
-May also be set in SecurityContext. If set in both SecurityContext and
-PodSecurityContext, the value specified in SecurityContext takes precedence
-for that container.
-Note that this field cannot be set when spec.os.name is windows.
-
- Format: int64
-
-
false
-
-
runAsNonRoot
-
boolean
-
- Indicates that the container must run as a non-root user.
-If true, the Kubelet will validate the image at runtime to ensure that it
-does not run as UID 0 (root) and fail to start the container if it does.
-If unset or false, no such validation will be performed.
-May also be set in SecurityContext. If set in both SecurityContext and
-PodSecurityContext, the value specified in SecurityContext takes precedence.
-
-
false
-
-
runAsUser
-
integer
-
- The UID to run the entrypoint of the container process.
-Defaults to user specified in image metadata if unspecified.
-May also be set in SecurityContext. If set in both SecurityContext and
-PodSecurityContext, the value specified in SecurityContext takes precedence
-for that container.
-Note that this field cannot be set when spec.os.name is windows.
-
- Format: int64
-
- The SELinux context to be applied to all containers.
-If unspecified, the container runtime will allocate a random SELinux context for each
-container. May also be set in SecurityContext. If set in
-both SecurityContext and PodSecurityContext, the value specified in SecurityContext
-takes precedence for that container.
-Note that this field cannot be set when spec.os.name is windows.
-
- The seccomp options to use by the containers in this pod.
-Note that this field cannot be set when spec.os.name is windows.
-
-
false
-
-
supplementalGroups
-
[]integer
-
- A list of groups applied to the first process run in each container, in addition
-to the container's primary GID, the fsGroup (if specified), and group memberships
-defined in the container image for the uid of the container process. If unspecified,
-no additional groups are added to any container. Note that group memberships
-defined in the container image for the uid of the container process are still effective,
-even if they are not included in this list.
-Note that this field cannot be set when spec.os.name is windows.
-
- Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
-sysctls (by the container runtime) might fail to launch.
-Note that this field cannot be set when spec.os.name is windows.
-
- The Windows specific settings applied to all containers.
-If unspecified, the options within a container's SecurityContext will be used.
-If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
-Note that this field cannot be set when spec.os.name is linux.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.securityContext.appArmorProfile
-[↩ Parent](#proxyclassspecstatefulsetpodsecuritycontext)
-
-
-
-appArmorProfile is the AppArmor options to use by the containers in this pod.
-Note that this field cannot be set when spec.os.name is windows.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
type
-
string
-
- type indicates which kind of AppArmor profile will be applied.
-Valid options are:
- Localhost - a profile pre-loaded on the node.
- RuntimeDefault - the container runtime's default profile.
- Unconfined - no AppArmor enforcement.
-
-
true
-
-
localhostProfile
-
string
-
- localhostProfile indicates a profile loaded on the node that should be used.
-The profile must be preconfigured on the node to work.
-Must match the loaded name of the profile.
-Must be set if and only if type is "Localhost".
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.securityContext.seLinuxOptions
-[↩ Parent](#proxyclassspecstatefulsetpodsecuritycontext)
-
-
-
-The SELinux context to be applied to all containers.
-If unspecified, the container runtime will allocate a random SELinux context for each
-container. May also be set in SecurityContext. If set in
-both SecurityContext and PodSecurityContext, the value specified in SecurityContext
-takes precedence for that container.
-Note that this field cannot be set when spec.os.name is windows.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
level
-
string
-
- Level is SELinux level label that applies to the container.
-
-
false
-
-
role
-
string
-
- Role is a SELinux role label that applies to the container.
-
-
false
-
-
type
-
string
-
- Type is a SELinux type label that applies to the container.
-
-
false
-
-
user
-
string
-
- User is a SELinux user label that applies to the container.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.securityContext.seccompProfile
-[↩ Parent](#proxyclassspecstatefulsetpodsecuritycontext)
-
-
-
-The seccomp options to use by the containers in this pod.
-Note that this field cannot be set when spec.os.name is windows.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
type
-
string
-
- type indicates which kind of seccomp profile will be applied.
-Valid options are:
-
-Localhost - a profile defined in a file on the node should be used.
-RuntimeDefault - the container runtime default profile should be used.
-Unconfined - no profile should be applied.
-
-
true
-
-
localhostProfile
-
string
-
- localhostProfile indicates a profile defined in a file on the node should be used.
-The profile must be preconfigured on the node to work.
-Must be a descending path, relative to the kubelet's configured seccomp profile location.
-Must be set if type is "Localhost". Must NOT be set for any other type.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.securityContext.sysctls[index]
-[↩ Parent](#proxyclassspecstatefulsetpodsecuritycontext)
-
-
-
-Sysctl defines a kernel parameter to be set
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
name
-
string
-
- Name of a property to set
-
-
true
-
-
value
-
string
-
- Value of a property to set
-
-
true
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.securityContext.windowsOptions
-[↩ Parent](#proxyclassspecstatefulsetpodsecuritycontext)
-
-
-
-The Windows specific settings applied to all containers.
-If unspecified, the options within a container's SecurityContext will be used.
-If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
-Note that this field cannot be set when spec.os.name is linux.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
gmsaCredentialSpec
-
string
-
- GMSACredentialSpec is where the GMSA admission webhook
-(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
-GMSA credential spec named by the GMSACredentialSpecName field.
-
-
false
-
-
gmsaCredentialSpecName
-
string
-
- GMSACredentialSpecName is the name of the GMSA credential spec to use.
-
-
false
-
-
hostProcess
-
boolean
-
- HostProcess determines if a container should be run as a 'Host Process' container.
-All of a Pod's containers must have the same effective HostProcess value
-(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
-In addition, if HostProcess is true then HostNetwork must also be set to true.
-
-
false
-
-
runAsUserName
-
string
-
- The UserName in Windows to run the entrypoint of the container process.
-Defaults to the user specified in image metadata if unspecified.
-May also be set in PodSecurityContext. If set in both SecurityContext and
-PodSecurityContext, the value specified in SecurityContext takes precedence.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.tailscaleContainer
-[↩ Parent](#proxyclassspecstatefulsetpod)
-
-
-
-Configuration for the proxy container running tailscale.
-
-
- List of environment variables to set in the container.
-https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables
-Note that environment variables provided here will take precedence
-over Tailscale-specific environment variables set by the operator,
-however running proxies with custom values for Tailscale environment
-variables (i.e TS_USERSPACE) is not recommended and might break in
-the future.
-
-
false
-
-
image
-
string
-
- Container image name. By default images are pulled from
-docker.io/tailscale/tailscale, but the official images are also
-available at ghcr.io/tailscale/tailscale. Specifying image name here
-will override any proxy image values specified via the Kubernetes
-operator's Helm chart values or PROXY_IMAGE env var in the operator
-Deployment.
-https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image
-
-
false
-
-
imagePullPolicy
-
enum
-
- Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always.
-https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image
-
- Enum: Always, Never, IfNotPresent
-
- Container resource requirements.
-By default Tailscale Kubernetes operator does not apply any resource
-requirements. The amount of resources required wil depend on the
-amount of resources the operator needs to parse, usage patterns and
-cluster size.
-https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources
-
- Container security context.
-Security context specified here will override the security context by the operator.
-By default the operator:
-- sets 'privileged: true' for the init container
-- set NET_ADMIN capability for tailscale container for proxies that
-are created for Services or Connector.
-https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context
-
- Name of the environment variable. Must be a C_IDENTIFIER.
-
-
true
-
-
value
-
string
-
- Variable references $(VAR_NAME) are expanded using the previously defined
- environment variables in the container and any service environment
-variables. If a variable cannot be resolved, the reference in the input
-string will be unchanged. Double $$ are reduced to a single $, which
-allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
-produce the string literal "$(VAR_NAME)". Escaped references will never
-be expanded, regardless of whether the variable exists or not. Defaults
-to "".
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.tailscaleContainer.resources
-[↩ Parent](#proxyclassspecstatefulsetpodtailscalecontainer)
-
-
-
-Container resource requirements.
-By default Tailscale Kubernetes operator does not apply any resource
-requirements. The amount of resources required wil depend on the
-amount of resources the operator needs to parse, usage patterns and
-cluster size.
-https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources
-
-
- Claims lists the names of resources, defined in spec.resourceClaims,
-that are used by this container.
-
-This is an alpha field and requires enabling the
-DynamicResourceAllocation feature gate.
-
-This field is immutable. It can only be set for containers.
-
-
false
-
-
limits
-
map[string]int or string
-
- Limits describes the maximum amount of compute resources allowed.
-More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
-
-
false
-
-
requests
-
map[string]int or string
-
- Requests describes the minimum amount of compute resources required.
-If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
-otherwise to an implementation-defined value. Requests cannot exceed Limits.
-More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.tailscaleContainer.resources.claims[index]
-[↩ Parent](#proxyclassspecstatefulsetpodtailscalecontainerresources)
-
-
-
-ResourceClaim references one entry in PodSpec.ResourceClaims.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
name
-
string
-
- Name must match the name of one entry in pod.spec.resourceClaims of
-the Pod where this field is used. It makes that resource available
-inside a container.
-
-
true
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.tailscaleContainer.securityContext
-[↩ Parent](#proxyclassspecstatefulsetpodtailscalecontainer)
-
-
-
-Container security context.
-Security context specified here will override the security context by the operator.
-By default the operator:
-- sets 'privileged: true' for the init container
-- set NET_ADMIN capability for tailscale container for proxies that
-are created for Services or Connector.
-https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
allowPrivilegeEscalation
-
boolean
-
- AllowPrivilegeEscalation controls whether a process can gain more
-privileges than its parent process. This bool directly controls if
-the no_new_privs flag will be set on the container process.
-AllowPrivilegeEscalation is true always when the container is:
-1) run as Privileged
-2) has CAP_SYS_ADMIN
-Note that this field cannot be set when spec.os.name is windows.
-
- appArmorProfile is the AppArmor options to use by this container. If set, this profile
-overrides the pod's appArmorProfile.
-Note that this field cannot be set when spec.os.name is windows.
-
- The capabilities to add/drop when running containers.
-Defaults to the default set of capabilities granted by the container runtime.
-Note that this field cannot be set when spec.os.name is windows.
-
-
false
-
-
privileged
-
boolean
-
- Run container in privileged mode.
-Processes in privileged containers are essentially equivalent to root on the host.
-Defaults to false.
-Note that this field cannot be set when spec.os.name is windows.
-
-
false
-
-
procMount
-
string
-
- procMount denotes the type of proc mount to use for the containers.
-The default is DefaultProcMount which uses the container runtime defaults for
-readonly paths and masked paths.
-This requires the ProcMountType feature flag to be enabled.
-Note that this field cannot be set when spec.os.name is windows.
-
-
false
-
-
readOnlyRootFilesystem
-
boolean
-
- Whether this container has a read-only root filesystem.
-Default is false.
-Note that this field cannot be set when spec.os.name is windows.
-
-
false
-
-
runAsGroup
-
integer
-
- The GID to run the entrypoint of the container process.
-Uses runtime default if unset.
-May also be set in PodSecurityContext. If set in both SecurityContext and
-PodSecurityContext, the value specified in SecurityContext takes precedence.
-Note that this field cannot be set when spec.os.name is windows.
-
- Format: int64
-
-
false
-
-
runAsNonRoot
-
boolean
-
- Indicates that the container must run as a non-root user.
-If true, the Kubelet will validate the image at runtime to ensure that it
-does not run as UID 0 (root) and fail to start the container if it does.
-If unset or false, no such validation will be performed.
-May also be set in PodSecurityContext. If set in both SecurityContext and
-PodSecurityContext, the value specified in SecurityContext takes precedence.
-
-
false
-
-
runAsUser
-
integer
-
- The UID to run the entrypoint of the container process.
-Defaults to user specified in image metadata if unspecified.
-May also be set in PodSecurityContext. If set in both SecurityContext and
-PodSecurityContext, the value specified in SecurityContext takes precedence.
-Note that this field cannot be set when spec.os.name is windows.
-
- Format: int64
-
- The SELinux context to be applied to the container.
-If unspecified, the container runtime will allocate a random SELinux context for each
-container. May also be set in PodSecurityContext. If set in both SecurityContext and
-PodSecurityContext, the value specified in SecurityContext takes precedence.
-Note that this field cannot be set when spec.os.name is windows.
-
- The seccomp options to use by this container. If seccomp options are
-provided at both the pod & container level, the container options
-override the pod options.
-Note that this field cannot be set when spec.os.name is windows.
-
- The Windows specific settings applied to all containers.
-If unspecified, the options from the PodSecurityContext will be used.
-If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
-Note that this field cannot be set when spec.os.name is linux.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.tailscaleContainer.securityContext.appArmorProfile
-[↩ Parent](#proxyclassspecstatefulsetpodtailscalecontainersecuritycontext)
-
-
-
-appArmorProfile is the AppArmor options to use by this container. If set, this profile
-overrides the pod's appArmorProfile.
-Note that this field cannot be set when spec.os.name is windows.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
type
-
string
-
- type indicates which kind of AppArmor profile will be applied.
-Valid options are:
- Localhost - a profile pre-loaded on the node.
- RuntimeDefault - the container runtime's default profile.
- Unconfined - no AppArmor enforcement.
-
-
true
-
-
localhostProfile
-
string
-
- localhostProfile indicates a profile loaded on the node that should be used.
-The profile must be preconfigured on the node to work.
-Must match the loaded name of the profile.
-Must be set if and only if type is "Localhost".
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.tailscaleContainer.securityContext.capabilities
-[↩ Parent](#proxyclassspecstatefulsetpodtailscalecontainersecuritycontext)
-
-
-
-The capabilities to add/drop when running containers.
-Defaults to the default set of capabilities granted by the container runtime.
-Note that this field cannot be set when spec.os.name is windows.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
add
-
[]string
-
- Added capabilities
-
-
false
-
-
drop
-
[]string
-
- Removed capabilities
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.tailscaleContainer.securityContext.seLinuxOptions
-[↩ Parent](#proxyclassspecstatefulsetpodtailscalecontainersecuritycontext)
-
-
-
-The SELinux context to be applied to the container.
-If unspecified, the container runtime will allocate a random SELinux context for each
-container. May also be set in PodSecurityContext. If set in both SecurityContext and
-PodSecurityContext, the value specified in SecurityContext takes precedence.
-Note that this field cannot be set when spec.os.name is windows.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
level
-
string
-
- Level is SELinux level label that applies to the container.
-
-
false
-
-
role
-
string
-
- Role is a SELinux role label that applies to the container.
-
-
false
-
-
type
-
string
-
- Type is a SELinux type label that applies to the container.
-
-
false
-
-
user
-
string
-
- User is a SELinux user label that applies to the container.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.tailscaleContainer.securityContext.seccompProfile
-[↩ Parent](#proxyclassspecstatefulsetpodtailscalecontainersecuritycontext)
-
-
-
-The seccomp options to use by this container. If seccomp options are
-provided at both the pod & container level, the container options
-override the pod options.
-Note that this field cannot be set when spec.os.name is windows.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
type
-
string
-
- type indicates which kind of seccomp profile will be applied.
-Valid options are:
-
-Localhost - a profile defined in a file on the node should be used.
-RuntimeDefault - the container runtime default profile should be used.
-Unconfined - no profile should be applied.
-
-
true
-
-
localhostProfile
-
string
-
- localhostProfile indicates a profile defined in a file on the node should be used.
-The profile must be preconfigured on the node to work.
-Must be a descending path, relative to the kubelet's configured seccomp profile location.
-Must be set if type is "Localhost". Must NOT be set for any other type.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.tailscaleContainer.securityContext.windowsOptions
-[↩ Parent](#proxyclassspecstatefulsetpodtailscalecontainersecuritycontext)
-
-
-
-The Windows specific settings applied to all containers.
-If unspecified, the options from the PodSecurityContext will be used.
-If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
-Note that this field cannot be set when spec.os.name is linux.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
gmsaCredentialSpec
-
string
-
- GMSACredentialSpec is where the GMSA admission webhook
-(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
-GMSA credential spec named by the GMSACredentialSpecName field.
-
-
false
-
-
gmsaCredentialSpecName
-
string
-
- GMSACredentialSpecName is the name of the GMSA credential spec to use.
-
-
false
-
-
hostProcess
-
boolean
-
- HostProcess determines if a container should be run as a 'Host Process' container.
-All of a Pod's containers must have the same effective HostProcess value
-(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
-In addition, if HostProcess is true then HostNetwork must also be set to true.
-
-
false
-
-
runAsUserName
-
string
-
- The UserName in Windows to run the entrypoint of the container process.
-Defaults to the user specified in image metadata if unspecified.
-May also be set in PodSecurityContext. If set in both SecurityContext and
-PodSecurityContext, the value specified in SecurityContext takes precedence.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.tailscaleInitContainer
-[↩ Parent](#proxyclassspecstatefulsetpod)
-
-
-
-Configuration for the proxy init container that enables forwarding.
-
-
- List of environment variables to set in the container.
-https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables
-Note that environment variables provided here will take precedence
-over Tailscale-specific environment variables set by the operator,
-however running proxies with custom values for Tailscale environment
-variables (i.e TS_USERSPACE) is not recommended and might break in
-the future.
-
-
false
-
-
image
-
string
-
- Container image name. By default images are pulled from
-docker.io/tailscale/tailscale, but the official images are also
-available at ghcr.io/tailscale/tailscale. Specifying image name here
-will override any proxy image values specified via the Kubernetes
-operator's Helm chart values or PROXY_IMAGE env var in the operator
-Deployment.
-https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image
-
-
false
-
-
imagePullPolicy
-
enum
-
- Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always.
-https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image
-
- Enum: Always, Never, IfNotPresent
-
- Container resource requirements.
-By default Tailscale Kubernetes operator does not apply any resource
-requirements. The amount of resources required wil depend on the
-amount of resources the operator needs to parse, usage patterns and
-cluster size.
-https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources
-
- Container security context.
-Security context specified here will override the security context by the operator.
-By default the operator:
-- sets 'privileged: true' for the init container
-- set NET_ADMIN capability for tailscale container for proxies that
-are created for Services or Connector.
-https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context
-
- Name of the environment variable. Must be a C_IDENTIFIER.
-
-
true
-
-
value
-
string
-
- Variable references $(VAR_NAME) are expanded using the previously defined
- environment variables in the container and any service environment
-variables. If a variable cannot be resolved, the reference in the input
-string will be unchanged. Double $$ are reduced to a single $, which
-allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
-produce the string literal "$(VAR_NAME)". Escaped references will never
-be expanded, regardless of whether the variable exists or not. Defaults
-to "".
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.tailscaleInitContainer.resources
-[↩ Parent](#proxyclassspecstatefulsetpodtailscaleinitcontainer)
-
-
-
-Container resource requirements.
-By default Tailscale Kubernetes operator does not apply any resource
-requirements. The amount of resources required wil depend on the
-amount of resources the operator needs to parse, usage patterns and
-cluster size.
-https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources
-
-
- Claims lists the names of resources, defined in spec.resourceClaims,
-that are used by this container.
-
-This is an alpha field and requires enabling the
-DynamicResourceAllocation feature gate.
-
-This field is immutable. It can only be set for containers.
-
-
false
-
-
limits
-
map[string]int or string
-
- Limits describes the maximum amount of compute resources allowed.
-More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
-
-
false
-
-
requests
-
map[string]int or string
-
- Requests describes the minimum amount of compute resources required.
-If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
-otherwise to an implementation-defined value. Requests cannot exceed Limits.
-More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.tailscaleInitContainer.resources.claims[index]
-[↩ Parent](#proxyclassspecstatefulsetpodtailscaleinitcontainerresources)
-
-
-
-ResourceClaim references one entry in PodSpec.ResourceClaims.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
name
-
string
-
- Name must match the name of one entry in pod.spec.resourceClaims of
-the Pod where this field is used. It makes that resource available
-inside a container.
-
-
true
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.tailscaleInitContainer.securityContext
-[↩ Parent](#proxyclassspecstatefulsetpodtailscaleinitcontainer)
-
-
-
-Container security context.
-Security context specified here will override the security context by the operator.
-By default the operator:
-- sets 'privileged: true' for the init container
-- set NET_ADMIN capability for tailscale container for proxies that
-are created for Services or Connector.
-https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
allowPrivilegeEscalation
-
boolean
-
- AllowPrivilegeEscalation controls whether a process can gain more
-privileges than its parent process. This bool directly controls if
-the no_new_privs flag will be set on the container process.
-AllowPrivilegeEscalation is true always when the container is:
-1) run as Privileged
-2) has CAP_SYS_ADMIN
-Note that this field cannot be set when spec.os.name is windows.
-
- appArmorProfile is the AppArmor options to use by this container. If set, this profile
-overrides the pod's appArmorProfile.
-Note that this field cannot be set when spec.os.name is windows.
-
- The capabilities to add/drop when running containers.
-Defaults to the default set of capabilities granted by the container runtime.
-Note that this field cannot be set when spec.os.name is windows.
-
-
false
-
-
privileged
-
boolean
-
- Run container in privileged mode.
-Processes in privileged containers are essentially equivalent to root on the host.
-Defaults to false.
-Note that this field cannot be set when spec.os.name is windows.
-
-
false
-
-
procMount
-
string
-
- procMount denotes the type of proc mount to use for the containers.
-The default is DefaultProcMount which uses the container runtime defaults for
-readonly paths and masked paths.
-This requires the ProcMountType feature flag to be enabled.
-Note that this field cannot be set when spec.os.name is windows.
-
-
false
-
-
readOnlyRootFilesystem
-
boolean
-
- Whether this container has a read-only root filesystem.
-Default is false.
-Note that this field cannot be set when spec.os.name is windows.
-
-
false
-
-
runAsGroup
-
integer
-
- The GID to run the entrypoint of the container process.
-Uses runtime default if unset.
-May also be set in PodSecurityContext. If set in both SecurityContext and
-PodSecurityContext, the value specified in SecurityContext takes precedence.
-Note that this field cannot be set when spec.os.name is windows.
-
- Format: int64
-
-
false
-
-
runAsNonRoot
-
boolean
-
- Indicates that the container must run as a non-root user.
-If true, the Kubelet will validate the image at runtime to ensure that it
-does not run as UID 0 (root) and fail to start the container if it does.
-If unset or false, no such validation will be performed.
-May also be set in PodSecurityContext. If set in both SecurityContext and
-PodSecurityContext, the value specified in SecurityContext takes precedence.
-
-
false
-
-
runAsUser
-
integer
-
- The UID to run the entrypoint of the container process.
-Defaults to user specified in image metadata if unspecified.
-May also be set in PodSecurityContext. If set in both SecurityContext and
-PodSecurityContext, the value specified in SecurityContext takes precedence.
-Note that this field cannot be set when spec.os.name is windows.
-
- Format: int64
-
- The SELinux context to be applied to the container.
-If unspecified, the container runtime will allocate a random SELinux context for each
-container. May also be set in PodSecurityContext. If set in both SecurityContext and
-PodSecurityContext, the value specified in SecurityContext takes precedence.
-Note that this field cannot be set when spec.os.name is windows.
-
- The seccomp options to use by this container. If seccomp options are
-provided at both the pod & container level, the container options
-override the pod options.
-Note that this field cannot be set when spec.os.name is windows.
-
- The Windows specific settings applied to all containers.
-If unspecified, the options from the PodSecurityContext will be used.
-If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
-Note that this field cannot be set when spec.os.name is linux.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.tailscaleInitContainer.securityContext.appArmorProfile
-[↩ Parent](#proxyclassspecstatefulsetpodtailscaleinitcontainersecuritycontext)
-
-
-
-appArmorProfile is the AppArmor options to use by this container. If set, this profile
-overrides the pod's appArmorProfile.
-Note that this field cannot be set when spec.os.name is windows.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
type
-
string
-
- type indicates which kind of AppArmor profile will be applied.
-Valid options are:
- Localhost - a profile pre-loaded on the node.
- RuntimeDefault - the container runtime's default profile.
- Unconfined - no AppArmor enforcement.
-
-
true
-
-
localhostProfile
-
string
-
- localhostProfile indicates a profile loaded on the node that should be used.
-The profile must be preconfigured on the node to work.
-Must match the loaded name of the profile.
-Must be set if and only if type is "Localhost".
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.tailscaleInitContainer.securityContext.capabilities
-[↩ Parent](#proxyclassspecstatefulsetpodtailscaleinitcontainersecuritycontext)
-
-
-
-The capabilities to add/drop when running containers.
-Defaults to the default set of capabilities granted by the container runtime.
-Note that this field cannot be set when spec.os.name is windows.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
add
-
[]string
-
- Added capabilities
-
-
false
-
-
drop
-
[]string
-
- Removed capabilities
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.tailscaleInitContainer.securityContext.seLinuxOptions
-[↩ Parent](#proxyclassspecstatefulsetpodtailscaleinitcontainersecuritycontext)
-
-
-
-The SELinux context to be applied to the container.
-If unspecified, the container runtime will allocate a random SELinux context for each
-container. May also be set in PodSecurityContext. If set in both SecurityContext and
-PodSecurityContext, the value specified in SecurityContext takes precedence.
-Note that this field cannot be set when spec.os.name is windows.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
level
-
string
-
- Level is SELinux level label that applies to the container.
-
-
false
-
-
role
-
string
-
- Role is a SELinux role label that applies to the container.
-
-
false
-
-
type
-
string
-
- Type is a SELinux type label that applies to the container.
-
-
false
-
-
user
-
string
-
- User is a SELinux user label that applies to the container.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.tailscaleInitContainer.securityContext.seccompProfile
-[↩ Parent](#proxyclassspecstatefulsetpodtailscaleinitcontainersecuritycontext)
-
-
-
-The seccomp options to use by this container. If seccomp options are
-provided at both the pod & container level, the container options
-override the pod options.
-Note that this field cannot be set when spec.os.name is windows.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
type
-
string
-
- type indicates which kind of seccomp profile will be applied.
-Valid options are:
-
-Localhost - a profile defined in a file on the node should be used.
-RuntimeDefault - the container runtime default profile should be used.
-Unconfined - no profile should be applied.
-
-
true
-
-
localhostProfile
-
string
-
- localhostProfile indicates a profile defined in a file on the node should be used.
-The profile must be preconfigured on the node to work.
-Must be a descending path, relative to the kubelet's configured seccomp profile location.
-Must be set if type is "Localhost". Must NOT be set for any other type.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.tailscaleInitContainer.securityContext.windowsOptions
-[↩ Parent](#proxyclassspecstatefulsetpodtailscaleinitcontainersecuritycontext)
-
-
-
-The Windows specific settings applied to all containers.
-If unspecified, the options from the PodSecurityContext will be used.
-If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
-Note that this field cannot be set when spec.os.name is linux.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
gmsaCredentialSpec
-
string
-
- GMSACredentialSpec is where the GMSA admission webhook
-(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
-GMSA credential spec named by the GMSACredentialSpecName field.
-
-
false
-
-
gmsaCredentialSpecName
-
string
-
- GMSACredentialSpecName is the name of the GMSA credential spec to use.
-
-
false
-
-
hostProcess
-
boolean
-
- HostProcess determines if a container should be run as a 'Host Process' container.
-All of a Pod's containers must have the same effective HostProcess value
-(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
-In addition, if HostProcess is true then HostNetwork must also be set to true.
-
-
false
-
-
runAsUserName
-
string
-
- The UserName in Windows to run the entrypoint of the container process.
-Defaults to the user specified in image metadata if unspecified.
-May also be set in PodSecurityContext. If set in both SecurityContext and
-PodSecurityContext, the value specified in SecurityContext takes precedence.
-
-
false
-
-
-
-
-### ProxyClass.spec.statefulSet.pod.tolerations[index]
-[↩ Parent](#proxyclassspecstatefulsetpod)
-
-
-
-The pod this Toleration is attached to tolerates any taint that matches
-the triple using the matching operator .
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
effect
-
string
-
- Effect indicates the taint effect to match. Empty means match all taint effects.
-When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
-
-
false
-
-
key
-
string
-
- Key is the taint key that the toleration applies to. Empty means match all taint keys.
-If the key is empty, operator must be Exists; this combination means to match all values and all keys.
-
-
false
-
-
operator
-
string
-
- Operator represents a key's relationship to the value.
-Valid operators are Exists and Equal. Defaults to Equal.
-Exists is equivalent to wildcard for value, so that a pod can
-tolerate all taints of a particular category.
-
-
false
-
-
tolerationSeconds
-
integer
-
- TolerationSeconds represents the period of time the toleration (which must be
-of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
-it is not set, which means tolerate the taint forever (do not evict). Zero and
-negative values will be treated as 0 (evict immediately) by the system.
-
- Format: int64
-
-
false
-
-
value
-
string
-
- Value is the taint value the toleration matches to.
-If the operator is Exists, the value should be empty, otherwise just a regular string.
-
-
false
-
-
-
-
-### ProxyClass.spec.tailscale
-[↩ Parent](#proxyclassspec)
-
-
-
-TailscaleConfig contains options to configure the tailscale-specific
-parameters of proxies.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
acceptRoutes
-
boolean
-
- AcceptRoutes can be set to true to make the proxy instance accept
-routes advertized by other nodes on the tailnet, such as subnet
-routes.
-This is equivalent of passing --accept-routes flag to a tailscale Linux client.
-https://tailscale.com/kb/1019/subnets#use-your-subnet-routes-from-other-machines
-Defaults to false.
-
-
false
-
-
-
-
-### ProxyClass.status
-[↩ Parent](#proxyclass)
-
-
-
-Status of the ProxyClass. This is set and managed automatically.
-https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
-
-
- List of status conditions to indicate the status of the ProxyClass.
-Known condition types are `ProxyClassReady`.
-
-
false
-
-
-
-
-### ProxyClass.status.conditions[index]
-[↩ Parent](#proxyclassstatus)
-
-
-
-Condition contains details for one aspect of the current state of this API Resource.
-
-
-
-
-
Name
-
Type
-
Description
-
Required
-
-
-
-
lastTransitionTime
-
string
-
- lastTransitionTime is the last time the condition transitioned from one status to another.
-This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
-
- Format: date-time
-
-
true
-
-
message
-
string
-
- message is a human readable message indicating details about the transition.
-This may be an empty string.
-
-
true
-
-
reason
-
string
-
- reason contains a programmatic identifier indicating the reason for the condition's last transition.
-Producers of specific condition types may define expected values and meanings for this field,
-and whether the values are considered a guaranteed API.
-The value should be a CamelCase string.
-This field may not be empty.
-
-
true
-
-
status
-
enum
-
- status of the condition, one of True, False, Unknown.
-
- Enum: True, False, Unknown
-
-
true
-
-
type
-
string
-
- type of condition in CamelCase or in foo.example.com/CamelCase.
-
-
true
-
-
observedGeneration
-
integer
-
- observedGeneration represents the .metadata.generation that the condition was set based upon.
-For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
-with respect to the current state of the instance.
-
- Format: int64
- Minimum: 0
-
-
false
-
-
+
+
+_Appears in:_
+- [ProxyClassList](#proxyclasslist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | |
+| `kind` _string_ | `ProxyClass` | | |
+| `kind` _string_ | Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | |
+| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[ProxyClassSpec](#proxyclassspec)_ | Specification of the desired state of the ProxyClass resource. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | | |
+| `status` _[ProxyClassStatus](#proxyclassstatus)_ | Status of the ProxyClass. This is set and managed automatically. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | | |
+
+
+#### ProxyClassList
+
+
+
+
+
+
+
+
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | |
+| `kind` _string_ | `ProxyClassList` | | |
+| `kind` _string_ | Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | |
+| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[ProxyClass](#proxyclass) array_ | | | |
+
+
+#### ProxyClassSpec
+
+
+
+
+
+
+
+_Appears in:_
+- [ProxyClass](#proxyclass)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `statefulSet` _[StatefulSet](#statefulset)_ | Configuration parameters for the proxy's StatefulSet. Tailscale Kubernetes operator deploys a StatefulSet for each of the user configured proxies (Tailscale Ingress, Tailscale Service, Connector). | | |
+| `metrics` _[Metrics](#metrics)_ | Configuration for proxy metrics. Metrics are currently not supported for egress proxies and for Ingress proxies that have been configured with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation. Note that the metrics are currently considered unstable and will likely change in breaking ways in the future - we only recommend that you use those for debugging purposes. | | |
+| `tailscale` _[TailscaleConfig](#tailscaleconfig)_ | TailscaleConfig contains options to configure the tailscale-specific parameters of proxies. | | |
+
+
+#### ProxyClassStatus
+
+
+
+
+
+
+
+_Appears in:_
+- [ProxyClass](#proxyclass)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the ProxyClass. Known condition types are `ProxyClassReady`. | | |
+
+
+#### Route
+
+_Underlying type:_ _string_
+
+
+
+_Validation:_
+- Format: cidr
+- Type: string
+
+_Appears in:_
+- [Routes](#routes)
+
+
+
+#### Routes
+
+_Underlying type:_ _[Route](#route)_
+
+
+
+_Validation:_
+- Format: cidr
+- MinItems: 1
+- Type: string
+
+_Appears in:_
+- [SubnetRouter](#subnetrouter)
+
+
+
+#### StatefulSet
+
+
+
+
+
+
+
+_Appears in:_
+- [ProxyClassSpec](#proxyclassspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `labels` _object (keys:string, values:string)_ | Labels that will be added to the StatefulSet created for the proxy. Any labels specified here will be merged with the default labels applied to the StatefulSet by the Tailscale Kubernetes operator as well as any other labels that might have been applied by other actors. Label keys and values must be valid Kubernetes label keys and values. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set | | |
+| `annotations` _object (keys:string, values:string)_ | Annotations that will be added to the StatefulSet created for the proxy. Any Annotations specified here will be merged with the default annotations applied to the StatefulSet by the Tailscale Kubernetes operator as well as any other annotations that might have been applied by other actors. Annotations must be valid Kubernetes annotations. https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set | | |
+| `pod` _[Pod](#pod)_ | Configuration for the proxy Pod. | | |
+
+
+#### SubnetRouter
+
+
+
+SubnetRouter defines subnet routes that should be exposed to tailnet via a
+Connector node.
+
+
+
+_Appears in:_
+- [ConnectorSpec](#connectorspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `advertiseRoutes` _[Routes](#routes)_ | AdvertiseRoutes refer to CIDRs that the subnet router should make available. Route values must be strings that represent a valid IPv4 or IPv6 CIDR range. Values can be Tailscale 4via6 subnet routes. https://tailscale.com/kb/1201/4via6-subnets/ | | Format: cidr MinItems: 1 Type: string |
+
+
+#### Tag
+
+_Underlying type:_ _string_
+
+
+
+_Validation:_
+- Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
+- Type: string
+
+_Appears in:_
+- [Tags](#tags)
+
+
+
+#### Tags
+
+_Underlying type:_ _[Tag](#tag)_
+
+
+
+_Validation:_
+- Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
+- Type: string
+
+_Appears in:_
+- [ConnectorSpec](#connectorspec)
+
+
+
+#### TailscaleConfig
+
+
+
+
+
+
+
+_Appears in:_
+- [ProxyClassSpec](#proxyclassspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `acceptRoutes` _boolean_ | AcceptRoutes can be set to true to make the proxy instance accept routes advertised by other nodes on the tailnet, such as subnet routes. This is equivalent of passing --accept-routes flag to a tailscale Linux client. https://tailscale.com/kb/1019/subnets#use-your-subnet-routes-from-other-machines Defaults to false. | | |
+
+
diff --git a/k8s-operator/sessionrecording/fakes/fakes.go b/k8s-operator/sessionrecording/fakes/fakes.go
new file mode 100644
index 0000000000000..9eb1047e4242f
--- /dev/null
+++ b/k8s-operator/sessionrecording/fakes/fakes.go
@@ -0,0 +1,138 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !plan9
+
+// Package fakes contains mocks used for testing 'kubectl exec' session
+// recording functionality.
+package fakes
+
+import (
+ "bytes"
+ "encoding/json"
+ "net"
+ "sync"
+ "testing"
+ "time"
+
+ "math/rand"
+
+ "tailscale.com/sessionrecording"
+ "tailscale.com/tstime"
+)
+
+func New(conn net.Conn, wb bytes.Buffer, rb bytes.Buffer, closed bool) net.Conn {
+ return &TestConn{
+ Conn: conn,
+ writeBuf: wb,
+ readBuf: rb,
+ closed: closed,
+ }
+}
+
+type TestConn struct {
+ net.Conn
+ // writeBuf contains whatever was send to the conn via Write.
+ writeBuf bytes.Buffer
+ // readBuf contains whatever was sent to the conn via Read.
+ readBuf bytes.Buffer
+ sync.RWMutex // protects the following
+ closed bool
+}
+
+var _ net.Conn = &TestConn{}
+
+func (tc *TestConn) Read(b []byte) (int, error) {
+ return tc.readBuf.Read(b)
+}
+
+func (tc *TestConn) Write(b []byte) (int, error) {
+ return tc.writeBuf.Write(b)
+}
+
+func (tc *TestConn) Close() error {
+ tc.Lock()
+ defer tc.Unlock()
+ tc.closed = true
+ return nil
+}
+
+func (tc *TestConn) IsClosed() bool {
+ tc.Lock()
+ defer tc.Unlock()
+ return tc.closed
+}
+
+func (tc *TestConn) WriteBufBytes() []byte {
+ return tc.writeBuf.Bytes()
+}
+
+func (tc *TestConn) ResetReadBuf() {
+ tc.readBuf.Reset()
+}
+
+func (tc *TestConn) WriteReadBufBytes(b []byte) error {
+ _, err := tc.readBuf.Write(b)
+ return err
+}
+
// TestSessionRecorder is an in-memory io.WriteCloser that captures session
// recording data so tests can inspect what would have been sent to a
// tsrecorder instance.
type TestSessionRecorder struct {
	// buf holds data that was sent to the session recorder.
	buf bytes.Buffer
}

// Write appends b to the recorder's buffer.
func (r *TestSessionRecorder) Write(b []byte) (int, error) {
	return r.buf.Write(b)
}

// Close discards everything recorded so far. It never returns an error.
func (r *TestSessionRecorder) Close() error {
	r.buf.Reset()
	return nil
}

// Bytes returns all data recorded so far.
func (r *TestSessionRecorder) Bytes() []byte {
	return r.buf.Bytes()
}
+
// CastLine returns an asciicast v2 output event for payload p: a
// JSON-encoded, newline-terminated [elapsed, "o", data] triple, as the
// session recorder would receive it.
// https://docs.asciinema.org/manual/asciicast/v2/
// NOTE(review): the elapsed-time field is clock.Now().Sub(clock.Now()),
// which is zero for a fixed test clock; with a clock that advances on each
// Now() call it would not be — confirm that is intended.
func CastLine(t *testing.T, p []byte, clock tstime.Clock) []byte {
	t.Helper()
	j, err := json.Marshal([]any{
		clock.Now().Sub(clock.Now()).Seconds(),
		"o",
		string(p),
	})
	if err != nil {
		t.Fatalf("error marshalling cast line: %v", err)
	}
	return append(j, '\n')
}
+
+func AsciinemaResizeMsg(t *testing.T, width, height int) []byte {
+ t.Helper()
+ ch := sessionrecording.CastHeader{
+ Width: width,
+ Height: height,
+ }
+ bs, err := json.Marshal(ch)
+ if err != nil {
+ t.Fatalf("error marshalling CastHeader: %v", err)
+ }
+ return append(bs, '\n')
+}
+
// RandomBytes returns up to 4095 pseudorandom bytes split into at most two
// slices, for fuzz-style exercising of stream-parsing code. The generator
// is seeded from the current time, so contents differ between runs.
func RandomBytes(t *testing.T) [][]byte {
	t.Helper()
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	size := rng.Intn(4096)
	buf := make([]byte, size)
	t.Logf("RandomBytes: generating byte slice of length %d", size)
	if _, err := rng.Read(buf); err != nil {
		t.Fatalf("error generating random byte slice: %v", err)
	}
	if len(buf) < 2 {
		// Too short to split into two non-trivial pieces.
		return [][]byte{buf}
	}
	cut := rng.Intn(len(buf) - 1)
	return [][]byte{buf[:cut], buf[cut:]}
}
diff --git a/cmd/k8s-operator/spdy-hijacker.go b/k8s-operator/sessionrecording/hijacker.go
similarity index 66%
rename from cmd/k8s-operator/spdy-hijacker.go
rename to k8s-operator/sessionrecording/hijacker.go
index f74771e420914..2e7ec75980bac 100644
--- a/cmd/k8s-operator/spdy-hijacker.go
+++ b/k8s-operator/sessionrecording/hijacker.go
@@ -3,7 +3,9 @@
//go:build !plan9
-package main
+// Package sessionrecording contains functionality for recording Kubernetes API
+// server proxy 'kubectl exec' sessions.
+package sessionrecording
import (
"bufio"
@@ -19,17 +21,68 @@ import (
"github.com/pkg/errors"
"go.uber.org/zap"
"tailscale.com/client/tailscale/apitype"
+ "tailscale.com/k8s-operator/sessionrecording/spdy"
+ "tailscale.com/k8s-operator/sessionrecording/tsrecorder"
+ "tailscale.com/k8s-operator/sessionrecording/ws"
+ "tailscale.com/sessionrecording"
"tailscale.com/tailcfg"
"tailscale.com/tsnet"
"tailscale.com/tstime"
+ "tailscale.com/util/clientmetric"
"tailscale.com/util/multierr"
)
-// spdyHijacker implements [net/http.Hijacker] interface.
+const (
+ SPDYProtocol Protocol = "SPDY"
+ WSProtocol Protocol = "WebSocket"
+)
+
+// Protocol is the streaming protocol of the hijacked session. Supported
+// protocols are SPDY and WebSocket.
+type Protocol string
+
+var (
+ // CounterSessionRecordingsAttempted counts the number of session recording attempts.
+ CounterSessionRecordingsAttempted = clientmetric.NewCounter("k8s_auth_proxy_session_recordings_attempted")
+
+ // counterSessionRecordingsUploaded counts the number of successfully uploaded session recordings.
+ counterSessionRecordingsUploaded = clientmetric.NewCounter("k8s_auth_proxy_session_recordings_uploaded")
+)
+
// New returns a Hijacker configured from opts. The Hijacker's recorder dial
// function is set to sessionrecording.ConnectToRecorder; everything else is
// copied directly from opts.
func New(opts HijackerOpts) *Hijacker {
	return &Hijacker{
		ts:                opts.TS,
		req:               opts.Req,
		who:               opts.Who,
		ResponseWriter:    opts.W,
		pod:               opts.Pod,
		ns:                opts.Namespace,
		addrs:             opts.Addrs,
		failOpen:          opts.FailOpen,
		proto:             opts.Proto,
		log:               opts.Log,
		connectToRecorder: sessionrecording.ConnectToRecorder,
	}
}

// HijackerOpts contains options for constructing a Hijacker via New.
type HijackerOpts struct {
	TS        *tsnet.Server          // tsnet server used to dial the recorder
	Req       *http.Request          // the 'kubectl exec' request being hijacked
	W         http.ResponseWriter    // response writer to hijack the connection from
	Who       *apitype.WhoIsResponse // identity of the tailnet node originating the session
	Addrs     []netip.AddrPort       // tsrecorder addresses
	Log       *zap.SugaredLogger
	Pod       string   // name of the Pod being exec'd into
	Namespace string   // namespace of that Pod
	FailOpen  bool     // whether to proceed unrecorded if recording fails
	Proto     Protocol // streaming protocol of the session (SPDY or WebSocket)
}
+
+// Hijacker implements [net/http.Hijacker] interface.
// It must be configured with an http request for a 'kubectl exec' session that
// needs to be recorded. It knows how to hijack the connection and configure for
// the session contents to be sent to a tsrecorder instance.
-type spdyHijacker struct {
+type Hijacker struct {
http.ResponseWriter
ts *tsnet.Server
req *http.Request
@@ -40,6 +93,7 @@ type spdyHijacker struct {
addrs []netip.AddrPort // tsrecorder addresses
failOpen bool // whether to fail open if recording fails
connectToRecorder RecorderDialFn
+ proto Protocol // streaming protocol
}
// RecorderDialFn dials the specified netip.AddrPorts that should be tsrecorder
@@ -51,7 +105,7 @@ type RecorderDialFn func(context.Context, []netip.AddrPort, func(context.Context
// Hijack hijacks a 'kubectl exec' session and configures for the session
// contents to be sent to a recorder.
-func (h *spdyHijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+func (h *Hijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) {
h.log.Infof("recorder addrs: %v, failOpen: %v", h.addrs, h.failOpen)
reqConn, brw, err := h.ResponseWriter.(http.Hijacker).Hijack()
if err != nil {
@@ -69,15 +123,19 @@ func (h *spdyHijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) {
// spdyHijacker.addrs. Returns conn from provided opts, wrapped in recording
// logic. If connecting to the recorder fails or an error is received during the
// session and spdyHijacker.failOpen is false, connection will be closed.
-func (h *spdyHijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, error) {
+func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, error) {
const (
// https://docs.asciinema.org/manual/asciicast/v2/
asciicastv2 = 2
)
- var wc io.WriteCloser
+ var (
+ wc io.WriteCloser
+ err error
+ errChan <-chan error
+ )
h.log.Infof("kubectl exec session will be recorded, recorders: %v, fail open policy: %t", h.addrs, h.failOpen)
// TODO (irbekrm): send client a message that session will be recorded.
- rw, _, errChan, err := h.connectToRecorder(ctx, h.addrs, h.ts.Dial)
+ wc, _, errChan, err = h.connectToRecorder(ctx, h.addrs, h.ts.Dial)
if err != nil {
msg := fmt.Sprintf("error connecting to session recorders: %v", err)
if h.failOpen {
@@ -94,27 +152,16 @@ func (h *spdyHijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.C
// TODO (irbekrm): log which recorder
h.log.Info("successfully connected to a session recorder")
- wc = rw
cl := tstime.DefaultClock{}
- lc := &spdyRemoteConnRecorder{
- log: h.log,
- Conn: conn,
- rec: &recorder{
- start: cl.Now(),
- clock: cl,
- failOpen: h.failOpen,
- conn: wc,
- },
- }
-
+ rec := tsrecorder.New(wc, cl, cl.Now(), h.failOpen)
qp := h.req.URL.Query()
- ch := CastHeader{
+ ch := sessionrecording.CastHeader{
Version: asciicastv2,
- Timestamp: lc.rec.start.Unix(),
+ Timestamp: cl.Now().Unix(),
Command: strings.Join(qp["command"], " "),
SrcNode: strings.TrimSuffix(h.who.Node.Name, "."),
SrcNodeID: h.who.Node.StableID,
- Kubernetes: &Kubernetes{
+ Kubernetes: &sessionrecording.Kubernetes{
PodName: h.pod,
Namespace: h.ns,
Container: strings.Join(qp["container"], " "),
@@ -126,7 +173,17 @@ func (h *spdyHijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.C
} else {
ch.SrcNodeTags = h.who.Node.Tags
}
- lc.ch = ch
+
+ var lc net.Conn
+ switch h.proto {
+ case SPDYProtocol:
+ lc = spdy.New(conn, rec, ch, h.log)
+ case WSProtocol:
+ lc = ws.New(conn, rec, ch, h.log)
+ default:
+ return nil, fmt.Errorf("unknown protocol: %s", h.proto)
+ }
+
go func() {
var err error
select {
@@ -147,7 +204,6 @@ func (h *spdyHijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.C
}
msg += "; failure mode set to 'fail closed'; closing connection"
h.log.Error(msg)
- lc.failed = true
// TODO (irbekrm): write a message to the client
if err := lc.Close(); err != nil {
h.log.Infof("error closing recorder connections: %v", err)
@@ -157,52 +213,6 @@ func (h *spdyHijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.C
return lc, nil
}
-// CastHeader is the asciicast header to be sent to the recorder at the start of
-// the recording of a session.
-// https://docs.asciinema.org/manual/asciicast/v2/#header
-type CastHeader struct {
- // Version is the asciinema file format version.
- Version int `json:"version"`
-
- // Width is the terminal width in characters.
- Width int `json:"width"`
-
- // Height is the terminal height in characters.
- Height int `json:"height"`
-
- // Timestamp is the unix timestamp of when the recording started.
- Timestamp int64 `json:"timestamp"`
-
- // Tailscale-specific fields: SrcNode is the full MagicDNS name of the
- // tailnet node originating the connection, without the trailing dot.
- SrcNode string `json:"srcNode"`
-
- // SrcNodeID is the node ID of the tailnet node originating the connection.
- SrcNodeID tailcfg.StableNodeID `json:"srcNodeID"`
-
- // SrcNodeTags is the list of tags on the node originating the connection (if any).
- SrcNodeTags []string `json:"srcNodeTags,omitempty"`
-
- // SrcNodeUserID is the user ID of the node originating the connection (if not tagged).
- SrcNodeUserID tailcfg.UserID `json:"srcNodeUserID,omitempty"` // if not tagged
-
- // SrcNodeUser is the LoginName of the node originating the connection (if not tagged).
- SrcNodeUser string `json:"srcNodeUser,omitempty"`
-
- Command string
-
- // Kubernetes-specific fields:
- Kubernetes *Kubernetes `json:"kubernetes,omitempty"`
-}
-
-// Kubernetes contains 'kubectl exec' session specific information for
-// tsrecorder.
-type Kubernetes struct {
- PodName string
- Namespace string
- Container string
-}
-
func closeConnWithWarning(conn net.Conn, msg string) error {
b := io.NopCloser(bytes.NewBuffer([]byte(msg)))
resp := http.Response{Status: http.StatusText(http.StatusForbidden), StatusCode: http.StatusForbidden, Body: b}
diff --git a/cmd/k8s-operator/spdy-hijacker_test.go b/k8s-operator/sessionrecording/hijacker_test.go
similarity index 72%
rename from cmd/k8s-operator/spdy-hijacker_test.go
rename to k8s-operator/sessionrecording/hijacker_test.go
index 7ac79d7f0ca7d..5c19d3a1d870e 100644
--- a/cmd/k8s-operator/spdy-hijacker_test.go
+++ b/k8s-operator/sessionrecording/hijacker_test.go
@@ -3,7 +3,7 @@
//go:build !plan9
-package main
+package sessionrecording
import (
"context"
@@ -19,12 +19,13 @@ import (
"go.uber.org/zap"
"tailscale.com/client/tailscale/apitype"
+ "tailscale.com/k8s-operator/sessionrecording/fakes"
"tailscale.com/tailcfg"
"tailscale.com/tsnet"
"tailscale.com/tstest"
)
-func Test_SPDYHijacker(t *testing.T) {
+func Test_Hijacker(t *testing.T) {
zl, err := zap.NewDevelopment()
if err != nil {
t.Fatal(err)
@@ -36,37 +37,47 @@ func Test_SPDYHijacker(t *testing.T) {
failRecorderConnPostConnect bool // send error down the error channel
wantsConnClosed bool
wantsSetupErr bool
+ proto Protocol
}{
{
- name: "setup succeeds, conn stays open",
+ name: "setup_succeeds_conn_stays_open",
+ proto: SPDYProtocol,
},
{
- name: "setup fails, policy is to fail open, conn stays open",
+ name: "setup_succeeds_conn_stays_open_ws",
+ proto: WSProtocol,
+ },
+ {
+ name: "setup_fails_policy_is_to_fail_open_conn_stays_open",
failOpen: true,
failRecorderConnect: true,
+ proto: SPDYProtocol,
},
{
- name: "setup fails, policy is to fail closed, conn is closed",
+ name: "setup_fails_policy_is_to_fail_closed_conn_is_closed",
failRecorderConnect: true,
wantsSetupErr: true,
wantsConnClosed: true,
+ proto: SPDYProtocol,
},
{
- name: "connection fails post-initial connect, policy is to fail open, conn stays open",
+ name: "connection_fails_post-initial_connect_policy_is_to_fail_open_conn_stays_open",
failRecorderConnPostConnect: true,
failOpen: true,
+ proto: SPDYProtocol,
},
{
- name: "connection fails post-initial connect, policy is to fail closed, conn is closed",
+ name: "connection_fails_post-initial_connect,_policy_is_to_fail_closed_conn_is_closed",
failRecorderConnPostConnect: true,
wantsConnClosed: true,
+ proto: SPDYProtocol,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- tc := &testConn{}
+ tc := &fakes.TestConn{}
ch := make(chan error)
- h := &spdyHijacker{
+ h := &Hijacker{
connectToRecorder: func(context.Context, []netip.AddrPort, func(context.Context, string, string) (net.Conn, error)) (wc io.WriteCloser, rec []*tailcfg.SSHRecordingAttempt, _ <-chan error, err error) {
if tt.failRecorderConnect {
err = errors.New("test")
@@ -78,6 +89,7 @@ func Test_SPDYHijacker(t *testing.T) {
log: zl.Sugar(),
ts: &tsnet.Server{},
req: &http.Request{URL: &url.URL{}},
+ proto: tt.proto,
}
ctx := context.Background()
_, err := h.setUpRecording(ctx, tc)
@@ -98,8 +110,8 @@ func Test_SPDYHijacker(t *testing.T) {
// (test that connection remains open over some period
// of time).
if err := tstest.WaitFor(timeout, func() (err error) {
- if tt.wantsConnClosed != tc.isClosed() {
- return fmt.Errorf("got connection state: %t, wants connection state: %t", tc.isClosed(), tt.wantsConnClosed)
+ if tt.wantsConnClosed != tc.IsClosed() {
+ return fmt.Errorf("got connection state: %t, wants connection state: %t", tc.IsClosed(), tt.wantsConnClosed)
}
return nil
}); err != nil {
diff --git a/cmd/k8s-operator/spdy-remote-conn-recorder.go b/k8s-operator/sessionrecording/spdy/conn.go
similarity index 70%
rename from cmd/k8s-operator/spdy-remote-conn-recorder.go
rename to k8s-operator/sessionrecording/spdy/conn.go
index 563b2a2410b3e..19a01641e4155 100644
--- a/cmd/k8s-operator/spdy-remote-conn-recorder.go
+++ b/k8s-operator/sessionrecording/spdy/conn.go
@@ -3,7 +3,9 @@
//go:build !plan9
-package main
+// Package spdy contains functionality for parsing SPDY streaming sessions. This
+// is used for 'kubectl exec' session recording.
+package spdy
import (
"bytes"
@@ -17,16 +19,35 @@ import (
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
+ "tailscale.com/k8s-operator/sessionrecording/tsrecorder"
+ "tailscale.com/sessionrecording"
)
-// spdyRemoteConnRecorder is a wrapper around net.Conn. It reads the bytestream
-// for a 'kubectl exec' session, sends session recording data to the configured
-// recorder and forwards the raw bytes to the original destination.
-type spdyRemoteConnRecorder struct {
+// New wraps the provided network connection and returns a connection whose reads and writes will get triggered as data is received on the hijacked connection.
+// The connection must be a hijacked connection for a 'kubectl exec' session using SPDY.
+// The hijacked connection is used to transmit SPDY streams between Kubernetes client ('kubectl') and the destination container.
+// Data read from the underlying network connection is data sent via one of the SPDY streams from the client to the container.
+// Data written to the underlying connection is data sent from the container to the client.
+// We parse the data and send everything for the STDOUT/STDERR streams to the configured tsrecorder as an asciinema recording with the provided header.
+// https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/4006-transition-spdy-to-websockets#background-remotecommand-subprotocol
+func New(nc net.Conn, rec *tsrecorder.Client, ch sessionrecording.CastHeader, log *zap.SugaredLogger) net.Conn {
+ return &conn{
+ Conn: nc,
+ rec: rec,
+ ch: ch,
+ log: log,
+ }
+}
+
+// conn is a wrapper around net.Conn. It reads the bytestream for a 'kubectl
+// exec' session streamed using SPDY protocol, sends session recording data to
+// the configured recorder and forwards the raw bytes to the original
+// destination.
+type conn struct {
net.Conn
// rec knows how to send data written to it to a tsrecorder instance.
- rec *recorder
- ch CastHeader
+ rec *tsrecorder.Client
+ ch sessionrecording.CastHeader
stdoutStreamID atomic.Uint32
stderrStreamID atomic.Uint32
@@ -34,7 +55,6 @@ type spdyRemoteConnRecorder struct {
wmu sync.Mutex // sequences writes
closed bool
- failed bool
rmu sync.Mutex // sequences reads
writeCastHeaderOnce sync.Once
@@ -53,7 +73,7 @@ type spdyRemoteConnRecorder struct {
// If the frame is a data frame for resize stream, sends resize message to the
// recorder. If the frame is a SYN_STREAM control frame that starts stdout,
// stderr or resize stream, store the stream ID.
-func (c *spdyRemoteConnRecorder) Read(b []byte) (int, error) {
+func (c *conn) Read(b []byte) (int, error) {
c.rmu.Lock()
defer c.rmu.Unlock()
n, err := c.Conn.Read(b)
@@ -103,7 +123,7 @@ func (c *spdyRemoteConnRecorder) Read(b []byte) (int, error) {
// Write forwards the raw data of the latest parsed SPDY frame to the original
// destination. If the frame is an SPDY data frame, it also sends the payload to
// the connected session recorder.
-func (c *spdyRemoteConnRecorder) Write(b []byte) (int, error) {
+func (c *conn) Write(b []byte) (int, error) {
c.wmu.Lock()
defer c.wmu.Unlock()
c.writeBuf.Write(b)
@@ -133,7 +153,7 @@ func (c *spdyRemoteConnRecorder) Write(b []byte) (int, error) {
return
}
j = append(j, '\n')
- err = c.rec.writeCastLine(j)
+ err = c.rec.WriteCastLine(j)
if err != nil {
c.log.Errorf("received error from recorder: %v", err)
}
@@ -151,15 +171,12 @@ func (c *spdyRemoteConnRecorder) Write(b []byte) (int, error) {
return len(b), err
}
-func (c *spdyRemoteConnRecorder) Close() error {
+func (c *conn) Close() error {
c.wmu.Lock()
defer c.wmu.Unlock()
if c.closed {
return nil
}
- if !c.failed && c.writeBuf.Len() > 0 {
- c.Conn.Write(c.writeBuf.Bytes())
- }
c.writeBuf.Reset()
c.closed = true
err := c.Conn.Close()
@@ -167,13 +184,13 @@ func (c *spdyRemoteConnRecorder) Close() error {
return err
}
-// parseSynStream parses SYN_STREAM SPDY control frame and updates
-// spdyRemoteConnRecorder to store the newly created stream's ID if it is one of
+// storeStreamID parses SYN_STREAM SPDY control frame and updates
+// conn to store the newly created stream's ID if it is one of
// the stream types we care about. Storing stream_id:stream_type mapping allows
// us to parse received data frames (that have stream IDs) differently depening
// on which stream they belong to (i.e send data frame payload for stdout stream
// to session recorder).
-func (c *spdyRemoteConnRecorder) storeStreamID(sf spdyFrame, header http.Header) {
+func (c *conn) storeStreamID(sf spdyFrame, header http.Header) {
const (
streamTypeHeaderKey = "Streamtype"
)
diff --git a/cmd/k8s-operator/spdy-remote-conn-recorder_test.go b/k8s-operator/sessionrecording/spdy/conn_test.go
similarity index 78%
rename from cmd/k8s-operator/spdy-remote-conn-recorder_test.go
rename to k8s-operator/sessionrecording/spdy/conn_test.go
index 95f5a8bfcef5e..629536b2e00b1 100644
--- a/cmd/k8s-operator/spdy-remote-conn-recorder_test.go
+++ b/k8s-operator/sessionrecording/spdy/conn_test.go
@@ -3,19 +3,19 @@
//go:build !plan9
-package main
+package spdy
import (
- "bytes"
"encoding/json"
- "net"
+ "fmt"
"reflect"
- "sync"
"testing"
"go.uber.org/zap"
+ "tailscale.com/k8s-operator/sessionrecording/fakes"
+ "tailscale.com/k8s-operator/sessionrecording/tsrecorder"
+ "tailscale.com/sessionrecording"
"tailscale.com/tstest"
- "tailscale.com/tstime"
)
// Test_Writes tests that 1 or more Write calls to spdyRemoteConnRecorder
@@ -56,13 +56,13 @@ func Test_Writes(t *testing.T) {
name: "single_write_stdout_data_frame_with_payload",
inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}},
wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5},
- wantRecorded: castLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl),
+ wantRecorded: fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl),
},
{
name: "single_write_stderr_data_frame_with_payload",
inputs: [][]byte{{0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}},
wantForwarded: []byte{0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5},
- wantRecorded: castLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl),
+ wantRecorded: fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl),
},
{
name: "single_data_frame_unknow_stream_with_payload",
@@ -73,13 +73,13 @@ func Test_Writes(t *testing.T) {
name: "control_frame_and_data_frame_split_across_two_writes",
inputs: [][]byte{{0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, {0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}},
wantForwarded: []byte{0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5},
- wantRecorded: castLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl),
+ wantRecorded: fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl),
},
{
name: "single_first_write_stdout_data_frame_with_payload",
inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}},
wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5},
- wantRecorded: append(asciinemaResizeMsg(t, 10, 20), castLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl)...),
+ wantRecorded: append(fakes.AsciinemaResizeMsg(t, 10, 20), fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl)...),
width: 10,
height: 20,
firstWrite: true,
@@ -87,19 +87,15 @@ func Test_Writes(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- tc := &testConn{}
- sr := &testSessionRecorder{}
- rec := &recorder{
- conn: sr,
- clock: cl,
- start: cl.Now(),
- }
+ tc := &fakes.TestConn{}
+ sr := &fakes.TestSessionRecorder{}
+ rec := tsrecorder.New(sr, cl, cl.Now(), true)
- c := &spdyRemoteConnRecorder{
+ c := &conn{
Conn: tc,
log: zl.Sugar(),
rec: rec,
- ch: CastHeader{
+ ch: sessionrecording.CastHeader{
Width: tt.width,
Height: tt.height,
},
@@ -118,13 +114,13 @@ func Test_Writes(t *testing.T) {
}
// Assert that the expected bytes have been forwarded to the original destination.
- gotForwarded := tc.writeBuf.Bytes()
+ gotForwarded := tc.WriteBufBytes()
if !reflect.DeepEqual(gotForwarded, tt.wantForwarded) {
t.Errorf("expected bytes not forwarded, wants\n%v\ngot\n%v", tt.wantForwarded, gotForwarded)
}
// Assert that the expected bytes have been forwarded to the session recorder.
- gotRecorded := sr.buf.Bytes()
+ gotRecorded := sr.Bytes()
if !reflect.DeepEqual(gotRecorded, tt.wantRecorded) {
t.Errorf("expected bytes not recorded, wants\n%v\ngot\n%v", tt.wantRecorded, gotRecorded)
}
@@ -197,14 +193,10 @@ func Test_Reads(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- tc := &testConn{}
- sr := &testSessionRecorder{}
- rec := &recorder{
- conn: sr,
- clock: cl,
- start: cl.Now(),
- }
- c := &spdyRemoteConnRecorder{
+ tc := &fakes.TestConn{}
+ sr := &fakes.TestSessionRecorder{}
+ rec := tsrecorder.New(sr, cl, cl.Now(), true)
+ c := &conn{
Conn: tc,
log: zl.Sugar(),
rec: rec,
@@ -213,9 +205,8 @@ func Test_Reads(t *testing.T) {
for i, input := range tt.inputs {
c.zlibReqReader = reader
- tc.readBuf.Reset()
- _, err := tc.readBuf.Write(input)
- if err != nil {
+ tc.ResetReadBuf()
+ if err := tc.WriteReadBufBytes(input); err != nil {
t.Fatalf("writing bytes to test conn: %v", err)
}
_, err = c.Read(make([]byte, len(input)))
@@ -244,83 +235,62 @@ func Test_Reads(t *testing.T) {
}
}
-func castLine(t *testing.T, p []byte, clock tstime.Clock) []byte {
- t.Helper()
- j, err := json.Marshal([]any{
- clock.Now().Sub(clock.Now()).Seconds(),
- "o",
- string(p),
- })
+// Test_conn_ReadRand tests reading arbitrarily generated byte slices from conn to
+// test that we don't panic when parsing input from a broken or malicious
+// client.
+func Test_conn_ReadRand(t *testing.T) {
+ zl, err := zap.NewDevelopment()
if err != nil {
- t.Fatalf("error marshalling cast line: %v", err)
+ t.Fatalf("error creating a test logger: %v", err)
+ }
+ for i := range 1000 {
+ tc := &fakes.TestConn{}
+ tc.ResetReadBuf()
+ c := &conn{
+ Conn: tc,
+ log: zl.Sugar(),
+ }
+ bb := fakes.RandomBytes(t)
+ for j, input := range bb {
+ if err := tc.WriteReadBufBytes(input); err != nil {
+ t.Fatalf("[%d] writing bytes to test conn: %v", i, err)
+ }
+ f := func() {
+ c.Read(make([]byte, len(input)))
+ }
+ testPanic(t, f, fmt.Sprintf("[%d %d] Read panic parsing input of length %d", i, j, len(input)))
+ }
}
- return append(j, '\n')
}
-func resizeMsgBytes(t *testing.T, width, height int) []byte {
- t.Helper()
- bs, err := json.Marshal(spdyResizeMsg{Width: width, Height: height})
+// Test_conn_WriteRand calls conn.Write with an arbitrary input to validate that
+// it does not panic.
+func Test_conn_WriteRand(t *testing.T) {
+ zl, err := zap.NewDevelopment()
if err != nil {
- t.Fatalf("error marshalling resizeMsg: %v", err)
+ t.Fatalf("error creating a test logger: %v", err)
+ }
+ for i := range 100 {
+ tc := &fakes.TestConn{}
+ c := &conn{
+ Conn: tc,
+ log: zl.Sugar(),
+ }
+ bb := fakes.RandomBytes(t)
+ for j, input := range bb {
+ f := func() {
+ c.Write(input)
+ }
+ testPanic(t, f, fmt.Sprintf("[%d %d] Write: panic parsing input of length %d", i, j, len(input)))
+ }
}
- return bs
}
-func asciinemaResizeMsg(t *testing.T, width, height int) []byte {
+func resizeMsgBytes(t *testing.T, width, height int) []byte {
t.Helper()
- ch := CastHeader{
- Width: width,
- Height: height,
- }
- bs, err := json.Marshal(ch)
+ bs, err := json.Marshal(spdyResizeMsg{Width: width, Height: height})
if err != nil {
- t.Fatalf("error marshalling CastHeader: %v", err)
+ t.Fatalf("error marshalling resizeMsg: %v", err)
}
- return append(bs, '\n')
-}
-
-type testConn struct {
- net.Conn
- // writeBuf contains whatever was send to the conn via Write.
- writeBuf bytes.Buffer
- // readBuf contains whatever was sent to the conn via Read.
- readBuf bytes.Buffer
- sync.RWMutex // protects the following
- closed bool
-}
-
-var _ net.Conn = &testConn{}
-
-func (tc *testConn) Read(b []byte) (int, error) {
- return tc.readBuf.Read(b)
-}
-
-func (tc *testConn) Write(b []byte) (int, error) {
- return tc.writeBuf.Write(b)
-}
-
-func (tc *testConn) Close() error {
- tc.Lock()
- defer tc.Unlock()
- tc.closed = true
- return nil
-}
-func (tc *testConn) isClosed() bool {
- tc.Lock()
- defer tc.Unlock()
- return tc.closed
-}
-
-type testSessionRecorder struct {
- // buf holds data that was sent to the session recorder.
- buf bytes.Buffer
-}
-
-func (t *testSessionRecorder) Write(b []byte) (int, error) {
- return t.buf.Write(b)
-}
-
-func (t *testSessionRecorder) Close() error {
- t.buf.Reset()
- return nil
+ return bs
}
diff --git a/cmd/k8s-operator/spdy-frame.go b/k8s-operator/sessionrecording/spdy/frame.go
similarity index 99%
rename from cmd/k8s-operator/spdy-frame.go
rename to k8s-operator/sessionrecording/spdy/frame.go
index 0ddefdfa1e9ce..54b29d33a9622 100644
--- a/cmd/k8s-operator/spdy-frame.go
+++ b/k8s-operator/sessionrecording/spdy/frame.go
@@ -3,7 +3,7 @@
//go:build !plan9
-package main
+package spdy
import (
"bytes"
diff --git a/cmd/k8s-operator/spdy-frame_test.go b/k8s-operator/sessionrecording/spdy/frame_test.go
similarity index 90%
rename from cmd/k8s-operator/spdy-frame_test.go
rename to k8s-operator/sessionrecording/spdy/frame_test.go
index 416ddfc8bc59d..4896cdcbf78a5 100644
--- a/cmd/k8s-operator/spdy-frame_test.go
+++ b/k8s-operator/sessionrecording/spdy/frame_test.go
@@ -3,17 +3,21 @@
//go:build !plan9
-package main
+package spdy
import (
"bytes"
"compress/zlib"
"encoding/binary"
+ "fmt"
"io"
"net/http"
"reflect"
"strings"
"testing"
+ "time"
+
+ "math/rand"
"github.com/google/go-cmp/cmp"
"go.uber.org/zap"
@@ -200,6 +204,29 @@ func Test_spdyFrame_parseHeaders(t *testing.T) {
}
}
+// Test_spdyFrame_ParseRand calls spdyFrame.Parse with randomly generated bytes
+// to test that it doesn't panic.
+func Test_spdyFrame_ParseRand(t *testing.T) {
+ zl, err := zap.NewDevelopment()
+ if err != nil {
+ t.Fatal(err)
+ }
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+ for i := range 100 {
+ n := r.Intn(4096)
+ b := make([]byte, n)
+ _, err := r.Read(b)
+ if err != nil {
+ t.Fatalf("error generating random byte slice: %v", err)
+ }
+ sf := &spdyFrame{}
+ f := func() {
+ sf.Parse(b, zl.Sugar())
+ }
+ testPanic(t, f, fmt.Sprintf("[%d] Parse panicked running with byte slice of length %d: %v", i, n, r))
+ }
+}
+
// payload takes a control frame type and a map with 0 or more header keys and
// values and returns a SPDY control frame payload with the header as SPDY zlib
// compressed header name/value block. The payload is padded with arbitrary
@@ -291,3 +318,13 @@ func header(hs map[string]string) http.Header {
}
return h
}
+
+func testPanic(t *testing.T, f func(), msg string) {
+ t.Helper()
+ defer func() {
+ if r := recover(); r != nil {
+ t.Fatal(msg, r)
+ }
+ }()
+ f()
+}
diff --git a/cmd/k8s-operator/zlib-reader.go b/k8s-operator/sessionrecording/spdy/zlib-reader.go
similarity index 99%
rename from cmd/k8s-operator/zlib-reader.go
rename to k8s-operator/sessionrecording/spdy/zlib-reader.go
index b29772be3a7e0..1eb654be35632 100644
--- a/cmd/k8s-operator/zlib-reader.go
+++ b/k8s-operator/sessionrecording/spdy/zlib-reader.go
@@ -3,7 +3,7 @@
//go:build !plan9
-package main
+package spdy
import (
"bytes"
diff --git a/cmd/k8s-operator/recorder.go b/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go
similarity index 70%
rename from cmd/k8s-operator/recorder.go
rename to k8s-operator/sessionrecording/tsrecorder/tsrecorder.go
index ae17f382040db..30142e4bdd1a5 100644
--- a/cmd/k8s-operator/recorder.go
+++ b/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go
@@ -3,7 +3,8 @@
//go:build !plan9
-package main
+// Package tsrecorder contains functionality for connecting to a tsrecorder instance.
+package tsrecorder
import (
"encoding/json"
@@ -16,9 +17,18 @@ import (
"tailscale.com/tstime"
)
+func New(conn io.WriteCloser, clock tstime.Clock, start time.Time, failOpen bool) *Client {
+ return &Client{
+ start: start,
+ clock: clock,
+ conn: conn,
+ failOpen: failOpen,
+ }
+}
+
// recorder knows how to send the provided bytes to the configured tsrecorder
// instance in asciinema format.
-type recorder struct {
+type Client struct {
start time.Time
clock tstime.Clock
@@ -36,7 +46,7 @@ type recorder struct {
// Write appends timestamp to the provided bytes and sends them to the
// configured tsrecorder.
-func (rec *recorder) Write(p []byte) (err error) {
+func (rec *Client) Write(p []byte) (err error) {
if len(p) == 0 {
return nil
}
@@ -52,7 +62,7 @@ func (rec *recorder) Write(p []byte) (err error) {
return fmt.Errorf("error marhalling payload: %w", err)
}
j = append(j, '\n')
- if err := rec.writeCastLine(j); err != nil {
+ if err := rec.WriteCastLine(j); err != nil {
if !rec.failOpen {
return fmt.Errorf("error writing payload to recorder: %w", err)
}
@@ -61,7 +71,7 @@ func (rec *recorder) Write(p []byte) (err error) {
return nil
}
-func (rec *recorder) Close() error {
+func (rec *Client) Close() error {
rec.mu.Lock()
defer rec.mu.Unlock()
if rec.conn == nil {
@@ -74,15 +84,20 @@ func (rec *recorder) Close() error {
// writeCastLine sends bytes to the tsrecorder. The bytes should be in
// asciinema format.
-func (rec *recorder) writeCastLine(j []byte) error {
- rec.mu.Lock()
- defer rec.mu.Unlock()
- if rec.conn == nil {
+func (c *Client) WriteCastLine(j []byte) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.conn == nil {
return errors.New("recorder closed")
}
- _, err := rec.conn.Write(j)
+ _, err := c.conn.Write(j)
if err != nil {
return fmt.Errorf("recorder write error: %w", err)
}
return nil
}
+
+type ResizeMsg struct {
+ Width int `json:"width"`
+ Height int `json:"height"`
+}
diff --git a/k8s-operator/sessionrecording/ws/conn.go b/k8s-operator/sessionrecording/ws/conn.go
new file mode 100644
index 0000000000000..82fd094d15364
--- /dev/null
+++ b/k8s-operator/sessionrecording/ws/conn.go
@@ -0,0 +1,301 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !plan9
+
+// Package ws has functionality to parse 'kubectl exec' sessions streamed using
+// WebSocket protocol.
+package ws
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "sync"
+
+ "go.uber.org/zap"
+ "k8s.io/apimachinery/pkg/util/remotecommand"
+ "tailscale.com/k8s-operator/sessionrecording/tsrecorder"
+ "tailscale.com/sessionrecording"
+ "tailscale.com/util/multierr"
+)
+
+// New wraps the provided network connection and returns a connection whose reads and writes will get triggered as data is received on the hijacked connection.
+// The connection must be a hijacked connection for a 'kubectl exec' session using WebSocket protocol and a *.channel.k8s.io subprotocol.
+// The hijacked connection is used to transmit *.channel.k8s.io streams between Kubernetes client ('kubectl') and the destination proxy controlled by Kubernetes.
+// Data read from the underlying network connection is data sent via one of the streams from the client to the container.
+// Data written to the underlying connection is data sent from the container to the client.
+// We parse the data and send everything for the STDOUT/STDERR streams to the configured tsrecorder as an asciinema recording with the provided header.
+// https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/4006-transition-spdy-to-websockets#proposal-new-remotecommand-sub-protocol-version---v5channelk8sio
+func New(c net.Conn, rec *tsrecorder.Client, ch sessionrecording.CastHeader, log *zap.SugaredLogger) net.Conn {
+ return &conn{
+ Conn: c,
+ rec: rec,
+ ch: ch,
+ log: log,
+ }
+}
+
+// conn is a wrapper around net.Conn. It reads the bytestream
+// for a 'kubectl exec' session, sends session recording data to the configured
+// recorder and forwards the raw bytes to the original destination.
+// A new conn is created per session.
+// conn only knows how to read a 'kubectl exec' session that is streamed using WebSocket protocol.
+// https://www.rfc-editor.org/rfc/rfc6455
+type conn struct {
+ net.Conn
+ // rec knows how to send data to a tsrecorder instance.
+ rec *tsrecorder.Client
+ // ch is the asciinema CastHeader for a session.
+ ch sessionrecording.CastHeader
+ log *zap.SugaredLogger
+
+ rmu sync.Mutex // sequences reads
+ // currentReadMsg contains parsed contents of a websocket binary data message that
+ // is currently being read from the underlying net.Conn.
+ currentReadMsg *message
+ // readBuf contains bytes for a currently parsed binary data message
+ // read from the underlying conn. If the message is masked, it is
+ // unmasked in place, so having this buffer allows us to avoid modifying
+ // the original byte array.
+ readBuf bytes.Buffer
+
+ wmu sync.Mutex // sequences writes
+ writeCastHeaderOnce sync.Once
+ closed bool // connection is closed
+ // writeBuf contains bytes for a currently parsed binary data message
+ // being written to the underlying conn. If the message is masked, it is
+ // unmasked in place, so having this buffer allows us to avoid modifying
+ // the original byte array.
+ writeBuf bytes.Buffer
+ // currentWriteMsg contains parsed contents of a websocket binary data message that
+ // is currently being written to the underlying net.Conn.
+ currentWriteMsg *message
+}
+
+// Read reads bytes from the original connection and parses them as websocket
+// message fragments.
+// Bytes read from the original connection are the bytes sent from the Kubernetes client (kubectl) to the destination container via kubelet.
+//
+// If the message is for the resize stream, sets the width
+// and height of the CastHeader for this connection.
+// The fragment can be incomplete.
+func (c *conn) Read(b []byte) (int, error) {
+ c.rmu.Lock()
+ defer c.rmu.Unlock()
+ n, err := c.Conn.Read(b)
+ if err != nil {
+ // It seems that we sometimes get a wrapped io.EOF, but the
+ // caller checks for io.EOF with ==.
+ if errors.Is(err, io.EOF) {
+ err = io.EOF
+ }
+ return 0, err
+ }
+ if n == 0 {
+ c.log.Debug("[unexpected] Read called for 0 length bytes")
+ return 0, nil
+ }
+
+ typ := messageType(opcode(b))
+ if (typ == noOpcode && c.readMsgIsIncomplete()) || c.readBufHasIncompleteFragment() { // subsequent fragment
+ if typ, err = c.curReadMsgType(); err != nil {
+ return 0, err
+ }
+ }
+
+ // A control message can not be fragmented and we are not interested in
+ // these messages. Just return.
+ if isControlMessage(typ) {
+ return n, nil
+ }
+
+ // The only data message type that Kubernetes supports is binary message.
+ // If we received another message type, return and let the API server close the connection.
+ // https://github.com/kubernetes/client-go/blob/release-1.30/tools/remotecommand/websocket.go#L281
+ if typ != binaryMessage {
+ c.log.Infof("[unexpected] received a data message with a type that is not binary message type %v", typ)
+ return n, nil
+ }
+
+ readMsg := &message{typ: typ} // start a new message...
+ // ... or pick up an already started one if the previous fragment was not final.
+ if c.readMsgIsIncomplete() || c.readBufHasIncompleteFragment() {
+ readMsg = c.currentReadMsg
+ }
+
+ if _, err := c.readBuf.Write(b[:n]); err != nil {
+ return 0, fmt.Errorf("[unexpected] error writing message contents to read buffer: %w", err)
+ }
+
+ ok, err := readMsg.Parse(c.readBuf.Bytes(), c.log)
+ if err != nil {
+ return 0, fmt.Errorf("error parsing message: %v", err)
+ }
+ if !ok { // incomplete fragment
+ return n, nil
+ }
+ c.readBuf.Next(len(readMsg.raw))
+
+ if readMsg.isFinalized {
+ // Stream IDs for websocket streams are static.
+ // https://github.com/kubernetes/client-go/blob/v0.30.0-rc.1/tools/remotecommand/websocket.go#L218
+ if readMsg.streamID.Load() == remotecommand.StreamResize {
+ var err error
+ var msg tsrecorder.ResizeMsg
+ if err = json.Unmarshal(readMsg.payload, &msg); err != nil {
+ return 0, fmt.Errorf("error umarshalling resize message: %w", err)
+ }
+ c.ch.Width = msg.Width
+ c.ch.Height = msg.Height
+ }
+ }
+ c.currentReadMsg = readMsg
+ return n, nil
+}
+
+// Write parses the written bytes as WebSocket message fragment. If the message
+// is for stdout or stderr streams, it is written to the configured tsrecorder.
+// A message fragment can be incomplete.
+func (c *conn) Write(b []byte) (int, error) {
+ c.wmu.Lock()
+ defer c.wmu.Unlock()
+ if len(b) == 0 {
+ c.log.Debug("[unexpected] Write called with 0 bytes")
+ return 0, nil
+ }
+
+ typ := messageType(opcode(b))
+ // If we are in process of parsing a message fragment, the received
+ // bytes are not structured as a message fragment and can not be used to
+ // determine a message fragment.
+ if c.writeBufHasIncompleteFragment() { // buffer contains previous incomplete fragment
+ var err error
+ if typ, err = c.curWriteMsgType(); err != nil {
+ return 0, err
+ }
+ }
+
+ if isControlMessage(typ) {
+ return c.Conn.Write(b)
+ }
+
+ writeMsg := &message{typ: typ} // start a new message...
+ // ... or continue the existing one if it has not been finalized.
+ if c.writeMsgIsIncomplete() || c.writeBufHasIncompleteFragment() {
+ writeMsg = c.currentWriteMsg
+ }
+
+ if _, err := c.writeBuf.Write(b); err != nil {
+ c.log.Errorf("write: error writing to write buf: %v", err)
+ return 0, fmt.Errorf("[unexpected] error writing to internal write buffer: %w", err)
+ }
+
+ ok, err := writeMsg.Parse(c.writeBuf.Bytes(), c.log)
+ if err != nil {
+ c.log.Errorf("write: parsing a message errored: %v", err)
+ return 0, fmt.Errorf("write: error parsing message: %v", err)
+ }
+ c.currentWriteMsg = writeMsg
+ if !ok { // incomplete fragment
+ return len(b), nil
+ }
+ c.writeBuf.Next(len(writeMsg.raw)) // advance frame
+
+ if len(writeMsg.payload) != 0 && writeMsg.isFinalized {
+ if writeMsg.streamID.Load() == remotecommand.StreamStdOut || writeMsg.streamID.Load() == remotecommand.StreamStdErr {
+ var err error
+ c.writeCastHeaderOnce.Do(func() {
+ var j []byte
+ j, err = json.Marshal(c.ch)
+ if err != nil {
+ c.log.Errorf("error marhsalling conn: %v", err)
+ return
+ }
+ j = append(j, '\n')
+ err = c.rec.WriteCastLine(j)
+ if err != nil {
+ c.log.Errorf("received error from recorder: %v", err)
+ }
+ })
+ if err != nil {
+ return 0, fmt.Errorf("error writing CastHeader: %w", err)
+ }
+ if err := c.rec.Write(writeMsg.payload); err != nil {
+ return 0, fmt.Errorf("error writing message to recorder: %v", err)
+ }
+ }
+ }
+ _, err = c.Conn.Write(c.currentWriteMsg.raw)
+ if err != nil {
+ c.log.Errorf("write: error writing to conn: %v", err)
+ }
+ return len(b), nil
+}
+
+func (c *conn) Close() error {
+ c.wmu.Lock()
+ defer c.wmu.Unlock()
+ if c.closed {
+ return nil
+ }
+ c.closed = true
+ connCloseErr := c.Conn.Close()
+ recCloseErr := c.rec.Close()
+ return multierr.New(connCloseErr, recCloseErr)
+}
+
+// writeBufHasIncompleteFragment returns true if the latest data message
+// fragment written to the connection was incomplete and the following write
+// must be the remaining payload bytes of that fragment.
+func (c *conn) writeBufHasIncompleteFragment() bool {
+ return c.writeBuf.Len() != 0
+}
+
+// readBufHasIncompleteFragment returns true if the latest data message
+// fragment read from the connection was incomplete and the following read
+// must be the remaining payload bytes of that fragment.
+func (c *conn) readBufHasIncompleteFragment() bool {
+ return c.readBuf.Len() != 0
+}
+
+// writeMsgIsIncomplete returns true if the latest WebSocket message written to
+// the connection was fragmented and the next data message fragment written to
+// the connection must be a fragment of that message.
+// https://www.rfc-editor.org/rfc/rfc6455#section-5.4
+func (c *conn) writeMsgIsIncomplete() bool {
+ return c.currentWriteMsg != nil && !c.currentWriteMsg.isFinalized
+}
+
+// readMsgIsIncomplete returns true if the latest WebSocket message read from
+// the connection was fragmented and the next data message fragment read from
+// the connection must be a fragment of that message.
+// https://www.rfc-editor.org/rfc/rfc6455#section-5.4
+func (c *conn) readMsgIsIncomplete() bool {
+ return c.currentReadMsg != nil && !c.currentReadMsg.isFinalized
+}
+func (c *conn) curReadMsgType() (messageType, error) {
+ if c.currentReadMsg != nil {
+ return c.currentReadMsg.typ, nil
+ }
+ return 0, errors.New("[unexpected] attempted to determine type for nil message")
+}
+
+func (c *conn) curWriteMsgType() (messageType, error) {
+ if c.currentWriteMsg != nil {
+ return c.currentWriteMsg.typ, nil
+ }
+ return 0, errors.New("[unexpected] attempted to determine type for nil message")
+}
+
+// opcode reads the websocket message opcode that denotes the message type.
+// opcode is contained in bits [4-8] of the message.
+// https://www.rfc-editor.org/rfc/rfc6455#section-5.2
+func opcode(b []byte) int {
+ // 0xf = 00001111; b & 00001111 zeroes out bits [0 - 3] of b
+ var mask byte = 0xf
+ return int(b[0] & mask)
+}
diff --git a/k8s-operator/sessionrecording/ws/conn_test.go b/k8s-operator/sessionrecording/ws/conn_test.go
new file mode 100644
index 0000000000000..2fcbeb7cabdc1
--- /dev/null
+++ b/k8s-operator/sessionrecording/ws/conn_test.go
@@ -0,0 +1,257 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !plan9
+
+package ws
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "go.uber.org/zap"
+ "k8s.io/apimachinery/pkg/util/remotecommand"
+ "tailscale.com/k8s-operator/sessionrecording/fakes"
+ "tailscale.com/k8s-operator/sessionrecording/tsrecorder"
+ "tailscale.com/sessionrecording"
+ "tailscale.com/tstest"
+)
+
+func Test_conn_Read(t *testing.T) {
+ zl, err := zap.NewDevelopment()
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Resize stream ID + {"width": 10, "height": 20}
+ testResizeMsg := []byte{byte(remotecommand.StreamResize), 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3a, 0x32, 0x30, 0x7d}
+ lenResizeMsgPayload := byte(len(testResizeMsg))
+
+ tests := []struct {
+ name string
+ inputs [][]byte
+ wantWidth int
+ wantHeight int
+ }{
+ {
+ name: "single_read_control_message",
+ inputs: [][]byte{{0x88, 0x0}},
+ },
+ {
+ name: "single_read_resize_message",
+ inputs: [][]byte{append([]byte{0x82, lenResizeMsgPayload}, testResizeMsg...)},
+ wantWidth: 10,
+ wantHeight: 20,
+ },
+ {
+ name: "two_reads_resize_message",
+ inputs: [][]byte{{0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3a, 0x32, 0x30, 0x7d}},
+ wantWidth: 10,
+ wantHeight: 20,
+ },
+ {
+ name: "three_reads_resize_message_with_split_fragment",
+ inputs: [][]byte{{0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74}, {0x22, 0x3a, 0x32, 0x30, 0x7d}},
+ wantWidth: 10,
+ wantHeight: 20,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ tc := &fakes.TestConn{}
+ tc.ResetReadBuf()
+ c := &conn{
+ Conn: tc,
+ log: zl.Sugar(),
+ }
+ for i, input := range tt.inputs {
+ if err := tc.WriteReadBufBytes(input); err != nil {
+ t.Fatalf("writing bytes to test conn: %v", err)
+ }
+ _, err := c.Read(make([]byte, len(input)))
+ if err != nil {
+ t.Errorf("[%d] conn.Read() errored %v", i, err)
+ return
+ }
+ }
+ if tt.wantHeight != 0 || tt.wantWidth != 0 {
+ if tt.wantWidth != c.ch.Width {
+ t.Errorf("wants width: %v, got %v", tt.wantWidth, c.ch.Width)
+ }
+ if tt.wantHeight != c.ch.Height {
+ t.Errorf("want height: %v, got %v", tt.wantHeight, c.ch.Height)
+ }
+ }
+ })
+ }
+}
+
+func Test_conn_Write(t *testing.T) {
+ zl, err := zap.NewDevelopment()
+ if err != nil {
+ t.Fatal(err)
+ }
+ cl := tstest.NewClock(tstest.ClockOpts{})
+ tests := []struct {
+ name string
+ inputs [][]byte
+ wantForwarded []byte
+ wantRecorded []byte
+ firstWrite bool
+ width int
+ height int
+ }{
+ {
+ name: "single_write_control_frame",
+ inputs: [][]byte{{0x88, 0x0}},
+ wantForwarded: []byte{0x88, 0x0},
+ },
+ {
+ name: "single_write_stdout_data_message",
+ inputs: [][]byte{{0x82, 0x3, 0x1, 0x7, 0x8}},
+ wantForwarded: []byte{0x82, 0x3, 0x1, 0x7, 0x8},
+ wantRecorded: fakes.CastLine(t, []byte{0x7, 0x8}, cl),
+ },
+ {
+ name: "single_write_stderr_data_message",
+ inputs: [][]byte{{0x82, 0x3, 0x2, 0x7, 0x8}},
+ wantForwarded: []byte{0x82, 0x3, 0x2, 0x7, 0x8},
+ wantRecorded: fakes.CastLine(t, []byte{0x7, 0x8}, cl),
+ },
+ {
+ name: "single_write_stdin_data_message",
+ inputs: [][]byte{{0x82, 0x3, 0x0, 0x7, 0x8}},
+ wantForwarded: []byte{0x82, 0x3, 0x0, 0x7, 0x8},
+ },
+ {
+ name: "single_write_stdout_data_message_with_cast_header",
+ inputs: [][]byte{{0x82, 0x3, 0x1, 0x7, 0x8}},
+ wantForwarded: []byte{0x82, 0x3, 0x1, 0x7, 0x8},
+ wantRecorded: append(fakes.AsciinemaResizeMsg(t, 10, 20), fakes.CastLine(t, []byte{0x7, 0x8}, cl)...),
+ width: 10,
+ height: 20,
+ firstWrite: true,
+ },
+ {
+ name: "two_writes_stdout_data_message",
+ inputs: [][]byte{{0x2, 0x3, 0x1, 0x7, 0x8}, {0x80, 0x6, 0x1, 0x1, 0x2, 0x3, 0x4, 0x5}},
+ wantForwarded: []byte{0x2, 0x3, 0x1, 0x7, 0x8, 0x80, 0x6, 0x1, 0x1, 0x2, 0x3, 0x4, 0x5},
+ wantRecorded: fakes.CastLine(t, []byte{0x7, 0x8, 0x1, 0x2, 0x3, 0x4, 0x5}, cl),
+ },
+ {
+ name: "three_writes_stdout_data_message_with_split_fragment",
+ inputs: [][]byte{{0x2, 0x3, 0x1, 0x7, 0x8}, {0x80, 0x6, 0x1, 0x1, 0x2, 0x3}, {0x4, 0x5}},
+ wantForwarded: []byte{0x2, 0x3, 0x1, 0x7, 0x8, 0x80, 0x6, 0x1, 0x1, 0x2, 0x3, 0x4, 0x5},
+ wantRecorded: fakes.CastLine(t, []byte{0x7, 0x8, 0x1, 0x2, 0x3, 0x4, 0x5}, cl),
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ tc := &fakes.TestConn{}
+ sr := &fakes.TestSessionRecorder{}
+ rec := tsrecorder.New(sr, cl, cl.Now(), true)
+ c := &conn{
+ Conn: tc,
+ log: zl.Sugar(),
+ ch: sessionrecording.CastHeader{
+ Width: tt.width,
+ Height: tt.height,
+ },
+ rec: rec,
+ }
+ if !tt.firstWrite {
+ // This test case does not intend to test that cast header gets written once.
+ c.writeCastHeaderOnce.Do(func() {})
+ }
+ for i, input := range tt.inputs {
+ _, err := c.Write(input)
+ if err != nil {
+ t.Fatalf("[%d] conn.Write() errored: %v", i, err)
+ }
+ }
+ // Assert that the expected bytes have been forwarded to the original destination.
+ gotForwarded := tc.WriteBufBytes()
+ if !reflect.DeepEqual(gotForwarded, tt.wantForwarded) {
+ t.Errorf("expected bytes not forwarded, wants\n%x\ngot\n%x", tt.wantForwarded, gotForwarded)
+ }
+
+ // Assert that the expected bytes have been forwarded to the session recorder.
+ gotRecorded := sr.Bytes()
+ if !reflect.DeepEqual(gotRecorded, tt.wantRecorded) {
+ t.Errorf("expected bytes not recorded, wants\n%b\ngot\n%b", tt.wantRecorded, gotRecorded)
+ }
+ })
+ }
+}
+
+// Test_conn_ReadRand tests reading arbitrarily generated byte slices from conn to
+// test that we don't panic when parsing input from a broken or malicious
+// client.
+func Test_conn_ReadRand(t *testing.T) {
+ zl, err := zap.NewDevelopment()
+ if err != nil {
+ t.Fatalf("error creating a test logger: %v", err)
+ }
+ for i := range 100 {
+ tc := &fakes.TestConn{}
+ tc.ResetReadBuf()
+ c := &conn{
+ Conn: tc,
+ log: zl.Sugar(),
+ }
+ bb := fakes.RandomBytes(t)
+ for j, input := range bb {
+ if err := tc.WriteReadBufBytes(input); err != nil {
+ t.Fatalf("[%d] writing bytes to test conn: %v", i, err)
+ }
+ f := func() {
+ c.Read(make([]byte, len(input)))
+ }
+ testPanic(t, f, fmt.Sprintf("[%d %d] Read panic parsing input of length %d first bytes: %v, current read message: %+#v", i, j, len(input), firstBytes(input), c.currentReadMsg))
+ }
+ }
+}
+
+// Test_conn_WriteRand calls conn.Write with an arbitrary input to validate that it does not
+// panic.
+func Test_conn_WriteRand(t *testing.T) {
+ zl, err := zap.NewDevelopment()
+ if err != nil {
+ t.Fatalf("error creating a test logger: %v", err)
+ }
+ cl := tstest.NewClock(tstest.ClockOpts{})
+ sr := &fakes.TestSessionRecorder{}
+ rec := tsrecorder.New(sr, cl, cl.Now(), true)
+ for i := range 100 {
+ tc := &fakes.TestConn{}
+ c := &conn{
+ Conn: tc,
+ log: zl.Sugar(),
+ rec: rec,
+ }
+ bb := fakes.RandomBytes(t)
+ for j, input := range bb {
+ f := func() {
+ c.Write(input)
+ }
+ testPanic(t, f, fmt.Sprintf("[%d %d] Write: panic parsing input of length %d first bytes %b current write message %+#v", i, j, len(input), firstBytes(input), c.currentWriteMsg))
+ }
+ }
+}
+
+func testPanic(t *testing.T, f func(), msg string) {
+ t.Helper()
+ defer func() {
+ if r := recover(); r != nil {
+ t.Fatal(msg, r)
+ }
+ }()
+ f()
+}
+
+func firstBytes(b []byte) []byte {
+ if len(b) < 10 {
+ return b
+ }
+ return b[:10]
+}
diff --git a/k8s-operator/sessionrecording/ws/message.go b/k8s-operator/sessionrecording/ws/message.go
new file mode 100644
index 0000000000000..713febec76ae8
--- /dev/null
+++ b/k8s-operator/sessionrecording/ws/message.go
@@ -0,0 +1,267 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !plan9
+
+package ws
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync/atomic"
+
+ "github.com/pkg/errors"
+ "go.uber.org/zap"
+
+ "golang.org/x/net/websocket"
+)
+
+const (
+ noOpcode messageType = 0 // continuation frame for fragmented messages
+ binaryMessage messageType = 2
+)
+
+// messageType is the type of a websocket data or control message as defined by opcode.
+// https://www.rfc-editor.org/rfc/rfc6455#section-5.2
+// Known types of control messages are close, ping and pong.
+// https://www.rfc-editor.org/rfc/rfc6455#section-5.5
+// The only data message type supported by Kubernetes is binary message
+// https://github.com/kubernetes/client-go/blob/v0.30.0-rc.1/tools/remotecommand/websocket.go#L281
+type messageType int
+
+// message is a parsed Websocket Message.
+type message struct {
+ // payload is the contents of the so far parsed Websocket
+ // data Message payload, potentially from multiple fragments written by
+ // multiple invocations of Parse. As per RFC 6455 We can assume that the
+ // fragments will always arrive in order and data messages will not be
+ // interleaved.
+ payload []byte
+
+ // isFinalized is set to true if msgPayload contains full contents of
+ // the message (the final fragment has been received).
+ isFinalized bool
+
+ // streamID is the stream to which the message belongs, i.e. stdin, stdout
+ // etc. It is one of the stream IDs defined in
+ // https://github.com/kubernetes/apimachinery/blob/73d12d09c5be8703587b5127416eb83dc3b7e182/pkg/util/httpstream/wsstream/doc.go#L23-L36
+ streamID atomic.Uint32
+
+ // typ is the type of a WebsocketMessage as defined by its opcode
+ // https://www.rfc-editor.org/rfc/rfc6455#section-5.2
+ typ messageType
+ raw []byte
+}
+
+// Parse accepts a websocket message fragment as a byte slice and parses its contents.
+// It returns true if the fragment is complete, false if the fragment is incomplete.
+// If the fragment is incomplete, Parse will be called again with the same fragment + more bytes when those are received.
+// If the fragment is complete, it will be parsed into msg.
+// A complete fragment can be:
+// - a fragment that consists of a whole message
+// - an initial fragment for a message for which we expect more fragments
+// - a subsequent fragment for a message that we are currently parsing and whose so-far parsed contents are stored in msg.
+// Parse must not be called with bytes that don't contain fragment header (so, no less than 2 bytes).
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-------+-+-------------+-------------------------------+
+// |F|R|R|R| opcode|M| Payload len | Extended payload length |
+// |I|S|S|S| (4) |A| (7) | (16/64) |
+// |N|V|V|V| |S| | (if payload len==126/127) |
+// | |1|2|3| |K| | |
+// +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
+// | Extended payload length continued, if payload len == 127 |
+// + - - - - - - - - - - - - - - - +-------------------------------+
+// | |Masking-key, if MASK set to 1 |
+// +-------------------------------+-------------------------------+
+// | Masking-key (continued) | Payload Data |
+// +-------------------------------- - - - - - - - - - - - - - - - +
+// : Payload Data continued ... :
+// + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
+// | Payload Data continued ... |
+// +---------------------------------------------------------------+
+// https://www.rfc-editor.org/rfc/rfc6455#section-5.2
+//
+// Fragmentation rules:
+// An unfragmented message consists of a single frame with the FIN
+// bit set (Section 5.2) and an opcode other than 0.
+// A fragmented message consists of a single frame with the FIN bit
+// clear and an opcode other than 0, followed by zero or more frames
+// with the FIN bit clear and the opcode set to 0, and terminated by
+// a single frame with the FIN bit set and an opcode of 0.
+// https://www.rfc-editor.org/rfc/rfc6455#section-5.4
+func (msg *message) Parse(b []byte, log *zap.SugaredLogger) (bool, error) {
+ if len(b) < 2 {
+ return false, fmt.Errorf("[unexpected] Parse should not be called with less than 2 bytes, got %d bytes", len(b))
+ }
+ if msg.typ != binaryMessage {
+ return false, fmt.Errorf("[unexpected] internal error: attempted to parse a message with type %d", msg.typ)
+ }
+ isInitialFragment := len(msg.raw) == 0
+
+ msg.isFinalized = isFinalFragment(b)
+
+ maskSet := isMasked(b)
+
+ payloadLength, payloadOffset, maskOffset, err := fragmentDimensions(b, maskSet)
+ if err != nil {
+ return false, fmt.Errorf("error determining payload length: %w", err)
+ }
+ log.Debugf("parse: parsing a message fragment with payload length: %d payload offset: %d maskOffset: %d mask set: %t, is finalized: %t, is initial fragment: %t", payloadLength, payloadOffset, maskOffset, maskSet, msg.isFinalized, isInitialFragment)
+
+ if len(b) < int(payloadOffset+payloadLength) { // incomplete fragment
+ return false, nil
+ }
+ // TODO (irbekrm): perhaps only do this extra allocation if we know we
+ // will need to unmask?
+ msg.raw = make([]byte, int(payloadOffset)+int(payloadLength))
+ copy(msg.raw, b[:payloadOffset+payloadLength])
+
+ // Extract the payload.
+ msgPayload := b[payloadOffset : payloadOffset+payloadLength]
+
+ // Unmask the payload if needed.
+ // TODO (irbekrm): instead of unmasking all of the payload each time,
+ // determine if the payload is for a resize message early and skip
+ // unmasking the remaining bytes if not.
+ if maskSet {
+ m := b[maskOffset:payloadOffset]
+ var mask [4]byte
+ copy(mask[:], m)
+ maskBytes(mask, msgPayload)
+ }
+
+ // Determine what stream the message is for. Stream ID of a Kubernetes
+ // streaming session is a 32bit integer, stored in the first byte of the
+ // message payload.
+ // https://github.com/kubernetes/apimachinery/commit/73d12d09c5be8703587b5127416eb83dc3b7e182#diff-291f96e8632d04d2d20f5fb00f6b323492670570d65434e8eac90c7a442d13bdR23-R36
+ if len(msgPayload) == 0 {
+ return false, errors.New("[unexpected] received a message fragment with no stream ID")
+ }
+
+ streamID := uint32(msgPayload[0])
+ if !isInitialFragment && msg.streamID.Load() != streamID {
+ return false, fmt.Errorf("[unexpected] received message fragments with mismatched streamIDs %d and %d", msg.streamID.Load(), streamID)
+ }
+ msg.streamID.Store(streamID)
+
+ // This is normal, Kubernetes seems to send a couple of data messages with
+ // no payloads at the start.
+ if len(msgPayload) < 2 {
+ return true, nil
+ }
+ msgPayload = msgPayload[1:] // remove the stream ID byte
+ msg.payload = append(msg.payload, msgPayload...)
+ return true, nil
+}
+
+// maskBytes applies mask to bytes in place.
+// https://www.rfc-editor.org/rfc/rfc6455#section-5.3
+func maskBytes(key [4]byte, b []byte) {
+ for i := range b {
+ b[i] = b[i] ^ key[i%4]
+ }
+}
+
+// isControlMessage returns true if the message type is one of the known control
+// frame message types.
+// https://www.rfc-editor.org/rfc/rfc6455#section-5.5
+func isControlMessage(t messageType) bool {
+ const (
+ closeMessage messageType = 8
+ pingMessage messageType = 9
+ pongMessage messageType = 10
+ )
+ return t == closeMessage || t == pingMessage || t == pongMessage
+}
+
+// isFinalFragment can be called with a websocket message fragment and returns true if
+// the fragment is the final fragment of a websocket message.
+func isFinalFragment(b []byte) bool {
+ return extractFirstBit(b[0]) != 0
+}
+
+// isMasked can be called with a websocket message fragment and returns true if
+// the payload of the message is masked. It uses the mask bit to determine if
+// the payload is masked.
+// https://www.rfc-editor.org/rfc/rfc6455#section-5.3
+func isMasked(b []byte) bool {
+ return extractFirstBit(b[1]) != 0
+}
+
+// extractFirstBit extracts first bit of a byte by zeroing out all the other
+// bits.
+func extractFirstBit(b byte) byte {
+ return b & 0x80
+}
+
+// zeroFirstBit returns the provided byte with the first bit set to 0.
+func zeroFirstBit(b byte) byte {
+ return b & 0x7f
+}
+
+// fragmentDimensions returns payload length as well as payload offset and mask offset.
+func fragmentDimensions(b []byte, maskSet bool) (payloadLength, payloadOffset, maskOffset uint64, _ error) {
+
+ // payload length can be stored either in bits [9-15] or in bytes 2, 3
+ // or in bytes 2, 3, 4, 5, 6, 7.
+ // https://www.rfc-editor.org/rfc/rfc6455#section-5.2
+ // 0 1 2 3
+ // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ // +-+-+-+-+-------+-+-------------+-------------------------------+
+ // |F|R|R|R| opcode|M| Payload len | Extended payload length |
+ // |I|S|S|S| (4) |A| (7) | (16/64) |
+ // |N|V|V|V| |S| | (if payload len==126/127) |
+ // | |1|2|3| |K| | |
+ // +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
+ // | Extended payload length continued, if payload len == 127 |
+ // + - - - - - - - - - - - - - - - +-------------------------------+
+ // | |Masking-key, if MASK set to 1 |
+ // +-------------------------------+-------------------------------+
+ payloadLengthIndicator := zeroFirstBit(b[1])
+ switch {
+ case payloadLengthIndicator < 126:
+ maskOffset = 2
+ payloadLength = uint64(payloadLengthIndicator)
+ case payloadLengthIndicator == 126:
+ maskOffset = 4
+ if len(b) < int(maskOffset) {
+ return 0, 0, 0, fmt.Errorf("invalid message fragment- length indicator suggests that length is stored in bytes 2:4, but message length is only %d", len(b))
+ }
+ payloadLength = uint64(binary.BigEndian.Uint16(b[2:4]))
+ case payloadLengthIndicator == 127:
+ maskOffset = 10
+ if len(b) < int(maskOffset) {
+ return 0, 0, 0, fmt.Errorf("invalid message fragment- length indicator suggests that length is stored in bytes 2:10, but message length is only %d", len(b))
+ }
+ payloadLength = binary.BigEndian.Uint64(b[2:10])
+ default:
+ return 0, 0, 0, fmt.Errorf("unexpected payload length indicator value: %v", payloadLengthIndicator)
+ }
+
+ // Ensure that a rogue or broken client doesn't cause us attempt to
+ // allocate a huge array by setting a high payload size.
+ // websocket.DefaultMaxPayloadBytes is the maximum payload size accepted
+ // by server side of this connection, so we can safely reject messages
+ // with larger payload size.
+ if payloadLength > websocket.DefaultMaxPayloadBytes {
+ return 0, 0, 0, fmt.Errorf("[unexpected]: too large payload size: %v", payloadLength)
+ }
+
+ // Masking key can take up 0 or 4 bytes- we need to take that into
+ // account when determining payload offset.
+ // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ // ....
+ // + - - - - - - - - - - - - - - - +-------------------------------+
+ // | |Masking-key, if MASK set to 1 |
+ // +-------------------------------+-------------------------------+
+ // | Masking-key (continued) | Payload Data |
+ // + - - - - - - - - - - - - - - - +-------------------------------+
+ // ...
+ if maskSet {
+ payloadOffset = maskOffset + 4
+ } else {
+ payloadOffset = maskOffset
+ }
+ return
+}
diff --git a/k8s-operator/sessionrecording/ws/message_test.go b/k8s-operator/sessionrecording/ws/message_test.go
new file mode 100644
index 0000000000000..f634f86dc55c2
--- /dev/null
+++ b/k8s-operator/sessionrecording/ws/message_test.go
@@ -0,0 +1,215 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !plan9
+
+package ws
+
+import (
+ "encoding/binary"
+ "fmt"
+ "reflect"
+ "testing"
+ "time"
+
+ "math/rand"
+
+ "go.uber.org/zap"
+ "golang.org/x/net/websocket"
+)
+
+func Test_msg_Parse(t *testing.T) {
+ zl, err := zap.NewDevelopment()
+ if err != nil {
+ t.Fatalf("error creating a test logger: %v", err)
+ }
+ testMask := [4]byte{1, 2, 3, 4}
+ bs126, bs126Len := bytesSlice2ByteLen(t)
+ bs127, bs127Len := byteSlice8ByteLen(t)
+ tests := []struct {
+ name string
+ b []byte
+ initialPayload []byte
+ wantPayload []byte
+ wantIsFinalized bool
+ wantStreamID uint32
+ wantErr bool
+ }{
+ {
+ name: "single_fragment_stdout_stream_no_payload_no_mask",
+ b: []byte{0x82, 0x1, 0x1},
+ wantPayload: nil,
+ wantIsFinalized: true,
+ wantStreamID: 1,
+ },
+ {
+ name: "single_fragment_stderr_steam_no_payload_has_mask",
+ b: append([]byte{0x82, 0x81, 0x1, 0x2, 0x3, 0x4}, maskedBytes(testMask, []byte{0x2})...),
+ wantPayload: nil,
+ wantIsFinalized: true,
+ wantStreamID: 2,
+ },
+ {
+ name: "single_fragment_stdout_stream_no_mask_has_payload",
+ b: []byte{0x82, 0x3, 0x1, 0x7, 0x8},
+ wantPayload: []byte{0x7, 0x8},
+ wantIsFinalized: true,
+ wantStreamID: 1,
+ },
+ {
+ name: "single_fragment_stdout_stream_has_mask_has_payload",
+ b: append([]byte{0x82, 0x83, 0x1, 0x2, 0x3, 0x4}, maskedBytes(testMask, []byte{0x1, 0x7, 0x8})...),
+ wantPayload: []byte{0x7, 0x8},
+ wantIsFinalized: true,
+ wantStreamID: 1,
+ },
+ {
+ name: "initial_fragment_stdout_stream_no_mask_has_payload",
+ b: []byte{0x2, 0x3, 0x1, 0x7, 0x8},
+ wantPayload: []byte{0x7, 0x8},
+ wantStreamID: 1,
+ },
+ {
+ name: "initial_fragment_stdout_stream_has_mask_has_payload",
+ b: append([]byte{0x2, 0x83, 0x1, 0x2, 0x3, 0x4}, maskedBytes(testMask, []byte{0x1, 0x7, 0x8})...),
+ wantPayload: []byte{0x7, 0x8},
+ wantStreamID: 1,
+ },
+ {
+ name: "subsequent_fragment_stdout_stream_no_mask_has_payload",
+ b: []byte{0x0, 0x3, 0x1, 0x7, 0x8},
+ initialPayload: []byte{0x1, 0x2, 0x3},
+ wantPayload: []byte{0x1, 0x2, 0x3, 0x7, 0x8},
+ wantStreamID: 1,
+ },
+ {
+ name: "subsequent_fragment_stdout_stream_has_mask_has_payload",
+ b: append([]byte{0x0, 0x83, 0x1, 0x2, 0x3, 0x4}, maskedBytes(testMask, []byte{0x1, 0x7, 0x8})...),
+ initialPayload: []byte{0x1, 0x2, 0x3},
+ wantPayload: []byte{0x1, 0x2, 0x3, 0x7, 0x8},
+ wantStreamID: 1,
+ },
+ {
+ name: "final_fragment_stdout_stream_no_mask_has_payload",
+ b: []byte{0x80, 0x3, 0x1, 0x7, 0x8},
+ initialPayload: []byte{0x1, 0x2, 0x3},
+ wantIsFinalized: true,
+ wantPayload: []byte{0x1, 0x2, 0x3, 0x7, 0x8},
+ wantStreamID: 1,
+ },
+ {
+ name: "final_fragment_stdout_stream_has_mask_has_payload",
+ b: append([]byte{0x80, 0x83, 0x1, 0x2, 0x3, 0x4}, maskedBytes(testMask, []byte{0x1, 0x7, 0x8})...),
+ initialPayload: []byte{0x1, 0x2, 0x3},
+ wantIsFinalized: true,
+ wantPayload: []byte{0x1, 0x2, 0x3, 0x7, 0x8},
+ wantStreamID: 1,
+ },
+ {
+ name: "single_large_fragment_no_mask_length_hint_126",
+ b: append(append([]byte{0x80, 0x7e}, bs126Len...), append([]byte{0x1}, bs126...)...),
+ wantIsFinalized: true,
+ wantPayload: bs126,
+ wantStreamID: 1,
+ },
+ {
+ name: "single_large_fragment_no_mask_length_hint_127",
+ b: append(append([]byte{0x80, 0x7f}, bs127Len...), append([]byte{0x1}, bs127...)...),
+ wantIsFinalized: true,
+ wantPayload: bs127,
+ wantStreamID: 1,
+ },
+ {
+ name: "zero_length_bytes",
+ b: []byte{},
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ msg := &message{
+ typ: binaryMessage,
+ payload: tt.initialPayload,
+ }
+ if _, err := msg.Parse(tt.b, zl.Sugar()); (err != nil) != tt.wantErr {
+ t.Errorf("msg.Parse() = %v, wantsErr: %t", err, tt.wantErr)
+ }
+ if msg.isFinalized != tt.wantIsFinalized {
+ t.Errorf("wants message to be finalized: %t, got: %t", tt.wantIsFinalized, msg.isFinalized)
+ }
+ if msg.streamID.Load() != tt.wantStreamID {
+ t.Errorf("wants stream ID: %d, got: %d", tt.wantStreamID, msg.streamID.Load())
+ }
+ if !reflect.DeepEqual(msg.payload, tt.wantPayload) {
+ t.Errorf("unexpected message payload after Parse, wants %b got %b", tt.wantPayload, msg.payload)
+ }
+ })
+ }
+}
+
+// Test_msg_Parse_Rand calls Parse with a randomly generated input to verify
+// that it doesn't panic.
+func Test_msg_Parse_Rand(t *testing.T) {
+ zl, err := zap.NewDevelopment()
+ if err != nil {
+ t.Fatalf("error creating a test logger: %v", err)
+ }
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+ for i := range 100 {
+ n := r.Intn(4096)
+ b := make([]byte, n)
+ _, err := r.Read(b)
+ if err != nil {
+ t.Fatalf("error generating random byte slice: %v", err)
+ }
+ msg := message{typ: binaryMessage}
+ f := func() {
+ msg.Parse(b, zl.Sugar())
+ }
+ testPanic(t, f, fmt.Sprintf("[%d] Parse panicked running with byte slice of length %d: %v", i, n, r))
+ }
+}
+
+// bytesSlice2ByteLen generates a number that represents websocket message fragment length and is stored in a 2 byte slice.
+// Returns the byte slice with the length as well as a slice of arbitrary bytes of the given length.
+// This is used to generate test input representing websocket message with payload length hint 126.
+// https://www.rfc-editor.org/rfc/rfc6455#section-5.2
+func bytesSlice2ByteLen(t *testing.T) ([]byte, []byte) {
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+ var n uint16
+ n = uint16(rand.Intn(65535 - 1)) // space for an additional 1 byte stream ID
+ b := make([]byte, n)
+ _, err := r.Read(b)
+ if err != nil {
+ t.Fatalf("error generating random byte slice: %v ", err)
+ }
+ bb := make([]byte, 2)
+ binary.BigEndian.PutUint16(bb, n+1) // + stream ID
+ return b, bb
+}
+
+// byteSlice8ByteLen generates a number that represents websocket message fragment length and is stored in an 8 byte slice.
+// Returns the byte slice with the length as well as a slice of arbitrary bytes of the given length.
+// This is used to generate test input representing websocket message with payload length hint 127.
+// https://www.rfc-editor.org/rfc/rfc6455#section-5.2
+func byteSlice8ByteLen(t *testing.T) ([]byte, []byte) {
+ nanos := time.Now().UnixNano()
+ t.Logf("Creating random source with seed %v", nanos)
+ r := rand.New(rand.NewSource(nanos))
+ var n uint64
+ n = uint64(rand.Intn(websocket.DefaultMaxPayloadBytes - 1)) // space for an additional 1 byte stream ID
+ t.Logf("byteSlice8ByteLen: generating message payload of length %d", n)
+ b := make([]byte, n)
+ _, err := r.Read(b)
+ if err != nil {
+ t.Fatalf("error generating random byte slice: %v ", err)
+ }
+ bb := make([]byte, 8)
+ binary.BigEndian.PutUint64(bb, n+1) // + stream ID
+ return b, bb
+}
+
+func maskedBytes(mask [4]byte, b []byte) []byte {
+ maskBytes(mask, b)
+ return b
+}
diff --git a/licenses/android.md b/licenses/android.md
index f46a8d270af67..64e321de6a9ed 100644
--- a/licenses/android.md
+++ b/licenses/android.md
@@ -60,8 +60,8 @@ Client][]. See also the dependencies in the [Tailscale CLI][].
- [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/cabfb018fe85/LICENSE))
- [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/b535050b2aa4/LICENSE))
- [github.com/tailscale/tailscale-android/libtailscale](https://pkg.go.dev/github.com/tailscale/tailscale-android/libtailscale) ([BSD-3-Clause](https://github.com/tailscale/tailscale-android/blob/HEAD/LICENSE))
- - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/2f5d148bcfe1/LICENSE))
- - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/62b9a7c569f9/LICENSE))
+ - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/71393c576b98/LICENSE))
+ - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE))
- [github.com/tcnksm/go-httpstat](https://pkg.go.dev/github.com/tcnksm/go-httpstat) ([MIT](https://github.com/tcnksm/go-httpstat/blob/v0.2.0/LICENSE))
- [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/a3c409a6018e/LICENSE))
- [github.com/vishvananda/netlink/nl](https://pkg.go.dev/github.com/vishvananda/netlink/nl) ([Apache-2.0](https://github.com/vishvananda/netlink/blob/v1.2.1-beta.2/LICENSE))
@@ -71,18 +71,18 @@ Client][]. See also the dependencies in the [Tailscale CLI][].
- [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/4f986261bf13/LICENSE))
- [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE))
- [go4.org/unsafe/assume-no-moving-gc](https://pkg.go.dev/go4.org/unsafe/assume-no-moving-gc) ([BSD-3-Clause](https://github.com/go4org/unsafe-assume-no-moving-gc/blob/e7c30c78aeb2/LICENSE))
- - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.24.0:LICENSE))
+ - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.25.0:LICENSE))
- [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/1b970713:LICENSE))
- [golang.org/x/mobile](https://pkg.go.dev/golang.org/x/mobile) ([BSD-3-Clause](https://cs.opensource.google/go/x/mobile/+/c58ccf4b:LICENSE))
- - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.18.0:LICENSE))
- - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.26.0:LICENSE))
+ - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.19.0:LICENSE))
+ - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.27.0:LICENSE))
- [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.7.0:LICENSE))
- - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.21.0:LICENSE))
- - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.21.0:LICENSE))
+ - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.22.0:LICENSE))
+ - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.22.0:LICENSE))
- [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.16.0:LICENSE))
- [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.5.0:LICENSE))
- - [golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.22.0:LICENSE))
- - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/ee1e1f6070e3/LICENSE))
+ - [golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.23.0:LICENSE))
+ - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/64c016c92987/LICENSE))
- [inet.af/netaddr](https://pkg.go.dev/inet.af/netaddr) ([BSD-3-Clause](Unknown))
- - [nhooyr.io/websocket](https://pkg.go.dev/nhooyr.io/websocket) ([ISC](https://github.com/nhooyr/websocket/blob/v1.8.10/LICENSE.txt))
+ - [nhooyr.io/websocket](https://pkg.go.dev/nhooyr.io/websocket) ([ISC](https://github.com/nhooyr/websocket-old/blob/v1.8.10/LICENSE.txt))
- [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE))
diff --git a/licenses/apple.md b/licenses/apple.md
index 98c7405889070..5dd9b975c0f15 100644
--- a/licenses/apple.md
+++ b/licenses/apple.md
@@ -29,6 +29,7 @@ See also the dependencies in the [Tailscale CLI][].
- [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.20.2/LICENSE))
- [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.20.2/internal/sync/singleflight/LICENSE))
- [github.com/bits-and-blooms/bitset](https://pkg.go.dev/github.com/bits-and-blooms/bitset) ([BSD-3-Clause](https://github.com/bits-and-blooms/bitset/blob/v1.13.0/LICENSE))
+ - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt))
- [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE))
- [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md))
- [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE))
@@ -65,8 +66,8 @@ See also the dependencies in the [Tailscale CLI][].
- [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE))
- [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/cabfb018fe85/LICENSE))
- [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/b535050b2aa4/LICENSE))
- - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/2f5d148bcfe1/LICENSE))
- - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/62b9a7c569f9/LICENSE))
+ - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/71393c576b98/LICENSE))
+ - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE))
- [github.com/tcnksm/go-httpstat](https://pkg.go.dev/github.com/tcnksm/go-httpstat) ([MIT](https://github.com/tcnksm/go-httpstat/blob/v0.2.0/LICENSE))
- [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/a3c409a6018e/LICENSE))
- [github.com/vishvananda/netlink/nl](https://pkg.go.dev/github.com/vishvananda/netlink/nl) ([Apache-2.0](https://github.com/vishvananda/netlink/blob/v1.2.1-beta.2/LICENSE))
@@ -74,16 +75,15 @@ See also the dependencies in the [Tailscale CLI][].
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE))
- [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE))
- [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE))
- - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.24.0:LICENSE))
+ - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.25.0:LICENSE))
- [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/fe59bbe5:LICENSE))
- - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.26.0:LICENSE))
+ - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.27.0:LICENSE))
- [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.7.0:LICENSE))
- - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.21.0:LICENSE))
- - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.21.0:LICENSE))
+ - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.22.0:LICENSE))
+ - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.22.0:LICENSE))
- [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.16.0:LICENSE))
- [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.5.0:LICENSE))
- - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/ee1e1f6070e3/LICENSE))
- - [nhooyr.io/websocket](https://pkg.go.dev/nhooyr.io/websocket) ([ISC](https://github.com/nhooyr/websocket/blob/v1.8.10/LICENSE.txt))
+ - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/64c016c92987/LICENSE))
- [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE))
## Additional Dependencies
diff --git a/licenses/tailscale.md b/licenses/tailscale.md
index 06620be74665f..2fd07fc3f058e 100644
--- a/licenses/tailscale.md
+++ b/licenses/tailscale.md
@@ -34,8 +34,9 @@ Some packages may only be included on certain architectures or operating systems
- [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.19.0/LICENSE))
- [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.19.0/internal/sync/singleflight/LICENSE))
- [github.com/bits-and-blooms/bitset](https://pkg.go.dev/github.com/bits-and-blooms/bitset) ([BSD-3-Clause](https://github.com/bits-and-blooms/bitset/blob/v1.13.0/LICENSE))
+ - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt))
- [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE))
- - [github.com/creack/pty](https://pkg.go.dev/github.com/creack/pty) ([MIT](https://github.com/creack/pty/blob/v1.1.21/LICENSE))
+ - [github.com/creack/pty](https://pkg.go.dev/github.com/creack/pty) ([MIT](https://github.com/creack/pty/blob/v1.1.23/LICENSE))
- [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/a09d6be7affa/LICENSE))
- [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md))
- [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE))
@@ -84,8 +85,8 @@ Some packages may only be included on certain architectures or operating systems
- [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/b535050b2aa4/LICENSE))
- [github.com/tailscale/web-client-prebuilt](https://pkg.go.dev/github.com/tailscale/web-client-prebuilt) ([BSD-3-Clause](https://github.com/tailscale/web-client-prebuilt/blob/5db17b287bf1/LICENSE))
- [github.com/tailscale/wf](https://pkg.go.dev/github.com/tailscale/wf) ([BSD-3-Clause](https://github.com/tailscale/wf/blob/6fbb0a674ee6/LICENSE))
- - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/2f5d148bcfe1/LICENSE))
- - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/62b9a7c569f9/LICENSE))
+ - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/71393c576b98/LICENSE))
+ - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE))
- [github.com/tcnksm/go-httpstat](https://pkg.go.dev/github.com/tcnksm/go-httpstat) ([MIT](https://github.com/tcnksm/go-httpstat/blob/v0.2.0/LICENSE))
- [github.com/toqueteos/webbrowser](https://pkg.go.dev/github.com/toqueteos/webbrowser) ([MIT](https://github.com/toqueteos/webbrowser/blob/v1.2.0/LICENSE.md))
- [github.com/u-root/u-root/pkg/termios](https://pkg.go.dev/github.com/u-root/u-root/pkg/termios) ([BSD-3-Clause](https://github.com/u-root/u-root/blob/v0.12.0/LICENSE))
@@ -95,20 +96,19 @@ Some packages may only be included on certain architectures or operating systems
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE))
- [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/4f986261bf13/LICENSE))
- [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE))
- - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.24.0:LICENSE))
+ - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.25.0:LICENSE))
- [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/1b970713:LICENSE))
- - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.26.0:LICENSE))
+ - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.27.0:LICENSE))
- [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.16.0:LICENSE))
- [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.7.0:LICENSE))
- - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.21.0:LICENSE))
- - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.21.0:LICENSE))
+ - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.22.0:LICENSE))
+ - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.22.0:LICENSE))
- [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.16.0:LICENSE))
- [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.5.0:LICENSE))
- [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2))
- [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3))
- - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/ee1e1f6070e3/LICENSE))
- - [k8s.io/client-go/util/homedir](https://pkg.go.dev/k8s.io/client-go/util/homedir) ([Apache-2.0](https://github.com/kubernetes/client-go/blob/v0.30.1/LICENSE))
- - [nhooyr.io/websocket](https://pkg.go.dev/nhooyr.io/websocket) ([ISC](https://github.com/nhooyr/websocket/blob/v1.8.10/LICENSE.txt))
+ - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/64c016c92987/LICENSE))
+ - [k8s.io/client-go/util/homedir](https://pkg.go.dev/k8s.io/client-go/util/homedir) ([Apache-2.0](https://github.com/kubernetes/client-go/blob/v0.30.3/LICENSE))
- [sigs.k8s.io/yaml](https://pkg.go.dev/sigs.k8s.io/yaml) ([Apache-2.0](https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/LICENSE))
- [sigs.k8s.io/yaml/goyaml.v2](https://pkg.go.dev/sigs.k8s.io/yaml/goyaml.v2) ([Apache-2.0](https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/goyaml.v2/LICENSE))
- [software.sslmate.com/src/go-pkcs12](https://pkg.go.dev/software.sslmate.com/src/go-pkcs12) ([BSD-3-Clause](https://github.com/SSLMate/go-pkcs12/blob/v0.4.0/LICENSE))
diff --git a/licenses/windows.md b/licenses/windows.md
index 9f640b830a95e..77281a7ab2ac3 100644
--- a/licenses/windows.md
+++ b/licenses/windows.md
@@ -57,23 +57,23 @@ Windows][]. See also the dependencies in the [Tailscale CLI][].
- [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE))
- [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE))
- [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/cabfb018fe85/LICENSE))
- - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/7601212d8e23/LICENSE))
+ - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/4327221bd339/LICENSE))
- [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/6580b55d49ca/LICENSE))
- - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/62b9a7c569f9/LICENSE))
+ - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE))
- [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE))
- [github.com/vishvananda/netlink/nl](https://pkg.go.dev/github.com/vishvananda/netlink/nl) ([Apache-2.0](https://github.com/vishvananda/netlink/blob/v1.2.1-beta.2/LICENSE))
- [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE))
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE))
- [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE))
- [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE))
- - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.24.0:LICENSE))
- - [golang.org/x/exp/constraints](https://pkg.go.dev/golang.org/x/exp/constraints) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/fe59bbe5:LICENSE))
+ - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.25.0:LICENSE))
+ - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/fe59bbe5:LICENSE))
- [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.18.0:LICENSE))
- - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.18.0:LICENSE))
- - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.26.0:LICENSE))
+ - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.19.0:LICENSE))
+ - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.27.0:LICENSE))
- [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.7.0:LICENSE))
- - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.21.0:LICENSE))
- - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.21.0:LICENSE))
+ - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.22.0:LICENSE))
+ - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.22.0:LICENSE))
- [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.16.0:LICENSE))
- [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2))
- [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3))
diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go
index 71093dd3c1987..f40ede86a7235 100644
--- a/logpolicy/logpolicy.go
+++ b/logpolicy/logpolicy.go
@@ -31,6 +31,7 @@ import (
"tailscale.com/atomicfile"
"tailscale.com/envknob"
"tailscale.com/health"
+ "tailscale.com/hostinfo"
"tailscale.com/log/filelogger"
"tailscale.com/logtail"
"tailscale.com/logtail/filch"
@@ -463,6 +464,11 @@ func New(collection string, netMon *netmon.Monitor, health *health.Tracker, logf
// The netMon parameter is optional. It should be specified in environments where
// Tailscaled is manipulating the routing table.
func NewWithConfigPath(collection, dir, cmdName string, netMon *netmon.Monitor, health *health.Tracker, logf logger.Logf) *Policy {
+ if hostinfo.IsNATLabGuestVM() {
+ // In NATLab Gokrazy instances, tailscaled comes up concurrently with
+ // DHCP and thus doesn't have DNS for a while. Wait for DHCP first.
+ awaitGokrazyNetwork()
+ }
var lflags int
if term.IsTerminal(2) || runtime.GOOS == "windows" {
lflags = 0
@@ -816,3 +822,25 @@ func (noopPretendSuccessTransport) RoundTrip(req *http.Request) (*http.Response,
Status: "200 OK",
}, nil
}
+
+func awaitGokrazyNetwork() {
+ if runtime.GOOS != "linux" || distro.Get() != distro.Gokrazy {
+ return
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ for {
+ // Before DHCP finishes, the /etc/resolv.conf file has just "#MANUAL".
+ all, _ := os.ReadFile("/etc/resolv.conf")
+ if bytes.Contains(all, []byte("nameserver ")) {
+ return
+ }
+ select {
+ case <-ctx.Done():
+ return
+ case <-time.After(500 * time.Millisecond):
+ }
+ }
+}
diff --git a/net/captivedetection/captivedetection.go b/net/captivedetection/captivedetection.go
new file mode 100644
index 0000000000000..e0a4b0a250768
--- /dev/null
+++ b/net/captivedetection/captivedetection.go
@@ -0,0 +1,223 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Package captivedetection provides a way to detect if the system is connected to a network that has
+// a captive portal. It does this by making HTTP requests to known captive portal detection endpoints
+// and checking if the HTTP responses indicate that a captive portal might be present.
+package captivedetection
+
+import (
+ "context"
+ "net"
+ "net/http"
+ "runtime"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+
+ "tailscale.com/net/netmon"
+ "tailscale.com/tailcfg"
+ "tailscale.com/types/logger"
+)
+
+// Detector checks whether the system is behind a captive portal.
+type Detector struct {
+
+ // httpClient is the HTTP client that is used for captive portal detection. It is configured
+ // to not follow redirects, have a short timeout and no keep-alive.
+ httpClient *http.Client
+ // currIfIndex is the index of the interface that is currently being used by the httpClient.
+ currIfIndex int
+ // mu guards currIfIndex.
+ mu sync.Mutex
+ // logf is the logger used for logging messages. If it is nil, log.Printf is used.
+ logf logger.Logf
+}
+
+// NewDetector creates a new Detector instance for captive portal detection.
+func NewDetector(logf logger.Logf) *Detector {
+ d := &Detector{logf: logf}
+ d.httpClient = &http.Client{
+ // No redirects allowed
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
+ Transport: &http.Transport{
+ DialContext: d.dialContext,
+ DisableKeepAlives: true,
+ },
+ Timeout: Timeout,
+ }
+ return d
+}
+
+// Timeout is the timeout for captive portal detection requests. Because the captive portal intercepting our requests
+// is usually located on the LAN, this is a relatively short timeout.
+const Timeout = 3 * time.Second
+
+// Detect is the entry point to the API. It attempts to detect if the system is behind a captive portal
+// by making HTTP requests to known captive portal detection Endpoints. If any of the requests return a response code
+// or body that looks like a captive portal, Detect returns true. It returns false in all other cases, including when any
+// error occurs during a detection attempt.
+//
+// This function might take a while to return, as it will attempt to detect a captive portal on all available interfaces
+// by performing multiple HTTP requests. It should be called in a separate goroutine if you want to avoid blocking.
+func (d *Detector) Detect(ctx context.Context, netMon *netmon.Monitor, derpMap *tailcfg.DERPMap, preferredDERPRegionID int) (found bool) {
+ return d.detectCaptivePortalWithGOOS(ctx, netMon, derpMap, preferredDERPRegionID, runtime.GOOS)
+}
+
+func (d *Detector) detectCaptivePortalWithGOOS(ctx context.Context, netMon *netmon.Monitor, derpMap *tailcfg.DERPMap, preferredDERPRegionID int, goos string) (found bool) {
+ ifState := netMon.InterfaceState()
+ if !ifState.AnyInterfaceUp() {
+ d.logf("[v2] DetectCaptivePortal: no interfaces up, returning false")
+ return false
+ }
+
+ endpoints := availableEndpoints(derpMap, preferredDERPRegionID, d.logf, goos)
+
+ // Here we try detecting a captive portal using *all* available interfaces on the system
+ // that have an IPv4 address. We consider a captive portal to be found when any
+ // interface reports one may exist. This is necessary because most systems have multiple interfaces,
+ // and most importantly on macOS no default route interface is set until the user has accepted
+ // the captive portal alert thrown by the system. If no default route interface is known,
+ // we need to try with anything that might remotely resemble a Wi-Fi interface.
+ for ifName, i := range ifState.Interface {
+ if !i.IsUp() || i.IsLoopback() || interfaceNameDoesNotNeedCaptiveDetection(ifName, goos) {
+ continue
+ }
+ addrs, err := i.Addrs()
+ if err != nil {
+ d.logf("[v1] DetectCaptivePortal: failed to get addresses for interface %s: %v", ifName, err)
+ continue
+ }
+ if len(addrs) == 0 {
+ continue
+ }
+ d.logf("[v2] attempting to do captive portal detection on interface %s", ifName)
+ res := d.detectOnInterface(ctx, i.Index, endpoints)
+ if res {
+ d.logf("DetectCaptivePortal(found=true,ifName=%s)", ifName)
+ return true
+ }
+ }
+
+ d.logf("DetectCaptivePortal(found=false)")
+ return false
+}
+
+// interfaceNameDoesNotNeedCaptiveDetection returns true if an interface does not require captive portal detection
+// based on its name. This is useful to avoid making unnecessary HTTP requests on interfaces that are known to not
+// require it. We also avoid making requests on the interface prefixes "pdp" and "rmnet", which are cellular data
+// interfaces on iOS and Android, respectively, and would be needlessly battery-draining.
+func interfaceNameDoesNotNeedCaptiveDetection(ifName string, goos string) bool {
+ ifName = strings.ToLower(ifName)
+ excludedPrefixes := []string{"tailscale", "tun", "tap", "docker", "kube", "wg"}
+ if goos == "windows" {
+ excludedPrefixes = append(excludedPrefixes, "loopback", "tunnel", "ppp", "isatap", "teredo", "6to4")
+ } else if goos == "darwin" || goos == "ios" {
+ excludedPrefixes = append(excludedPrefixes, "pdp", "awdl", "bridge", "ap", "utun", "tap", "llw", "anpi", "lo", "stf", "gif", "xhc", "pktap")
+ } else if goos == "android" {
+ excludedPrefixes = append(excludedPrefixes, "rmnet", "p2p", "dummy", "sit")
+ }
+ for _, prefix := range excludedPrefixes {
+ if strings.HasPrefix(ifName, prefix) {
+ return true
+ }
+ }
+ return false
+}
+
+// detectOnInterface reports whether or not we think the system is behind a
+// captive portal, detected by making a request to a URL that we know should
+// return a "204 No Content" response and checking if that's what we get.
+//
+// The boolean return is whether we think we have a captive portal.
+func (d *Detector) detectOnInterface(ctx context.Context, ifIndex int, endpoints []Endpoint) bool {
+ defer d.httpClient.CloseIdleConnections()
+
+ d.logf("[v2] %d available captive portal detection endpoints: %v", len(endpoints), endpoints)
+
+ // We try to detect the captive portal more quickly by making requests to multiple endpoints concurrently.
+ var wg sync.WaitGroup
+ resultCh := make(chan bool, len(endpoints))
+
+ for i, e := range endpoints {
+ if i >= 5 {
+ // Try a maximum of 5 endpoints, break out (returning false) if we run out of attempts.
+ break
+ }
+ wg.Add(1)
+ go func(endpoint Endpoint) {
+ defer wg.Done()
+ found, err := d.verifyCaptivePortalEndpoint(ctx, endpoint, ifIndex)
+ if err != nil {
+ d.logf("[v1] checkCaptivePortalEndpoint failed with endpoint %v: %v", endpoint, err)
+ return
+ }
+ if found {
+ resultCh <- true
+ }
+ }(e)
+ }
+
+ go func() {
+ wg.Wait()
+ close(resultCh)
+ }()
+
+ for result := range resultCh {
+ if result {
+ // If any of the endpoints seems to be a captive portal, we consider the system to be behind one.
+ return true
+ }
+ }
+
+ return false
+}
+
+// verifyCaptivePortalEndpoint checks if the given Endpoint is a captive portal by making an HTTP request to the
+// given Endpoint URL using the interface with index ifIndex, and checking if the response looks like a captive portal.
+func (d *Detector) verifyCaptivePortalEndpoint(ctx context.Context, e Endpoint, ifIndex int) (found bool, err error) {
+ req, err := http.NewRequestWithContext(ctx, "GET", e.URL.String(), nil)
+ if err != nil {
+ return false, err
+ }
+
+ // Attach the Tailscale challenge header if the endpoint supports it. Not all captive portal detection endpoints
+ // support this, so we only attach it if the endpoint does.
+ if e.SupportsTailscaleChallenge {
+ // Note: the set of valid characters in a challenge and the total
+ // length is limited; see isChallengeChar in cmd/derper for more
+ // details.
+ chal := "ts_" + e.URL.Host
+ req.Header.Set("X-Tailscale-Challenge", chal)
+ }
+
+ d.mu.Lock()
+ d.currIfIndex = ifIndex
+ d.mu.Unlock()
+
+ // Make the actual request, and check if the response looks like a captive portal or not.
+ r, err := d.httpClient.Do(req)
+ if err != nil {
+ return false, err
+ }
+
+ return e.responseLooksLikeCaptive(r, d.logf), nil
+}
+
+func (d *Detector) dialContext(ctx context.Context, network, addr string) (net.Conn, error) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ ifIndex := d.currIfIndex
+
+ dl := net.Dialer{
+ Control: func(network, address string, c syscall.RawConn) error {
+ return setSocketInterfaceIndex(c, ifIndex, d.logf)
+ },
+ }
+
+ return dl.DialContext(ctx, network, addr)
+}
diff --git a/net/captivedetection/captivedetection_test.go b/net/captivedetection/captivedetection_test.go
new file mode 100644
index 0000000000000..e74273afd922e
--- /dev/null
+++ b/net/captivedetection/captivedetection_test.go
@@ -0,0 +1,60 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package captivedetection
+
+import (
+ "context"
+ "runtime"
+ "sync"
+ "testing"
+
+ "tailscale.com/cmd/testwrapper/flakytest"
+ "tailscale.com/net/netmon"
+)
+
+func TestAvailableEndpointsAlwaysAtLeastTwo(t *testing.T) {
+ endpoints := availableEndpoints(nil, 0, t.Logf, runtime.GOOS)
+ if len(endpoints) == 0 {
+ t.Errorf("Expected non-empty AvailableEndpoints, got an empty slice instead")
+ }
+ if len(endpoints) == 1 {
+ t.Errorf("Expected at least two AvailableEndpoints for redundancy, got only one instead")
+ }
+ for _, e := range endpoints {
+ if e.URL.Scheme != "http" {
+ t.Errorf("Expected HTTP URL in Endpoint, got HTTPS")
+ }
+ }
+}
+
+func TestDetectCaptivePortalReturnsFalse(t *testing.T) {
+ d := NewDetector(t.Logf)
+ found := d.Detect(context.Background(), netmon.NewStatic(), nil, 0)
+ if found {
+ t.Errorf("DetectCaptivePortal returned true, expected false.")
+ }
+}
+
+func TestAllEndpointsAreUpAndReturnExpectedResponse(t *testing.T) {
+ flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/13019")
+ d := NewDetector(t.Logf)
+ endpoints := availableEndpoints(nil, 0, t.Logf, runtime.GOOS)
+
+ var wg sync.WaitGroup
+ for _, e := range endpoints {
+ wg.Add(1)
+ go func(endpoint Endpoint) {
+ defer wg.Done()
+ found, err := d.verifyCaptivePortalEndpoint(context.Background(), endpoint, 0)
+ if err != nil {
+ t.Errorf("verifyCaptivePortalEndpoint failed with endpoint %v: %v", endpoint, err)
+ }
+ if found {
+ t.Errorf("verifyCaptivePortalEndpoint with endpoint %v says we're behind a captive portal, but we aren't", endpoint)
+ }
+ }(e)
+ }
+
+ wg.Wait()
+}
diff --git a/net/captivedetection/endpoints.go b/net/captivedetection/endpoints.go
new file mode 100644
index 0000000000000..450ed4a1cae4a
--- /dev/null
+++ b/net/captivedetection/endpoints.go
@@ -0,0 +1,178 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package captivedetection
+
+import (
+ "cmp"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "slices"
+
+ "go4.org/mem"
+ "tailscale.com/net/dnsfallback"
+ "tailscale.com/tailcfg"
+ "tailscale.com/types/logger"
+)
+
+// EndpointProvider is an enum that represents the source of an Endpoint.
+type EndpointProvider int
+
+const (
+ // DERPMapPreferred is used for an endpoint that is a DERP node contained in the current preferred DERP region,
+ // as provided by the DERPMap.
+ DERPMapPreferred EndpointProvider = iota
+ // DERPMapOther is used for an endpoint that is a DERP node, but not contained in the current preferred DERP region.
+ DERPMapOther
+ // Tailscale is used for endpoints that are the Tailscale coordination server or admin console.
+ Tailscale
+)
+
+func (p EndpointProvider) String() string {
+ switch p {
+ case DERPMapPreferred:
+ return "DERPMapPreferred"
+ case Tailscale:
+ return "Tailscale"
+ case DERPMapOther:
+ return "DERPMapOther"
+ default:
+ return fmt.Sprintf("EndpointProvider(%d)", p)
+ }
+}
+
+// Endpoint represents a URL that can be used to detect a captive portal, along with the expected
+// result of the HTTP request.
+type Endpoint struct {
+ // URL is the URL that we make an HTTP request to as part of the captive portal detection process.
+ URL *url.URL
+ // StatusCode is the expected HTTP status code that we expect to see in the response.
+ StatusCode int
+ // ExpectedContent is a string that we expect to see contained in the response body. If this is non-empty,
+ // we will check that the response body contains this string. If it is empty, we will not check the response body
+ // and only check the status code.
+ ExpectedContent string
+ // SupportsTailscaleChallenge is true if the endpoint will return the sent value of the X-Tailscale-Challenge
+ // HTTP header in its HTTP response.
+ SupportsTailscaleChallenge bool
+ // Provider is the source of the endpoint. This is used to prioritize certain endpoints over others
+ // (for example, a DERP node in the preferred region should always be used first).
+ Provider EndpointProvider
+}
+
+func (e Endpoint) String() string {
+ return fmt.Sprintf("Endpoint{URL=%q, StatusCode=%d, ExpectedContent=%q, SupportsTailscaleChallenge=%v, Provider=%s}", e.URL, e.StatusCode, e.ExpectedContent, e.SupportsTailscaleChallenge, e.Provider.String())
+}
+
+func (e Endpoint) Equal(other Endpoint) bool {
+ return e.URL.String() == other.URL.String() &&
+ e.StatusCode == other.StatusCode &&
+ e.ExpectedContent == other.ExpectedContent &&
+ e.SupportsTailscaleChallenge == other.SupportsTailscaleChallenge &&
+ e.Provider == other.Provider
+}
+
+// availableEndpoints returns a set of Endpoints which can be used for captive portal detection by performing
+// one or more HTTP requests and looking at the response. The returned Endpoints are ordered by preference,
+// with the most preferred Endpoint being the first in the slice.
+func availableEndpoints(derpMap *tailcfg.DERPMap, preferredDERPRegionID int, logf logger.Logf, goos string) []Endpoint {
+ endpoints := []Endpoint{}
+
+ if derpMap == nil || len(derpMap.Regions) == 0 {
+ // When the client first starts, we don't have a DERPMap in LocalBackend yet. In this case,
+ // we use the static DERPMap from dnsfallback.
+ logf("captivedetection: current DERPMap is empty, using map from dnsfallback")
+ derpMap = dnsfallback.GetDERPMap()
+ }
+ // Use the DERP IPs as captive portal detection endpoints. Using IPs is better than hostnames
+ // because they do not depend on DNS resolution.
+ for _, region := range derpMap.Regions {
+ if region.Avoid {
+ continue
+ }
+ for _, node := range region.Nodes {
+ if node.IPv4 == "" || !node.CanPort80 {
+ continue
+ }
+ str := "http://" + node.IPv4 + "/generate_204"
+ u, err := url.Parse(str)
+ if err != nil {
+ logf("captivedetection: failed to parse DERP node URL %q: %v", str, err)
+ continue
+ }
+ p := DERPMapOther
+ if region.RegionID == preferredDERPRegionID {
+ p = DERPMapPreferred
+ }
+ e := Endpoint{u, http.StatusNoContent, "", true, p}
+ endpoints = append(endpoints, e)
+ }
+ }
+
+ // Let's also try the default Tailscale coordination server and admin console.
+ // These are likely to be blocked on some networks.
+ appendTailscaleEndpoint := func(urlString string) {
+ u, err := url.Parse(urlString)
+ if err != nil {
+ logf("captivedetection: failed to parse Tailscale URL %q: %v", urlString, err)
+ return
+ }
+ endpoints = append(endpoints, Endpoint{u, http.StatusNoContent, "", false, Tailscale})
+ }
+ appendTailscaleEndpoint("http://controlplane.tailscale.com/generate_204")
+ appendTailscaleEndpoint("http://login.tailscale.com/generate_204")
+
+ // Sort the endpoints by provider so that we can prioritize DERP nodes in the preferred region, followed by
+ // any other DERP server elsewhere, then followed by Tailscale endpoints.
+ slices.SortFunc(endpoints, func(x, y Endpoint) int {
+ return cmp.Compare(x.Provider, y.Provider)
+ })
+
+ return endpoints
+}
+
+// responseLooksLikeCaptive checks if the given HTTP response matches the expected response for the Endpoint.
+func (e Endpoint) responseLooksLikeCaptive(r *http.Response, logf logger.Logf) bool {
+ defer r.Body.Close()
+
+ // Check the status code first.
+ if r.StatusCode != e.StatusCode {
+ logf("[v1] unexpected status code in captive portal response: want=%d, got=%d", e.StatusCode, r.StatusCode)
+ return true
+ }
+
+ // If the endpoint supports the Tailscale challenge header, check that the response contains the expected header.
+ if e.SupportsTailscaleChallenge {
+ expectedResponse := "response ts_" + e.URL.Host
+ hasResponse := r.Header.Get("X-Tailscale-Response") == expectedResponse
+ if !hasResponse {
+ // The response did not contain the expected X-Tailscale-Response header, which means we are most likely
+ // behind a captive portal (somebody is tampering with the response headers).
+ logf("captive portal check response did not contain expected X-Tailscale-Response header: want=%q, got=%q", expectedResponse, r.Header.Get("X-Tailscale-Response"))
+ return true
+ }
+ }
+
+ // If we don't have an expected content string, we don't need to check the response body.
+ if e.ExpectedContent == "" {
+ return false
+ }
+
+ // Read the response body and check if it contains the expected content.
+ b, err := io.ReadAll(io.LimitReader(r.Body, 4096))
+ if err != nil {
+ logf("reading captive portal check response body failed: %v", err)
+ return false
+ }
+ hasExpectedContent := mem.Contains(mem.B(b), mem.S(e.ExpectedContent))
+ if !hasExpectedContent {
+ // The response body did not contain the expected content, that means we are most likely behind a captive portal.
+ logf("[v1] captive portal check response body did not contain expected content: want=%q", e.ExpectedContent)
+ return true
+ }
+
+ // If we got here, the response looks good.
+ return false
+}
diff --git a/net/captivedetection/rawconn.go b/net/captivedetection/rawconn.go
new file mode 100644
index 0000000000000..a7197d9df2577
--- /dev/null
+++ b/net/captivedetection/rawconn.go
@@ -0,0 +1,19 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !(ios || darwin)
+
+package captivedetection
+
+import (
+ "syscall"
+
+ "tailscale.com/types/logger"
+)
+
+// setSocketInterfaceIndex sets the IP_BOUND_IF socket option on the given RawConn.
+// This forces the socket to use the given interface.
+func setSocketInterfaceIndex(c syscall.RawConn, ifIndex int, logf logger.Logf) error {
+ // No-op on non-Darwin platforms.
+ return nil
+}
diff --git a/net/captivedetection/rawconn_apple.go b/net/captivedetection/rawconn_apple.go
new file mode 100644
index 0000000000000..12b4446e62eb8
--- /dev/null
+++ b/net/captivedetection/rawconn_apple.go
@@ -0,0 +1,24 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build ios || darwin
+
+package captivedetection
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+ "tailscale.com/types/logger"
+)
+
+// setSocketInterfaceIndex sets the IP_BOUND_IF socket option on the given RawConn.
+// This forces the socket to use the given interface.
+func setSocketInterfaceIndex(c syscall.RawConn, ifIndex int, logf logger.Logf) error {
+ return c.Control((func(fd uintptr) {
+ err := unix.SetsockoptInt(int(fd), unix.IPPROTO_IP, unix.IP_BOUND_IF, ifIndex)
+ if err != nil {
+ logf("captivedetection: failed to set IP_BOUND_IF (ifIndex=%d): %v", ifIndex, err)
+ }
+ }))
+}
diff --git a/net/dns/manager.go b/net/dns/manager.go
index eee2d5a7d14df..dfce5b2acaf82 100644
--- a/net/dns/manager.go
+++ b/net/dns/manager.go
@@ -82,7 +82,7 @@ func NewManager(logf logger.Logf, oscfg OSConfigurator, health *health.Tracker,
m := &Manager{
logf: logf,
- resolver: resolver.New(logf, linkSel, dialer, knobs),
+ resolver: resolver.New(logf, linkSel, dialer, health, knobs),
os: oscfg,
health: health,
knobs: knobs,
@@ -538,7 +538,9 @@ func (m *Manager) FlushCaches() error {
// CleanUp restores the system DNS configuration to its original state
// in case the Tailscale daemon terminated without closing the router.
// No other state needs to be instantiated before this runs.
-func CleanUp(logf logger.Logf, netMon *netmon.Monitor, interfaceName string) {
+//
+// health must not be nil.
+func CleanUp(logf logger.Logf, netMon *netmon.Monitor, health *health.Tracker, interfaceName string) {
oscfg, err := NewOSConfigurator(logf, nil, nil, interfaceName)
if err != nil {
logf("creating dns cleanup: %v", err)
@@ -546,7 +548,7 @@ func CleanUp(logf logger.Logf, netMon *netmon.Monitor, interfaceName string) {
}
d := &tsdial.Dialer{Logf: logf}
d.SetNetMon(netMon)
- dns := NewManager(logf, oscfg, nil, d, nil, nil, runtime.GOOS)
+ dns := NewManager(logf, oscfg, health, d, nil, nil, runtime.GOOS)
if err := dns.Down(); err != nil {
logf("dns down: %v", err)
}
diff --git a/net/dns/manager_tcp_test.go b/net/dns/manager_tcp_test.go
index 2982c61290622..f4c42791e9b5b 100644
--- a/net/dns/manager_tcp_test.go
+++ b/net/dns/manager_tcp_test.go
@@ -15,6 +15,7 @@ import (
"github.com/google/go-cmp/cmp"
dns "golang.org/x/net/dns/dnsmessage"
+ "tailscale.com/health"
"tailscale.com/net/netmon"
"tailscale.com/net/tsdial"
"tailscale.com/tstest"
@@ -88,7 +89,7 @@ func TestDNSOverTCP(t *testing.T) {
SearchDomains: fqdns("coffee.shop"),
},
}
- m := NewManager(t.Logf, &f, nil, tsdial.NewDialer(netmon.NewStatic()), nil, nil, "")
+ m := NewManager(t.Logf, &f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "")
m.resolver.TestOnlySetHook(f.SetResolver)
m.Set(Config{
Hosts: hosts(
@@ -173,7 +174,7 @@ func TestDNSOverTCP_TooLarge(t *testing.T) {
SearchDomains: fqdns("coffee.shop"),
},
}
- m := NewManager(log, &f, nil, tsdial.NewDialer(netmon.NewStatic()), nil, nil, "")
+ m := NewManager(log, &f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "")
m.resolver.TestOnlySetHook(f.SetResolver)
m.Set(Config{
Hosts: hosts("andrew.ts.com.", "1.2.3.4"),
diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go
index ca3227aab3bf1..c528175214f67 100644
--- a/net/dns/resolver/forwarder.go
+++ b/net/dns/resolver/forwarder.go
@@ -6,6 +6,8 @@ package resolver
import (
"bytes"
"context"
+ "crypto/sha256"
+ "encoding/base64"
"encoding/binary"
"errors"
"fmt"
@@ -23,6 +25,7 @@ import (
dns "golang.org/x/net/dns/dnsmessage"
"tailscale.com/control/controlknobs"
"tailscale.com/envknob"
+ "tailscale.com/health"
"tailscale.com/net/dns/publicdns"
"tailscale.com/net/dnscache"
"tailscale.com/net/neterror"
@@ -164,6 +167,23 @@ func clampEDNSSize(packet []byte, maxSize uint16) {
binary.BigEndian.PutUint16(opt[3:5], maxSize)
}
+// dnsForwarderFailing should be raised when the forwarder is unable to reach the
+// upstream resolvers. This is a serious warning, as it can result in "no internet".
+// This warning must be cleared when the forwarder is working again.
+//
+// We allow for 5 second grace period to ensure this is not raised for spurious errors
+// under the assumption that DNS queries are relatively frequent and a subsequent
+// successful query will clear any one-off errors.
+var dnsForwarderFailing = health.Register(&health.Warnable{
+ Code: "dns-forward-failing",
+ Title: "DNS unavailable",
+ Severity: health.SeverityMedium,
+ DependsOn: []*health.Warnable{health.NetworkStatusWarnable},
+ Text: health.StaticMessage("Tailscale can't reach the configured DNS servers. Internet connectivity may be affected."),
+ ImpactsConnectivity: true,
+ TimeToVisible: 5 * time.Second,
+})
+
type route struct {
Suffix dnsname.FQDN
Resolvers []resolverAndDelay
@@ -188,6 +208,7 @@ type forwarder struct {
netMon *netmon.Monitor // always non-nil
linkSel ForwardLinkSelector // TODO(bradfitz): remove this when tsdial.Dialer absorbs it
dialer *tsdial.Dialer
+ health *health.Tracker // always non-nil
controlKnobs *controlknobs.Knobs // or nil
@@ -219,7 +240,7 @@ type forwarder struct {
missingUpstreamRecovery func()
}
-func newForwarder(logf logger.Logf, netMon *netmon.Monitor, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, knobs *controlknobs.Knobs) *forwarder {
+func newForwarder(logf logger.Logf, netMon *netmon.Monitor, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, health *health.Tracker, knobs *controlknobs.Knobs) *forwarder {
if netMon == nil {
panic("nil netMon")
}
@@ -228,6 +249,7 @@ func newForwarder(logf logger.Logf, netMon *netmon.Monitor, linkSel ForwardLinkS
netMon: netMon,
linkSel: linkSel,
dialer: dialer,
+ health: health,
controlKnobs: knobs,
missingUpstreamRecovery: func() {},
}
@@ -478,9 +500,10 @@ var (
func (f *forwarder) send(ctx context.Context, fq *forwardQuery, rr resolverAndDelay) (ret []byte, err error) {
if verboseDNSForward() {
id := forwarderCount.Add(1)
- f.logf("forwarder.send(%q) [%d] ...", rr.name.Addr, id)
+ domain, typ, _ := nameFromQuery(fq.packet)
+ f.logf("forwarder.send(%q, %d, %v, %d) [%d] ...", rr.name.Addr, fq.txid, typ, len(domain), id)
defer func() {
- f.logf("forwarder.send(%q) [%d] = %v, %v", rr.name.Addr, id, len(ret), err)
+ f.logf("forwarder.send(%q, %d, %v, %d) [%d] = %v, %v", rr.name.Addr, fq.txid, typ, len(domain), id, len(ret), err)
}()
}
if strings.HasPrefix(rr.name.Addr, "http://") {
@@ -846,7 +869,7 @@ type forwardQuery struct {
// node DNS proxy queries), otherwise f.resolvers is used.
func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, responseChan chan<- packet, resolvers ...resolverAndDelay) error {
metricDNSFwd.Add(1)
- domain, err := nameFromQuery(query.bs)
+ domain, typ, err := nameFromQuery(query.bs)
if err != nil {
metricDNSFwdErrorName.Add(1)
return err
@@ -887,6 +910,7 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo
resolvers = f.resolvers(domain)
if len(resolvers) == 0 {
metricDNSFwdErrorNoUpstream.Add(1)
+ f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: ""})
f.logf("no upstream resolvers set, returning SERVFAIL")
// Attempt to recompile the DNS configuration
@@ -909,6 +933,8 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo
case responseChan <- res:
return nil
}
+ } else {
+ f.health.SetHealthy(dnsForwarderFailing)
}
}
@@ -920,6 +946,12 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo
}
defer fq.closeOnCtxDone.Close()
+ if verboseDNSForward() {
+ domainSha256 := sha256.Sum256([]byte(domain))
+ domainSig := base64.RawStdEncoding.EncodeToString(domainSha256[:3])
+ f.logf("request(%d, %v, %d, %s) %d...", fq.txid, typ, len(domain), domainSig, len(fq.packet))
+ }
+
resc := make(chan []byte, 1) // it's fine buffered or not
errc := make(chan error, 1) // it's fine buffered or not too
for i := range resolvers {
@@ -959,7 +991,11 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo
metricDNSFwdErrorContext.Add(1)
return fmt.Errorf("waiting to send response: %w", ctx.Err())
case responseChan <- packet{v, query.family, query.addr}:
+ if verboseDNSForward() {
+ f.logf("response(%d, %v, %d) = %d, nil", fq.txid, typ, len(domain), len(v))
+ }
metricDNSFwdSuccess.Add(1)
+ f.health.SetHealthy(dnsForwarderFailing)
return nil
}
case err := <-errc:
@@ -979,7 +1015,15 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo
case <-ctx.Done():
metricDNSFwdErrorContext.Add(1)
metricDNSFwdErrorContextGotError.Add(1)
+ var resolverAddrs []string
+ for _, rr := range resolvers {
+ resolverAddrs = append(resolverAddrs, rr.name.Addr)
+ }
+ f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: strings.Join(resolverAddrs, ",")})
case responseChan <- res:
+ if verboseDNSForward() {
+ f.logf("forwarder response(%d, %v, %d) = %d, %v", fq.txid, typ, len(domain), len(res.bs), firstErr)
+ }
}
}
return firstErr
@@ -999,6 +1043,7 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo
for _, rr := range resolvers {
resolverAddrs = append(resolverAddrs, rr.name.Addr)
}
+ f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: strings.Join(resolverAddrs, ",")})
return fmt.Errorf("waiting for response or error from %v: %w", resolverAddrs, ctx.Err())
}
}
@@ -1007,24 +1052,28 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo
var initListenConfig func(_ *net.ListenConfig, _ *netmon.Monitor, tunName string) error
// nameFromQuery extracts the normalized query name from bs.
-func nameFromQuery(bs []byte) (dnsname.FQDN, error) {
+func nameFromQuery(bs []byte) (dnsname.FQDN, dns.Type, error) {
var parser dns.Parser
hdr, err := parser.Start(bs)
if err != nil {
- return "", err
+ return "", 0, err
}
if hdr.Response {
- return "", errNotQuery
+ return "", 0, errNotQuery
}
q, err := parser.Question()
if err != nil {
- return "", err
+ return "", 0, err
}
n := q.Name.Data[:q.Name.Length]
- return dnsname.ToFQDN(rawNameToLower(n))
+ fqdn, err := dnsname.ToFQDN(rawNameToLower(n))
+ if err != nil {
+ return "", 0, err
+ }
+ return fqdn, q.Type, nil
}
// nxDomainResponse returns an NXDomain DNS reply for the provided request.
diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go
index e723af620d100..465618a54596c 100644
--- a/net/dns/resolver/forwarder_test.go
+++ b/net/dns/resolver/forwarder_test.go
@@ -24,6 +24,7 @@ import (
dns "golang.org/x/net/dns/dnsmessage"
"tailscale.com/control/controlknobs"
"tailscale.com/envknob"
+ "tailscale.com/health"
"tailscale.com/net/netmon"
"tailscale.com/net/tsdial"
"tailscale.com/types/dnstype"
@@ -200,7 +201,7 @@ func BenchmarkNameFromQuery(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for range b.N {
- _, err := nameFromQuery(msg)
+ _, _, err := nameFromQuery(msg)
if err != nil {
b.Fatal(err)
}
@@ -457,7 +458,7 @@ func runTestQuery(tb testing.TB, port uint16, request []byte, modify func(*forwa
var dialer tsdial.Dialer
dialer.SetNetMon(netMon)
- fwd := newForwarder(tb.Logf, netMon, nil, &dialer, nil)
+ fwd := newForwarder(tb.Logf, netMon, nil, &dialer, new(health.Tracker), nil)
if modify != nil {
modify(fwd)
}
diff --git a/net/dns/resolver/tsdns.go b/net/dns/resolver/tsdns.go
index a3f3d7010ab81..90e447020ed79 100644
--- a/net/dns/resolver/tsdns.go
+++ b/net/dns/resolver/tsdns.go
@@ -25,6 +25,7 @@ import (
dns "golang.org/x/net/dns/dnsmessage"
"tailscale.com/control/controlknobs"
"tailscale.com/envknob"
+ "tailscale.com/health"
"tailscale.com/net/dns/resolvconffile"
"tailscale.com/net/netaddr"
"tailscale.com/net/netmon"
@@ -202,6 +203,7 @@ type Resolver struct {
logf logger.Logf
netMon *netmon.Monitor // non-nil
dialer *tsdial.Dialer // non-nil
+ health *health.Tracker // non-nil
saveConfigForTests func(cfg Config) // used in tests to capture resolver config
// forwarder forwards requests to upstream nameservers.
forwarder *forwarder
@@ -224,10 +226,14 @@ type ForwardLinkSelector interface {
}
// New returns a new resolver.
-func New(logf logger.Logf, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, knobs *controlknobs.Knobs) *Resolver {
+// dialer and health must be non-nil.
+func New(logf logger.Logf, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, health *health.Tracker, knobs *controlknobs.Knobs) *Resolver {
if dialer == nil {
panic("nil Dialer")
}
+ if health == nil {
+ panic("nil health")
+ }
netMon := dialer.NetMon()
if netMon == nil {
logf("nil netMon")
@@ -239,8 +245,9 @@ func New(logf logger.Logf, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, k
hostToIP: map[dnsname.FQDN][]netip.Addr{},
ipToHost: map[netip.Addr]dnsname.FQDN{},
dialer: dialer,
+ health: health,
}
- r.forwarder = newForwarder(r.logf, netMon, linkSel, dialer, knobs)
+ r.forwarder = newForwarder(r.logf, netMon, linkSel, dialer, health, knobs)
return r
}
diff --git a/net/dns/resolver/tsdns_test.go b/net/dns/resolver/tsdns_test.go
index e1477e34294ac..e2c4750b5c1a3 100644
--- a/net/dns/resolver/tsdns_test.go
+++ b/net/dns/resolver/tsdns_test.go
@@ -23,6 +23,7 @@ import (
miekdns "github.com/miekg/dns"
dns "golang.org/x/net/dns/dnsmessage"
+ "tailscale.com/health"
"tailscale.com/net/netaddr"
"tailscale.com/net/netmon"
"tailscale.com/net/tsdial"
@@ -354,6 +355,7 @@ func newResolver(t testing.TB) *Resolver {
return New(t.Logf,
nil, // no link selector
tsdial.NewDialer(netmon.NewStatic()),
+ new(health.Tracker),
nil, // no control knobs
)
}
@@ -1068,7 +1070,7 @@ func TestForwardLinkSelection(t *testing.T) {
return "special"
}
return ""
- }), new(tsdial.Dialer), nil /* no control knobs */)
+ }), new(tsdial.Dialer), new(health.Tracker), nil /* no control knobs */)
// Test non-special IP.
if got, err := fwd.packetListener(netip.Addr{}); err != nil {
diff --git a/net/dnsfallback/dns-fallback-servers.json b/net/dnsfallback/dns-fallback-servers.json
index 6b878014783c9..42dd0713b6b5d 100644
--- a/net/dnsfallback/dns-fallback-servers.json
+++ b/net/dnsfallback/dns-fallback-servers.json
@@ -10,21 +10,24 @@
"RegionID": 1,
"HostName": "derp1c.tailscale.com",
"IPv4": "104.248.8.210",
- "IPv6": "2604:a880:800:10::7a0:e001"
+ "IPv6": "2604:a880:800:10::7a0:e001",
+ "CanPort80": true
},
{
"Name": "1d",
"RegionID": 1,
"HostName": "derp1d.tailscale.com",
"IPv4": "165.22.33.71",
- "IPv6": "2604:a880:800:10::7fe:f001"
+ "IPv6": "2604:a880:800:10::7fe:f001",
+ "CanPort80": true
},
{
"Name": "1e",
"RegionID": 1,
"HostName": "derp1e.tailscale.com",
"IPv4": "64.225.56.166",
- "IPv6": "2604:a880:800:10::873:4001"
+ "IPv6": "2604:a880:800:10::873:4001",
+ "CanPort80": true
}
]
},
@@ -38,7 +41,8 @@
"RegionID": 10,
"HostName": "derp10.tailscale.com",
"IPv4": "137.220.36.168",
- "IPv6": "2001:19f0:8001:2d9:5400:2ff:feef:bbb1"
+ "IPv6": "2001:19f0:8001:2d9:5400:2ff:feef:bbb1",
+ "CanPort80": true
}
]
},
@@ -52,7 +56,8 @@
"RegionID": 11,
"HostName": "derp11.tailscale.com",
"IPv4": "18.230.97.74",
- "IPv6": "2600:1f1e:ee4:5611:ec5c:1736:d43b:a454"
+ "IPv6": "2600:1f1e:ee4:5611:ec5c:1736:d43b:a454",
+ "CanPort80": true
}
]
},
@@ -66,21 +71,24 @@
"RegionID": 12,
"HostName": "derp12.tailscale.com",
"IPv4": "216.128.144.130",
- "IPv6": "2001:19f0:5c01:289:5400:3ff:fe8d:cb5e"
+ "IPv6": "2001:19f0:5c01:289:5400:3ff:fe8d:cb5e",
+ "CanPort80": true
},
{
"Name": "12b",
"RegionID": 12,
"HostName": "derp12b.tailscale.com",
"IPv4": "45.63.71.144",
- "IPv6": "2001:19f0:5c01:48a:5400:3ff:fe8d:cb5f"
+ "IPv6": "2001:19f0:5c01:48a:5400:3ff:fe8d:cb5f",
+ "CanPort80": true
},
{
"Name": "12c",
"RegionID": 12,
"HostName": "derp12c.tailscale.com",
"IPv4": "149.28.119.105",
- "IPv6": "2001:19f0:5c01:2cb:5400:3ff:fe8d:cb60"
+ "IPv6": "2001:19f0:5c01:2cb:5400:3ff:fe8d:cb60",
+ "CanPort80": true
}
]
},
@@ -94,21 +102,24 @@
"RegionID": 2,
"HostName": "derp2d.tailscale.com",
"IPv4": "192.73.252.65",
- "IPv6": "2607:f740:0:3f::287"
+ "IPv6": "2607:f740:0:3f::287",
+ "CanPort80": true
},
{
"Name": "2e",
"RegionID": 2,
"HostName": "derp2e.tailscale.com",
"IPv4": "192.73.252.134",
- "IPv6": "2607:f740:0:3f::44c"
+ "IPv6": "2607:f740:0:3f::44c",
+ "CanPort80": true
},
{
"Name": "2f",
"RegionID": 2,
"HostName": "derp2f.tailscale.com",
"IPv4": "208.111.34.178",
- "IPv6": "2607:f740:0:3f::f4"
+ "IPv6": "2607:f740:0:3f::f4",
+ "CanPort80": true
}
]
},
@@ -122,7 +133,8 @@
"RegionID": 3,
"HostName": "derp3.tailscale.com",
"IPv4": "68.183.179.66",
- "IPv6": "2400:6180:0:d1::67d:8001"
+ "IPv6": "2400:6180:0:d1::67d:8001",
+ "CanPort80": true
}
]
},
@@ -136,21 +148,24 @@
"RegionID": 4,
"HostName": "derp4c.tailscale.com",
"IPv4": "134.122.77.138",
- "IPv6": "2a03:b0c0:3:d0::1501:6001"
+ "IPv6": "2a03:b0c0:3:d0::1501:6001",
+ "CanPort80": true
},
{
"Name": "4d",
"RegionID": 4,
"HostName": "derp4d.tailscale.com",
"IPv4": "134.122.94.167",
- "IPv6": "2a03:b0c0:3:d0::1501:b001"
+ "IPv6": "2a03:b0c0:3:d0::1501:b001",
+ "CanPort80": true
},
{
"Name": "4e",
"RegionID": 4,
"HostName": "derp4e.tailscale.com",
"IPv4": "134.122.74.153",
- "IPv6": "2a03:b0c0:3:d0::29:9001"
+ "IPv6": "2a03:b0c0:3:d0::29:9001",
+ "CanPort80": true
}
]
},
@@ -164,7 +179,8 @@
"RegionID": 5,
"HostName": "derp5.tailscale.com",
"IPv4": "103.43.75.49",
- "IPv6": "2001:19f0:5801:10b7:5400:2ff:feaa:284c"
+ "IPv6": "2001:19f0:5801:10b7:5400:2ff:feaa:284c",
+ "CanPort80": true
}
]
},
@@ -178,7 +194,8 @@
"RegionID": 6,
"HostName": "derp6.tailscale.com",
"IPv4": "68.183.90.120",
- "IPv6": "2400:6180:100:d0::982:d001"
+ "IPv6": "2400:6180:100:d0::982:d001",
+ "CanPort80": true
}
]
},
@@ -192,7 +209,8 @@
"RegionID": 7,
"HostName": "derp7.tailscale.com",
"IPv4": "167.179.89.145",
- "IPv6": "2401:c080:1000:467f:5400:2ff:feee:22aa"
+ "IPv6": "2401:c080:1000:467f:5400:2ff:feee:22aa",
+ "CanPort80": true
}
]
},
@@ -206,21 +224,24 @@
"RegionID": 8,
"HostName": "derp8b.tailscale.com",
"IPv4": "46.101.74.201",
- "IPv6": "2a03:b0c0:1:d0::ec1:e001"
+ "IPv6": "2a03:b0c0:1:d0::ec1:e001",
+ "CanPort80": true
},
{
"Name": "8c",
"RegionID": 8,
"HostName": "derp8c.tailscale.com",
"IPv4": "206.189.16.32",
- "IPv6": "2a03:b0c0:1:d0::e1f:4001"
+ "IPv6": "2a03:b0c0:1:d0::e1f:4001",
+ "CanPort80": true
},
{
"Name": "8d",
"RegionID": 8,
"HostName": "derp8d.tailscale.com",
"IPv4": "178.62.44.132",
- "IPv6": "2a03:b0c0:1:d0::e08:e001"
+ "IPv6": "2a03:b0c0:1:d0::e08:e001",
+ "CanPort80": true
}
]
},
@@ -234,21 +255,24 @@
"RegionID": 9,
"HostName": "derp9.tailscale.com",
"IPv4": "207.148.3.137",
- "IPv6": "2001:19f0:6401:1d9c:5400:2ff:feef:bb82"
+ "IPv6": "2001:19f0:6401:1d9c:5400:2ff:feef:bb82",
+ "CanPort80": true
},
{
"Name": "9b",
"RegionID": 9,
"HostName": "derp9b.tailscale.com",
"IPv4": "144.202.67.195",
- "IPv6": "2001:19f0:6401:eb5:5400:3ff:fe8d:6d9b"
+ "IPv6": "2001:19f0:6401:eb5:5400:3ff:fe8d:6d9b",
+ "CanPort80": true
},
{
"Name": "9c",
"RegionID": 9,
"HostName": "derp9c.tailscale.com",
"IPv4": "155.138.243.219",
- "IPv6": "2001:19f0:6401:fe7:5400:3ff:fe8d:6d9c"
+ "IPv6": "2001:19f0:6401:fe7:5400:3ff:fe8d:6d9c",
+ "CanPort80": true
}
]
}
diff --git a/net/dnsfallback/dnsfallback.go b/net/dnsfallback/dnsfallback.go
index 6b3ac864edd50..4c5d5fa2f2743 100644
--- a/net/dnsfallback/dnsfallback.go
+++ b/net/dnsfallback/dnsfallback.go
@@ -219,7 +219,7 @@ func lookup(ctx context.Context, host string, logf logger.Logf, ht *health.Track
ip netip.Addr
}
- dm := getDERPMap()
+ dm := GetDERPMap()
var cands4, cands6 []nameIP
for _, dr := range dm.Regions {
@@ -281,6 +281,7 @@ func lookup(ctx context.Context, host string, logf logger.Logf, ht *health.Track
func bootstrapDNSMap(ctx context.Context, serverName string, serverIP netip.Addr, queryName string, logf logger.Logf, ht *health.Tracker, netMon *netmon.Monitor) (dnsMap, error) {
dialer := netns.NewDialer(logf, netMon)
tr := http.DefaultTransport.(*http.Transport).Clone()
+ tr.DisableKeepAlives = true // This transport is meant to be used once.
tr.Proxy = tshttpproxy.ProxyFromEnvironment
tr.DialContext = func(ctx context.Context, netw, addr string) (net.Conn, error) {
return dialer.DialContext(ctx, "tcp", net.JoinHostPort(serverIP.String(), "443"))
@@ -310,9 +311,12 @@ func bootstrapDNSMap(ctx context.Context, serverName string, serverIP netip.Addr
// https://derp10.tailscale.com/bootstrap-dns
type dnsMap map[string][]netip.Addr
-// getDERPMap returns some DERP map. The DERP servers also run a fallback
-// DNS server.
-func getDERPMap() *tailcfg.DERPMap {
+// GetDERPMap returns a fallback DERP map that is always available, useful for basic
+// bootstrapping purposes. The dynamically updated DERP map in LocalBackend should
+// always be preferred over this. Use this DERP map only when the control plane is
+// unreachable or hasn't been reached yet. The DERP servers in the returned map also
+// run a fallback DNS server.
+func GetDERPMap() *tailcfg.DERPMap {
dm := getStaticDERPMap()
// Merge in any DERP servers from the cached map that aren't in the
diff --git a/net/dnsfallback/dnsfallback_test.go b/net/dnsfallback/dnsfallback_test.go
index 4298499b0189e..16f5027d4850f 100644
--- a/net/dnsfallback/dnsfallback_test.go
+++ b/net/dnsfallback/dnsfallback_test.go
@@ -18,7 +18,7 @@ import (
)
func TestGetDERPMap(t *testing.T) {
- dm := getDERPMap()
+ dm := GetDERPMap()
if dm == nil {
t.Fatal("nil")
}
@@ -78,7 +78,7 @@ func TestCache(t *testing.T) {
}
// Verify that our DERP map is merged with the cache.
- dm := getDERPMap()
+ dm := GetDERPMap()
region, ok := dm.Regions[99]
if !ok {
t.Fatal("expected region 99")
diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go
index 80957039e3ca9..8eb50a61dd340 100644
--- a/net/netcheck/netcheck.go
+++ b/net/netcheck/netcheck.go
@@ -14,13 +14,11 @@ import (
"io"
"log"
"maps"
- "math/rand/v2"
"net"
"net/http"
"net/netip"
"runtime"
"sort"
- "strings"
"sync"
"syscall"
"time"
@@ -28,6 +26,7 @@ import (
"github.com/tcnksm/go-httpstat"
"tailscale.com/derp/derphttp"
"tailscale.com/envknob"
+ "tailscale.com/net/captivedetection"
"tailscale.com/net/dnscache"
"tailscale.com/net/neterror"
"tailscale.com/net/netmon"
@@ -847,11 +846,8 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe
tmr := time.AfterFunc(c.captivePortalDelay(), func() {
defer close(ch)
- found, err := c.checkCaptivePortal(ctx, dm, preferredDERP)
- if err != nil {
- c.logf("[v1] checkCaptivePortal: %v", err)
- return
- }
+ d := captivedetection.NewDetector(c.logf)
+ found := d.Detect(ctx, c.NetMon, dm, preferredDERP)
rs.report.CaptivePortal.Set(found)
})
@@ -988,75 +984,6 @@ func (c *Client) finishAndStoreReport(rs *reportState, dm *tailcfg.DERPMap) *Rep
return report
}
-var noRedirectClient = &http.Client{
- // No redirects allowed
- CheckRedirect: func(req *http.Request, via []*http.Request) error {
- return http.ErrUseLastResponse
- },
-
- // Remaining fields are the same as the default client.
- Transport: http.DefaultClient.Transport,
- Jar: http.DefaultClient.Jar,
- Timeout: http.DefaultClient.Timeout,
-}
-
-// checkCaptivePortal reports whether or not we think the system is behind a
-// captive portal, detected by making a request to a URL that we know should
-// return a "204 No Content" response and checking if that's what we get.
-//
-// The boolean return is whether we think we have a captive portal.
-func (c *Client) checkCaptivePortal(ctx context.Context, dm *tailcfg.DERPMap, preferredDERP int) (bool, error) {
- defer noRedirectClient.CloseIdleConnections()
-
- // If we have a preferred DERP region with more than one node, try
- // that; otherwise, pick a random one not marked as "Avoid".
- if preferredDERP == 0 || dm.Regions[preferredDERP] == nil ||
- (preferredDERP != 0 && len(dm.Regions[preferredDERP].Nodes) == 0) {
- rids := make([]int, 0, len(dm.Regions))
- for id, reg := range dm.Regions {
- if reg == nil || reg.Avoid || len(reg.Nodes) == 0 {
- continue
- }
- rids = append(rids, id)
- }
- if len(rids) == 0 {
- return false, nil
- }
- preferredDERP = rids[rand.IntN(len(rids))]
- }
-
- node := dm.Regions[preferredDERP].Nodes[0]
-
- if strings.HasSuffix(node.HostName, tailcfg.DotInvalid) {
- // Don't try to connect to invalid hostnames. This occurred in tests:
- // https://github.com/tailscale/tailscale/issues/6207
- // TODO(bradfitz,andrew-d): how to actually handle this nicely?
- return false, nil
- }
-
- req, err := http.NewRequestWithContext(ctx, "GET", "http://"+node.HostName+"/generate_204", nil)
- if err != nil {
- return false, err
- }
-
- // Note: the set of valid characters in a challenge and the total
- // length is limited; see isChallengeChar in cmd/derper for more
- // details.
- chal := "ts_" + node.HostName
- req.Header.Set("X-Tailscale-Challenge", chal)
- r, err := noRedirectClient.Do(req)
- if err != nil {
- return false, err
- }
- defer r.Body.Close()
-
- expectedResponse := "response " + chal
- validResponse := r.Header.Get("X-Tailscale-Response") == expectedResponse
-
- c.logf("[v2] checkCaptivePortal url=%q status_code=%d valid_response=%v", req.URL.String(), r.StatusCode, validResponse)
- return r.StatusCode != 204 || !validResponse, nil
-}
-
// runHTTPOnlyChecks is the netcheck done by environments that can
// only do HTTP requests, such as ws/wasm.
func (c *Client) runHTTPOnlyChecks(ctx context.Context, last *Report, rs *reportState, dm *tailcfg.DERPMap) error {
diff --git a/net/netcheck/netcheck_test.go b/net/netcheck/netcheck_test.go
index 8b71247449244..26e52602afaa5 100644
--- a/net/netcheck/netcheck_test.go
+++ b/net/netcheck/netcheck_test.go
@@ -15,14 +15,12 @@ import (
"sort"
"strconv"
"strings"
- "sync/atomic"
"testing"
"time"
"tailscale.com/net/netmon"
"tailscale.com/net/stun/stuntest"
"tailscale.com/tailcfg"
- "tailscale.com/tstest"
"tailscale.com/tstest/nettest"
)
@@ -778,54 +776,6 @@ func TestSortRegions(t *testing.T) {
}
}
-func TestNoCaptivePortalWhenUDP(t *testing.T) {
- nettest.SkipIfNoNetwork(t) // empirically. not sure why.
-
- // Override noRedirectClient to handle the /generate_204 endpoint
- var generate204Called atomic.Bool
- tr := RoundTripFunc(func(req *http.Request) *http.Response {
- if !strings.HasSuffix(req.URL.String(), "/generate_204") {
- panic("bad URL: " + req.URL.String())
- }
- generate204Called.Store(true)
- return &http.Response{
- StatusCode: http.StatusNoContent,
- Header: make(http.Header),
- }
- })
-
- tstest.Replace(t, &noRedirectClient.Transport, http.RoundTripper(tr))
-
- stunAddr, cleanup := stuntest.Serve(t)
- defer cleanup()
-
- c := newTestClient(t)
- c.testEnoughRegions = 1
- // Set the delay long enough that we have time to cancel it
- // when our STUN probe succeeds.
- c.testCaptivePortalDelay = 10 * time.Second
-
- ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
- defer cancel()
-
- if err := c.Standalone(ctx, "127.0.0.1:0"); err != nil {
- t.Fatal(err)
- }
-
- r, err := c.GetReport(ctx, stuntest.DERPMapOf(stunAddr.String()), nil)
- if err != nil {
- t.Fatal(err)
- }
-
- // Should not have called our captive portal function.
- if generate204Called.Load() {
- t.Errorf("captive portal check called; expected no call")
- }
- if r.CaptivePortal != "" {
- t.Errorf("got CaptivePortal=%q, want empty", r.CaptivePortal)
- }
-}
-
type RoundTripFunc func(req *http.Request) *http.Response
func (f RoundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
diff --git a/net/netns/netns_darwin.go b/net/netns/netns_darwin.go
index 55e29cc29a001..ac5e89d76cc2e 100644
--- a/net/netns/netns_darwin.go
+++ b/net/netns/netns_darwin.go
@@ -92,7 +92,9 @@ func getInterfaceIndex(logf logger.Logf, netMon *netmon.Monitor, address string)
// If the address doesn't parse, use the default index.
addr, err := parseAddress(address)
if err != nil {
- logf("[unexpected] netns: error parsing address %q: %v", address, err)
+ if err != errUnspecifiedHost {
+ logf("[unexpected] netns: error parsing address %q: %v", address, err)
+ }
return defaultIdx()
}
diff --git a/net/netns/netns_dw.go b/net/netns/netns_dw.go
index bf654666fcda8..f92ba9462c32a 100644
--- a/net/netns/netns_dw.go
+++ b/net/netns/netns_dw.go
@@ -6,16 +6,22 @@
package netns
import (
+ "errors"
"net"
"net/netip"
)
+var errUnspecifiedHost = errors.New("unspecified host")
+
func parseAddress(address string) (addr netip.Addr, err error) {
host, _, err := net.SplitHostPort(address)
if err != nil {
// error means the string didn't contain a port number, so use the string directly
host = address
}
+ if host == "" {
+ return addr, errUnspecifiedHost
+ }
return netip.ParseAddr(host)
}
diff --git a/net/netns/netns_windows.go b/net/netns/netns_windows.go
index 3c4bafbb548a6..afbda0f47ece6 100644
--- a/net/netns/netns_windows.go
+++ b/net/netns/netns_windows.go
@@ -86,23 +86,26 @@ func controlC(logf logger.Logf, network, address string, c syscall.RawConn) (err
var ifaceIdxV4, ifaceIdxV6 uint32
if useRoute := bindToInterfaceByRoute.Load() || bindToInterfaceByRouteEnv(); useRoute {
addr, err := parseAddress(address)
- if err != nil {
- return fmt.Errorf("parseAddress: %w", err)
- }
-
- if canV4 && (addr.Is4() || addr.Is4In6()) {
- addrV4 := addr.Unmap()
- ifaceIdxV4, err = getInterfaceIndex(logf, addrV4, defIfaceIdxV4)
- if err != nil {
- return fmt.Errorf("getInterfaceIndex(%v): %w", addrV4, err)
+ if err == nil {
+ if canV4 && (addr.Is4() || addr.Is4In6()) {
+ addrV4 := addr.Unmap()
+ ifaceIdxV4, err = getInterfaceIndex(logf, addrV4, defIfaceIdxV4)
+ if err != nil {
+ return fmt.Errorf("getInterfaceIndex(%v): %w", addrV4, err)
+ }
}
- }
- if canV6 && addr.Is6() {
- ifaceIdxV6, err = getInterfaceIndex(logf, addr, defIfaceIdxV6)
- if err != nil {
- return fmt.Errorf("getInterfaceIndex(%v): %w", addr, err)
+ if canV6 && addr.Is6() {
+ ifaceIdxV6, err = getInterfaceIndex(logf, addr, defIfaceIdxV6)
+ if err != nil {
+ return fmt.Errorf("getInterfaceIndex(%v): %w", addr, err)
+ }
+ }
+ } else {
+ if err != errUnspecifiedHost {
+ logf("[unexpected] netns: error parsing address %q: %v", address, err)
}
+ ifaceIdxV4, ifaceIdxV6 = defIfaceIdxV4, defIfaceIdxV6
}
} else {
ifaceIdxV4, ifaceIdxV6 = defIfaceIdxV4, defIfaceIdxV6
diff --git a/net/packet/checksum/checksum.go b/net/packet/checksum/checksum.go
index c49ae3626a2d6..547ea3a3577ed 100644
--- a/net/packet/checksum/checksum.go
+++ b/net/packet/checksum/checksum.go
@@ -61,7 +61,7 @@ func UpdateDstAddr(q *packet.Parsed, dst netip.Addr) {
b := q.Buffer()
if dst.Is6() {
v6 := dst.As16()
- copy(b[24:36], v6[:])
+ copy(b[24:40], v6[:])
updateV6PacketChecksums(q, old, dst)
} else {
v4 := dst.As4()
diff --git a/net/packet/checksum/checksum_test.go b/net/packet/checksum/checksum_test.go
index aeb030c1c8e76..bf818743d3dbf 100644
--- a/net/packet/checksum/checksum_test.go
+++ b/net/packet/checksum/checksum_test.go
@@ -5,6 +5,7 @@ package checksum
import (
"encoding/binary"
+ "math/rand/v2"
"net/netip"
"testing"
@@ -94,7 +95,7 @@ func TestHeaderChecksumsV4(t *testing.T) {
}
func TestNatChecksumsV6UDP(t *testing.T) {
- a1, a2 := netip.MustParseAddr("a::1"), netip.MustParseAddr("b::1")
+ a1, a2 := randV6Addr(), randV6Addr()
// Make a fake UDP packet with 32 bytes of zeros as the datagram payload.
b := header.IPv6(make([]byte, header.IPv6MinimumSize+header.UDPMinimumSize+32))
@@ -124,25 +125,43 @@ func TestNatChecksumsV6UDP(t *testing.T) {
}
// Parse the packet.
- var p packet.Parsed
+ var p, p2 packet.Parsed
p.Decode(b)
t.Log(p.String())
// Update the source address of the packet to be the same as the dest.
UpdateSrcAddr(&p, a2)
+ p2.Decode(p.Buffer())
+ if p2.Src.Addr() != a2 {
+ t.Fatalf("got %v, want %v", p2.Src, a2)
+ }
if !udp.IsChecksumValid(tcpip.AddrFrom16Slice(a2.AsSlice()), tcpip.AddrFrom16Slice(a2.AsSlice()), checksum.Checksum(b.Payload()[header.UDPMinimumSize:], 0)) {
t.Fatal("incorrect checksum after updating source address")
}
// Update the dest address of the packet to be the original source address.
UpdateDstAddr(&p, a1)
+ p2.Decode(p.Buffer())
+ if p2.Dst.Addr() != a1 {
+ t.Fatalf("got %v, want %v", p2.Dst, a1)
+ }
if !udp.IsChecksumValid(tcpip.AddrFrom16Slice(a2.AsSlice()), tcpip.AddrFrom16Slice(a1.AsSlice()), checksum.Checksum(b.Payload()[header.UDPMinimumSize:], 0)) {
t.Fatal("incorrect checksum after updating destination address")
}
}
+func randV6Addr() netip.Addr {
+ a1, a2 := rand.Int64(), rand.Int64()
+ return netip.AddrFrom16([16]byte{
+ byte(a1 >> 56), byte(a1 >> 48), byte(a1 >> 40), byte(a1 >> 32),
+ byte(a1 >> 24), byte(a1 >> 16), byte(a1 >> 8), byte(a1),
+ byte(a2 >> 56), byte(a2 >> 48), byte(a2 >> 40), byte(a2 >> 32),
+ byte(a2 >> 24), byte(a2 >> 16), byte(a2 >> 8), byte(a2),
+ })
+}
+
func TestNatChecksumsV6TCP(t *testing.T) {
- a1, a2 := netip.MustParseAddr("a::1"), netip.MustParseAddr("b::1")
+ a1, a2 := randV6Addr(), randV6Addr()
// Make a fake TCP packet with no payload.
b := header.IPv6(make([]byte, header.IPv6MinimumSize+header.TCPMinimumSize))
@@ -178,18 +197,26 @@ func TestNatChecksumsV6TCP(t *testing.T) {
}
// Parse the packet.
- var p packet.Parsed
+ var p, p2 packet.Parsed
p.Decode(b)
t.Log(p.String())
// Update the source address of the packet to be the same as the dest.
UpdateSrcAddr(&p, a2)
+ p2.Decode(p.Buffer())
+ if p2.Src.Addr() != a2 {
+ t.Fatalf("got %v, want %v", p2.Src, a2)
+ }
if !tcp.IsChecksumValid(tcpip.AddrFrom16Slice(a2.AsSlice()), tcpip.AddrFrom16Slice(a2.AsSlice()), 0, 0) {
t.Fatal("incorrect checksum after updating source address")
}
// Update the dest address of the packet to be the original source address.
UpdateDstAddr(&p, a1)
+ p2.Decode(p.Buffer())
+ if p2.Dst.Addr() != a1 {
+ t.Fatalf("got %v, want %v", p2.Dst, a1)
+ }
if !tcp.IsChecksumValid(tcpip.AddrFrom16Slice(a2.AsSlice()), tcpip.AddrFrom16Slice(a1.AsSlice()), 0, 0) {
t.Fatal("incorrect checksum after updating destination address")
}
diff --git a/net/socks5/socks5.go b/net/socks5/socks5.go
index b774ebe2477b2..0d651537fac9a 100644
--- a/net/socks5/socks5.go
+++ b/net/socks5/socks5.go
@@ -13,8 +13,10 @@
package socks5
import (
+ "bytes"
"context"
"encoding/binary"
+ "errors"
"fmt"
"io"
"log"
@@ -121,7 +123,7 @@ func (s *Server) Serve(l net.Listener) error {
}
go func() {
defer c.Close()
- conn := &Conn{clientConn: c, srv: s}
+ conn := &Conn{logf: s.Logf, clientConn: c, srv: s}
err := conn.Run()
if err != nil {
s.logf("client connection failed: %v", err)
@@ -136,9 +138,12 @@ type Conn struct {
// The struct is filled by each of the internal
// methods in turn as the transaction progresses.
+ logf logger.Logf
srv *Server
clientConn net.Conn
request *request
+
+ udpClientAddr net.Addr
}
// Run starts the new connection.
@@ -172,58 +177,59 @@ func (c *Conn) Run() error {
func (c *Conn) handleRequest() error {
req, err := parseClientRequest(c.clientConn)
if err != nil {
- res := &response{reply: generalFailure}
+ res := errorResponse(generalFailure)
buf, _ := res.marshal()
c.clientConn.Write(buf)
return err
}
- if req.command != connect {
- res := &response{reply: commandNotSupported}
+
+ c.request = req
+ switch req.command {
+ case connect:
+ return c.handleTCP()
+ case udpAssociate:
+ return c.handleUDP()
+ default:
+ res := errorResponse(commandNotSupported)
buf, _ := res.marshal()
c.clientConn.Write(buf)
return fmt.Errorf("unsupported command %v", req.command)
}
- c.request = req
+}
+func (c *Conn) handleTCP() error {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
srv, err := c.srv.dial(
ctx,
"tcp",
- net.JoinHostPort(c.request.destination, strconv.Itoa(int(c.request.port))),
+ c.request.destination.hostPort(),
)
if err != nil {
- res := &response{reply: generalFailure}
+ res := errorResponse(generalFailure)
buf, _ := res.marshal()
c.clientConn.Write(buf)
return err
}
defer srv.Close()
- serverAddr, serverPortStr, err := net.SplitHostPort(srv.LocalAddr().String())
+
+ localAddr := srv.LocalAddr().String()
+ serverAddr, serverPort, err := splitHostPort(localAddr)
if err != nil {
return err
}
- serverPort, _ := strconv.Atoi(serverPortStr)
- var bindAddrType addrType
- if ip := net.ParseIP(serverAddr); ip != nil {
- if ip.To4() != nil {
- bindAddrType = ipv4
- } else {
- bindAddrType = ipv6
- }
- } else {
- bindAddrType = domainName
- }
res := &response{
- reply: success,
- bindAddrType: bindAddrType,
- bindAddr: serverAddr,
- bindPort: uint16(serverPort),
+ reply: success,
+ bindAddr: socksAddr{
+ addrType: getAddrType(serverAddr),
+ addr: serverAddr,
+ port: serverPort,
+ },
}
buf, err := res.marshal()
if err != nil {
- res = &response{reply: generalFailure}
+ res = errorResponse(generalFailure)
buf, _ = res.marshal()
}
c.clientConn.Write(buf)
@@ -246,6 +252,208 @@ func (c *Conn) handleRequest() error {
return <-errc
}
+func (c *Conn) handleUDP() error {
+ // The DST.ADDR and DST.PORT fields contain the address and port that
+ // the client expects to use to send UDP datagrams on for the
+ // association. The server MAY use this information to limit access
+ // to the association.
+ // @see Page 6, https://datatracker.ietf.org/doc/html/rfc1928.
+ //
+ // We do NOT limit the access from the client currently in this implementation.
+ _ = c.request.destination
+
+ addr := c.clientConn.LocalAddr()
+ host, _, err := net.SplitHostPort(addr.String())
+ if err != nil {
+ return err
+ }
+ clientUDPConn, err := net.ListenPacket("udp", net.JoinHostPort(host, "0"))
+ if err != nil {
+ res := errorResponse(generalFailure)
+ buf, _ := res.marshal()
+ c.clientConn.Write(buf)
+ return err
+ }
+ defer clientUDPConn.Close()
+
+ serverUDPConn, err := net.ListenPacket("udp", "[::]:0")
+ if err != nil {
+ res := errorResponse(generalFailure)
+ buf, _ := res.marshal()
+ c.clientConn.Write(buf)
+ return err
+ }
+ defer serverUDPConn.Close()
+
+ bindAddr, bindPort, err := splitHostPort(clientUDPConn.LocalAddr().String())
+ if err != nil {
+ return err
+ }
+
+ res := &response{
+ reply: success,
+ bindAddr: socksAddr{
+ addrType: getAddrType(bindAddr),
+ addr: bindAddr,
+ port: bindPort,
+ },
+ }
+ buf, err := res.marshal()
+ if err != nil {
+ res = errorResponse(generalFailure)
+ buf, _ = res.marshal()
+ }
+ c.clientConn.Write(buf)
+
+ return c.transferUDP(c.clientConn, clientUDPConn, serverUDPConn)
+}
+
+func (c *Conn) transferUDP(associatedTCP net.Conn, clientConn net.PacketConn, targetConn net.PacketConn) error {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ const bufferSize = 8 * 1024
+ const readTimeout = 5 * time.Second
+
+ // client -> target
+ go func() {
+ defer cancel()
+ buf := make([]byte, bufferSize)
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ err := c.handleUDPRequest(clientConn, targetConn, buf, readTimeout)
+ if err != nil {
+ if isTimeout(err) {
+ continue
+ }
+ if errors.Is(err, net.ErrClosed) {
+ return
+ }
+ c.logf("udp transfer: handle udp request fail: %v", err)
+ }
+ }
+ }
+ }()
+
+ // target -> client
+ go func() {
+ defer cancel()
+ buf := make([]byte, bufferSize)
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ err := c.handleUDPResponse(targetConn, clientConn, buf, readTimeout)
+ if err != nil {
+ if isTimeout(err) {
+ continue
+ }
+ if errors.Is(err, net.ErrClosed) {
+ return
+ }
+ c.logf("udp transfer: handle udp response fail: %v", err)
+ }
+ }
+ }
+ }()
+
+ // A UDP association terminates when the TCP connection that the UDP
+ // ASSOCIATE request arrived on terminates. RFC1928
+ _, err := io.Copy(io.Discard, associatedTCP)
+ if err != nil {
+ err = fmt.Errorf("udp associated tcp conn: %w", err)
+ }
+ return err
+}
+
+func (c *Conn) handleUDPRequest(
+ clientConn net.PacketConn,
+ targetConn net.PacketConn,
+ buf []byte,
+ readTimeout time.Duration,
+) error {
+ // add a deadline for the read to avoid blocking forever
+ _ = clientConn.SetReadDeadline(time.Now().Add(readTimeout))
+ n, addr, err := clientConn.ReadFrom(buf)
+ if err != nil {
+ return fmt.Errorf("read from client: %w", err)
+ }
+ c.udpClientAddr = addr
+ req, data, err := parseUDPRequest(buf[:n])
+ if err != nil {
+ return fmt.Errorf("parse udp request: %w", err)
+ }
+ targetAddr, err := net.ResolveUDPAddr("udp", req.addr.hostPort())
+ if err != nil {
+ c.logf("resolve target addr fail: %v", err)
+ }
+
+ nn, err := targetConn.WriteTo(data, targetAddr)
+ if err != nil {
+ return fmt.Errorf("write to target %s fail: %w", targetAddr, err)
+ }
+ if nn != len(data) {
+ return fmt.Errorf("write to target %s fail: %w", targetAddr, io.ErrShortWrite)
+ }
+ return nil
+}
+
+func (c *Conn) handleUDPResponse(
+ targetConn net.PacketConn,
+ clientConn net.PacketConn,
+ buf []byte,
+ readTimeout time.Duration,
+) error {
+ // add a deadline for the read to avoid blocking forever
+ _ = targetConn.SetReadDeadline(time.Now().Add(readTimeout))
+ n, addr, err := targetConn.ReadFrom(buf)
+ if err != nil {
+ return fmt.Errorf("read from target: %w", err)
+ }
+ host, port, err := splitHostPort(addr.String())
+ if err != nil {
+ return fmt.Errorf("split host port: %w", err)
+ }
+ hdr := udpRequest{addr: socksAddr{addrType: getAddrType(host), addr: host, port: port}}
+ pkt, err := hdr.marshal()
+ if err != nil {
+ return fmt.Errorf("marshal udp request: %w", err)
+ }
+ data := append(pkt, buf[:n]...)
+ // use addr from client to send back
+ nn, err := clientConn.WriteTo(data, c.udpClientAddr)
+ if err != nil {
+ return fmt.Errorf("write to client: %w", err)
+ }
+ if nn != len(data) {
+ return fmt.Errorf("write to client: %w", io.ErrShortWrite)
+ }
+ return nil
+}
+
+func isTimeout(err error) bool {
+ terr, ok := errors.Unwrap(err).(interface{ Timeout() bool })
+ return ok && terr.Timeout()
+}
+
+func splitHostPort(hostport string) (host string, port uint16, err error) {
+ host, portStr, err := net.SplitHostPort(hostport)
+ if err != nil {
+ return "", 0, err
+ }
+ portInt, err := strconv.Atoi(portStr)
+ if err != nil {
+ return "", 0, err
+ }
+ if portInt < 0 || portInt > 65535 {
+ return "", 0, fmt.Errorf("invalid port number %d", portInt)
+ }
+ return host, uint16(portInt), nil
+}
+
// parseClientGreeting parses a request initiation packet.
func parseClientGreeting(r io.Reader, authMethod byte) error {
var hdr [2]byte
@@ -295,123 +503,205 @@ func parseClientAuth(r io.Reader) (usr, pwd string, err error) {
return string(usrBytes), string(pwdBytes), nil
}
+func getAddrType(addr string) addrType {
+ if ip := net.ParseIP(addr); ip != nil {
+ if ip.To4() != nil {
+ return ipv4
+ }
+ return ipv6
+ }
+ return domainName
+}
+
// request represents data contained within a SOCKS5
// connection request packet.
type request struct {
- command commandType
- destination string
- port uint16
- destAddrType addrType
+ command commandType
+ destination socksAddr
}
// parseClientRequest converts raw packet bytes into a
// SOCKS5Request struct.
func parseClientRequest(r io.Reader) (*request, error) {
- var hdr [4]byte
+ var hdr [3]byte
_, err := io.ReadFull(r, hdr[:])
if err != nil {
return nil, fmt.Errorf("could not read packet header")
}
cmd := hdr[1]
- destAddrType := addrType(hdr[3])
- var destination string
- var port uint16
+ destination, err := parseSocksAddr(r)
+ return &request{
+ command: commandType(cmd),
+ destination: destination,
+ }, err
+}
+
+type socksAddr struct {
+ addrType addrType
+ addr string
+ port uint16
+}
+
+var zeroSocksAddr = socksAddr{addrType: ipv4, addr: "0.0.0.0", port: 0}
+
+func parseSocksAddr(r io.Reader) (addr socksAddr, err error) {
+ var addrTypeData [1]byte
+ _, err = io.ReadFull(r, addrTypeData[:])
+ if err != nil {
+ return socksAddr{}, fmt.Errorf("could not read address type")
+ }
- if destAddrType == ipv4 {
+ dstAddrType := addrType(addrTypeData[0])
+ var destination string
+ switch dstAddrType {
+ case ipv4:
var ip [4]byte
_, err = io.ReadFull(r, ip[:])
if err != nil {
- return nil, fmt.Errorf("could not read IPv4 address")
+ return socksAddr{}, fmt.Errorf("could not read IPv4 address")
}
destination = net.IP(ip[:]).String()
- } else if destAddrType == domainName {
+ case domainName:
var dstSizeByte [1]byte
_, err = io.ReadFull(r, dstSizeByte[:])
if err != nil {
- return nil, fmt.Errorf("could not read domain name size")
+ return socksAddr{}, fmt.Errorf("could not read domain name size")
}
dstSize := int(dstSizeByte[0])
domainName := make([]byte, dstSize)
_, err = io.ReadFull(r, domainName)
if err != nil {
- return nil, fmt.Errorf("could not read domain name")
+ return socksAddr{}, fmt.Errorf("could not read domain name")
}
destination = string(domainName)
- } else if destAddrType == ipv6 {
+ case ipv6:
var ip [16]byte
_, err = io.ReadFull(r, ip[:])
if err != nil {
- return nil, fmt.Errorf("could not read IPv6 address")
+ return socksAddr{}, fmt.Errorf("could not read IPv6 address")
}
destination = net.IP(ip[:]).String()
- } else {
- return nil, fmt.Errorf("unsupported address type")
+ default:
+ return socksAddr{}, fmt.Errorf("unsupported address type")
}
var portBytes [2]byte
_, err = io.ReadFull(r, portBytes[:])
if err != nil {
- return nil, fmt.Errorf("could not read port")
+ return socksAddr{}, fmt.Errorf("could not read port")
}
- port = binary.BigEndian.Uint16(portBytes[:])
-
- return &request{
- command: commandType(cmd),
- destination: destination,
- port: port,
- destAddrType: destAddrType,
+ port := binary.BigEndian.Uint16(portBytes[:])
+ return socksAddr{
+ addrType: dstAddrType,
+ addr: destination,
+ port: port,
}, nil
}
+func (s socksAddr) marshal() ([]byte, error) {
+ var addr []byte
+ switch s.addrType {
+ case ipv4:
+ addr = net.ParseIP(s.addr).To4()
+ if addr == nil {
+ return nil, fmt.Errorf("invalid IPv4 address for binding")
+ }
+ case domainName:
+ if len(s.addr) > 255 {
+ return nil, fmt.Errorf("invalid domain name for binding")
+ }
+ addr = make([]byte, 0, len(s.addr)+1)
+ addr = append(addr, byte(len(s.addr)))
+ addr = append(addr, []byte(s.addr)...)
+ case ipv6:
+ addr = net.ParseIP(s.addr).To16()
+ if addr == nil {
+ return nil, fmt.Errorf("invalid IPv6 address for binding")
+ }
+ default:
+ return nil, fmt.Errorf("unsupported address type")
+ }
+
+ pkt := []byte{byte(s.addrType)}
+ pkt = append(pkt, addr...)
+ pkt = binary.BigEndian.AppendUint16(pkt, s.port)
+ return pkt, nil
+}
+func (s socksAddr) hostPort() string {
+ return net.JoinHostPort(s.addr, strconv.Itoa(int(s.port)))
+}
+
// response contains the contents of
// a response packet sent from the proxy
// to the client.
type response struct {
- reply replyCode
- bindAddrType addrType
- bindAddr string
- bindPort uint16
+ reply replyCode
+ bindAddr socksAddr
+}
+
+func errorResponse(code replyCode) *response {
+ return &response{reply: code, bindAddr: zeroSocksAddr}
}
// marshal converts a SOCKS5Response struct into
// a packet. If res.reply == Success, it may throw an error on
// receiving an invalid bind address. Otherwise, it will not throw.
func (res *response) marshal() ([]byte, error) {
- pkt := make([]byte, 4)
+ pkt := make([]byte, 3)
pkt[0] = socks5Version
pkt[1] = byte(res.reply)
pkt[2] = 0 // null reserved byte
- pkt[3] = byte(res.bindAddrType)
- if res.reply != success {
- return pkt, nil
+ addrPkt, err := res.bindAddr.marshal()
+ if err != nil {
+ return nil, err
}
- var addr []byte
- switch res.bindAddrType {
- case ipv4:
- addr = net.ParseIP(res.bindAddr).To4()
- if addr == nil {
- return nil, fmt.Errorf("invalid IPv4 address for binding")
- }
- case domainName:
- if len(res.bindAddr) > 255 {
- return nil, fmt.Errorf("invalid domain name for binding")
- }
- addr = make([]byte, 0, len(res.bindAddr)+1)
- addr = append(addr, byte(len(res.bindAddr)))
- addr = append(addr, []byte(res.bindAddr)...)
- case ipv6:
- addr = net.ParseIP(res.bindAddr).To16()
- if addr == nil {
- return nil, fmt.Errorf("invalid IPv6 address for binding")
- }
- default:
- return nil, fmt.Errorf("unsupported address type")
+ return append(pkt, addrPkt...), nil
+}
+
+type udpRequest struct {
+ frag byte
+ addr socksAddr
+}
+
+// +----+------+------+----------+----------+----------+
+// |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
+// +----+------+------+----------+----------+----------+
+// | 2 | 1 | 1 | Variable | 2 | Variable |
+// +----+------+------+----------+----------+----------+
+func parseUDPRequest(data []byte) (*udpRequest, []byte, error) {
+ if len(data) < 4 {
+ return nil, nil, fmt.Errorf("invalid packet length")
}
- pkt = append(pkt, addr...)
- pkt = binary.BigEndian.AppendUint16(pkt, uint16(res.bindPort))
+ // reserved bytes
+ if !(data[0] == 0 && data[1] == 0) {
+ return nil, nil, fmt.Errorf("invalid udp request header")
+ }
- return pkt, nil
+ frag := data[2]
+
+ reader := bytes.NewReader(data[3:])
+ addr, err := parseSocksAddr(reader)
+	bodyLen := reader.Len() // (*bytes.Reader).Len() returns the length of the unread portion
+ body := data[len(data)-bodyLen:]
+ return &udpRequest{
+ frag: frag,
+ addr: addr,
+ }, body, err
+}
+
+func (u *udpRequest) marshal() ([]byte, error) {
+ pkt := make([]byte, 3)
+ pkt[0] = 0
+ pkt[1] = 0
+ pkt[2] = u.frag
+
+ addrPkt, err := u.addr.marshal()
+ if err != nil {
+ return nil, err
+ }
+
+ return append(pkt, addrPkt...), nil
}
diff --git a/net/socks5/socks5_test.go b/net/socks5/socks5_test.go
index 201a6657532f9..11ea59d4b57d1 100644
--- a/net/socks5/socks5_test.go
+++ b/net/socks5/socks5_test.go
@@ -4,6 +4,7 @@
package socks5
import (
+ "bytes"
"errors"
"fmt"
"io"
@@ -32,6 +33,19 @@ func backendServer(listener net.Listener) {
listener.Close()
}
+func udpEchoServer(conn net.PacketConn) {
+ var buf [1024]byte
+ n, addr, err := conn.ReadFrom(buf[:])
+ if err != nil {
+ panic(err)
+ }
+ _, err = conn.WriteTo(buf[:n], addr)
+ if err != nil {
+ panic(err)
+ }
+ conn.Close()
+}
+
func TestRead(t *testing.T) {
// backend server which we'll use SOCKS5 to connect to
listener, err := net.Listen("tcp", ":0")
@@ -152,3 +166,102 @@ func TestReadPassword(t *testing.T) {
t.Fatal(err)
}
}
+
+func TestUDP(t *testing.T) {
+ // backend UDP server which we'll use SOCKS5 to connect to
+ listener, err := net.ListenPacket("udp", ":0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ backendServerPort := listener.LocalAddr().(*net.UDPAddr).Port
+ go udpEchoServer(listener)
+
+ // SOCKS5 server
+ socks5, err := net.Listen("tcp", ":0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ socks5Port := socks5.Addr().(*net.TCPAddr).Port
+ go socks5Server(socks5)
+
+	// net/proxy doesn't support UDP, so we need to manually send the SOCKS5 UDP request
+ conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%d", socks5Port))
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = conn.Write([]byte{0x05, 0x01, 0x00}) // client hello with no auth
+ if err != nil {
+ t.Fatal(err)
+ }
+ buf := make([]byte, 1024)
+ n, err := conn.Read(buf) // server hello
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != 2 || buf[0] != 0x05 || buf[1] != 0x00 {
+ t.Fatalf("got: %q want: 0x05 0x00", buf[:n])
+ }
+
+ targetAddr := socksAddr{
+ addrType: domainName,
+ addr: "localhost",
+ port: uint16(backendServerPort),
+ }
+ targetAddrPkt, err := targetAddr.marshal()
+ if err != nil {
+ t.Fatal(err)
+ }
+	_, err = conn.Write(append([]byte{0x05, 0x03, 0x00}, targetAddrPkt...)) // client request
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ n, err = conn.Read(buf) // server response
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n < 3 || !bytes.Equal(buf[:3], []byte{0x05, 0x00, 0x00}) {
+ t.Fatalf("got: %q want: 0x05 0x00 0x00", buf[:n])
+ }
+ udpProxySocksAddr, err := parseSocksAddr(bytes.NewReader(buf[3:n]))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ udpProxyAddr, err := net.ResolveUDPAddr("udp", udpProxySocksAddr.hostPort())
+ if err != nil {
+ t.Fatal(err)
+ }
+ udpConn, err := net.DialUDP("udp", nil, udpProxyAddr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ udpPayload, err := (&udpRequest{addr: targetAddr}).marshal()
+ if err != nil {
+ t.Fatal(err)
+ }
+ udpPayload = append(udpPayload, []byte("Test")...)
+ _, err = udpConn.Write(udpPayload) // send udp package
+ if err != nil {
+ t.Fatal(err)
+ }
+ n, _, err = udpConn.ReadFrom(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, responseBody, err := parseUDPRequest(buf[:n]) // read udp response
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(responseBody) != "Test" {
+ t.Fatalf("got: %q want: Test", responseBody)
+ }
+ err = udpConn.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = conn.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/net/tlsdial/tlsdial.go b/net/tlsdial/tlsdial.go
index 087d2fbce1687..a49e7f0f730ee 100644
--- a/net/tlsdial/tlsdial.go
+++ b/net/tlsdial/tlsdial.go
@@ -26,6 +26,7 @@ import (
"tailscale.com/envknob"
"tailscale.com/health"
+ "tailscale.com/hostinfo"
)
var counterFallbackOK int32 // atomic
@@ -76,17 +77,36 @@ func Config(host string, ht *health.Tracker, base *tls.Config) *tls.Config {
// own cert verification, as do the same work that it'd do
// (with the baked-in fallback root) in the VerifyConnection hook.
conf.InsecureSkipVerify = true
- conf.VerifyConnection = func(cs tls.ConnectionState) error {
+ conf.VerifyConnection = func(cs tls.ConnectionState) (retErr error) {
+ if host == "log.tailscale.io" && hostinfo.IsNATLabGuestVM() {
+ // Allow log.tailscale.io TLS MITM for integration tests when
+ // the client's running within a NATLab VM.
+ return nil
+ }
+
// Perform some health checks on this certificate before we do
// any verification.
+ var selfSignedIssuer string
+ if certs := cs.PeerCertificates; len(certs) > 0 && certIsSelfSigned(certs[0]) {
+ selfSignedIssuer = certs[0].Issuer.String()
+ }
if ht != nil {
- if certIsSelfSigned(cs.PeerCertificates[0]) {
- // Self-signed certs are never valid.
- ht.SetTLSConnectionError(cs.ServerName, fmt.Errorf("certificate is self-signed"))
- } else {
- // Ensure we clear any error state for this ServerName.
- ht.SetTLSConnectionError(cs.ServerName, nil)
- }
+ defer func() {
+ if retErr != nil && selfSignedIssuer != "" {
+ // Self-signed certs are never valid.
+ //
+ // TODO(bradfitz): plumb down the selfSignedIssuer as a
+ // structured health warning argument.
+ ht.SetTLSConnectionError(cs.ServerName, fmt.Errorf("likely intercepted connection; certificate is self-signed by %v", selfSignedIssuer))
+ } else {
+ // Ensure we clear any error state for this ServerName.
+ ht.SetTLSConnectionError(cs.ServerName, nil)
+ if selfSignedIssuer != "" {
+ // Log the self-signed issuer, but don't treat it as an error.
+ log.Printf("tlsdial: warning: server cert for %q passed x509 validation but is self-signed by %q", host, selfSignedIssuer)
+ }
+ }
+ }()
}
// First try doing x509 verification with the system's
diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go
index f0c4b10421f5e..3606dd67f7ea2 100644
--- a/net/tsdial/tsdial.go
+++ b/net/tsdial/tsdial.go
@@ -166,6 +166,7 @@ func (d *Dialer) Close() error {
c.Close()
}
d.activeSysConns = nil
+ d.PeerAPITransport().CloseIdleConnections()
return nil
}
diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go
index 8ea73b4b20b13..24defba27a782 100644
--- a/net/tstun/wrap.go
+++ b/net/tstun/wrap.go
@@ -10,6 +10,7 @@ import (
"net/netip"
"os"
"reflect"
+ "runtime"
"slices"
"strings"
"sync"
@@ -17,6 +18,7 @@ import (
"time"
"github.com/gaissmai/bart"
+ "github.com/tailscale/wireguard-go/conn"
"github.com/tailscale/wireguard-go/device"
"github.com/tailscale/wireguard-go/tun"
"go4.org/mem"
@@ -160,6 +162,10 @@ type Wrapper struct {
PreFilterPacketInboundFromWireGuard FilterFunc
// PostFilterPacketInboundFromWireGuard is the inbound filter function that runs after the main filter.
PostFilterPacketInboundFromWireGuard FilterFunc
+ // EndPacketVectorInboundFromWireGuardFlush is a function that runs after all packets in a given vector
+ // have been handled by all filters. Filters may queue packets for the purposes of GRO, requiring an
+ // explicit flush.
+ EndPacketVectorInboundFromWireGuardFlush func()
// PreFilterPacketOutboundToWireGuardNetstackIntercept is a filter function that runs before the main filter
// for packets from the local system. This filter is populated by netstack to hook
// packets that should be handled by netstack. If set, this filter runs before
@@ -894,13 +900,7 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) {
return 0, res.err
}
if res.data == nil {
- n, err := t.injectedRead(res.injected, buffs[0], offset)
- sizes[0] = n
- if err != nil && n == 0 {
- return 0, err
- }
-
- return 1, err
+ return t.injectedRead(res.injected, buffs, sizes, offset)
}
metricPacketOut.Add(int64(len(res.data)))
@@ -955,27 +955,85 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) {
return buffsPos, res.err
}
-// injectedRead handles injected reads, which bypass filters.
-func (t *Wrapper) injectedRead(res tunInjectedRead, buf []byte, offset int) (int, error) {
- metricPacketOut.Add(1)
+const (
+ minTCPHeaderSize = 20
+)
+
+func stackGSOToTunGSO(pkt []byte, gso stack.GSO) (tun.GSOOptions, error) {
+ options := tun.GSOOptions{
+ CsumStart: gso.L3HdrLen,
+ CsumOffset: gso.CsumOffset,
+ GSOSize: gso.MSS,
+ NeedsCsum: gso.NeedsCsum,
+ }
+ switch gso.Type {
+ case stack.GSONone:
+ options.GSOType = tun.GSONone
+ return options, nil
+ case stack.GSOTCPv4:
+ options.GSOType = tun.GSOTCPv4
+ case stack.GSOTCPv6:
+ options.GSOType = tun.GSOTCPv6
+ default:
+ return tun.GSOOptions{}, fmt.Errorf("unsupported gVisor GSOType: %v", gso.Type)
+ }
+ // options.HdrLen is both layer 3 and 4 together, whereas gVisor only
+ // gives us layer 3 length. We have to gather TCP header length
+ // ourselves.
+ if len(pkt) < int(gso.L3HdrLen)+minTCPHeaderSize {
+ return tun.GSOOptions{}, errors.New("gVisor GSOTCP packet length too short")
+ }
+ tcphLen := uint16(pkt[int(gso.L3HdrLen)+12] >> 4 * 4)
+ options.HdrLen = gso.L3HdrLen + tcphLen
+ return options, nil
+}
- var n int
- if !res.packet.IsNil() {
+func invertGSOChecksum(pkt []byte, gso stack.GSO) {
+ if gso.NeedsCsum != true {
+ return
+ }
+ at := int(gso.L3HdrLen + gso.CsumOffset)
+ if at+1 > len(pkt)-1 {
+ return
+ }
+ pkt[at] = ^pkt[at]
+ pkt[at+1] = ^pkt[at+1]
+}
- n = copy(buf[offset:], res.packet.NetworkHeader().Slice())
- n += copy(buf[offset+n:], res.packet.TransportHeader().Slice())
- n += copy(buf[offset+n:], res.packet.Data().AsRange().ToSlice())
- res.packet.DecRef()
+// injectedRead handles injected reads, which bypass filters.
+func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []int, offset int) (n int, err error) {
+ var gso stack.GSO
+
+ pkt := outBuffs[0][offset:]
+ if res.packet != nil {
+ bufN := copy(pkt, res.packet.NetworkHeader().Slice())
+ bufN += copy(pkt[bufN:], res.packet.TransportHeader().Slice())
+ bufN += copy(pkt[bufN:], res.packet.Data().AsRange().ToSlice())
+ gso = res.packet.GSOOptions
+ pkt = pkt[:bufN]
+ defer res.packet.DecRef() // defer DecRef so we may continue to reference it
} else {
- n = copy(buf[offset:], res.data)
+ sizes[0] = copy(pkt, res.data)
+ pkt = pkt[:sizes[0]]
+ n = 1
}
pc := t.peerConfig.Load()
p := parsedPacketPool.Get().(*packet.Parsed)
defer parsedPacketPool.Put(p)
- p.Decode(buf[offset : offset+n])
+ p.Decode(pkt)
+
+ // We invert the transport layer checksum before and after snat() if gVisor
+ // handed us a segment with a partial checksum. A partial checksum is not a
+ // ones' complement of the sum, and incremental checksum updating that could
+ // occur as a result of snat() is not aware of this. Alternatively we could
+ // plumb partial transport layer checksum awareness down through snat(),
+ // but the surface area of such a change is much larger, and not yet
+ // justified by this singular case.
+ invertGSOChecksum(pkt, gso)
pc.snat(p)
+ invertGSOChecksum(pkt, gso)
if m := t.destIPActivity.Load(); m != nil {
if fn := m[p.Dst.Addr()]; fn != nil {
@@ -983,11 +1041,24 @@ func (t *Wrapper) injectedRead(res tunInjectedRead, buf []byte, offset int) (int
}
}
+ if res.packet != nil {
+ var gsoOptions tun.GSOOptions
+ gsoOptions, err = stackGSOToTunGSO(pkt, gso)
+ if err != nil {
+ return 0, err
+ }
+ n, err = tun.GSOSplit(pkt, gsoOptions, outBuffs, sizes, offset)
+ }
+
if stats := t.stats.Load(); stats != nil {
- stats.UpdateTxVirtual(buf[offset:][:n])
+ for i := 0; i < n; i++ {
+ stats.UpdateTxVirtual(outBuffs[i][offset : offset+sizes[i]])
+ }
}
+
t.noteActivity()
- return n, nil
+ metricPacketOut.Add(int64(n))
+ return n, err
}
func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook capture.Callback, pc *peerConfigTable) filter.Response {
@@ -1112,6 +1183,9 @@ func (t *Wrapper) Write(buffs [][]byte, offset int) (int, error) {
}
}
}
+ if t.EndPacketVectorInboundFromWireGuardFlush != nil {
+ t.EndPacketVectorInboundFromWireGuardFlush()
+ }
if t.disableFilter {
i = len(buffs)
}
@@ -1288,6 +1362,14 @@ func (t *Wrapper) InjectOutboundPacketBuffer(pkt *stack.PacketBuffer) error {
}
func (t *Wrapper) BatchSize() int {
+ if runtime.GOOS == "linux" {
+ // Always setup Linux to handle vectors, even in the very rare case that
+ // the underlying t.tdev returns 1. gVisor GSO is always enabled for
+ // Linux, and we cannot make a determination on gVisor usage at
+ // wireguard-go.Device startup, which is when this value matters for
+ // packet memory init.
+ return conn.IdealBatchSize
+ }
return t.tdev.BatchSize()
}
diff --git a/net/wsconn/wsconn.go b/net/wsconn/wsconn.go
index 2e952f14f8de8..22b511ea81273 100644
--- a/net/wsconn/wsconn.go
+++ b/net/wsconn/wsconn.go
@@ -3,7 +3,7 @@
// Package wsconn contains an adapter type that turns
// a websocket connection into a net.Conn. It a temporary fork of the
-// netconn.go file from the nhooyr.io/websocket package while we wait for
+// netconn.go file from the github.com/coder/websocket package while we wait for
// https://github.com/nhooyr/websocket/pull/350 to be merged.
package wsconn
@@ -18,7 +18,7 @@ import (
"sync/atomic"
"time"
- "nhooyr.io/websocket"
+ "github.com/coder/websocket"
)
// NetConn converts a *websocket.Conn into a net.Conn.
diff --git a/posture/hwaddr.go b/posture/hwaddr.go
index a38cc5be0352e..dd0b6d8be77ce 100644
--- a/posture/hwaddr.go
+++ b/posture/hwaddr.go
@@ -22,5 +22,5 @@ func GetHardwareAddrs() (hwaddrs []string, err error) {
}
})
slices.Sort(hwaddrs)
- return
+ return slices.Compact(hwaddrs), err
}
diff --git a/prober/prober.go b/prober/prober.go
index 36afb1a39659c..2a43628bda908 100644
--- a/prober/prober.go
+++ b/prober/prober.go
@@ -7,19 +7,26 @@
package prober
import (
+ "container/ring"
"context"
- "errors"
+ "encoding/json"
"fmt"
"hash/fnv"
"log"
"maps"
"math/rand"
+ "net/http"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
+ "tailscale.com/tsweb"
)
+// recentHistSize is the number of recent probe results and latencies to keep
+// in memory.
+const recentHistSize = 10
+
// ProbeClass defines a probe of a specific type: a probing function that will
// be regularly ran, and metric labels that will be added automatically to all
// probes using this class.
@@ -106,6 +113,14 @@ func (p *Prober) Run(name string, interval time.Duration, labels Labels, pc Prob
l[k] = v
}
+ probe := newProbe(p, name, interval, l, pc)
+ p.probes[name] = probe
+ go probe.loop()
+ return probe
+}
+
+// newProbe creates a new Probe with the given parameters, but does not start it.
+func newProbe(p *Prober, name string, interval time.Duration, l prometheus.Labels, pc ProbeClass) *Probe {
ctx, cancel := context.WithCancel(context.Background())
probe := &Probe{
prober: p,
@@ -117,6 +132,9 @@ func (p *Prober) Run(name string, interval time.Duration, labels Labels, pc Prob
probeClass: pc,
interval: interval,
initialDelay: initialDelay(name, interval),
+ successHist: ring.New(recentHistSize),
+ latencyHist: ring.New(recentHistSize),
+
metrics: prometheus.NewRegistry(),
metricLabels: l,
mInterval: prometheus.NewDesc("interval_secs", "Probe interval in seconds", nil, l),
@@ -131,15 +149,14 @@ func (p *Prober) Run(name string, interval time.Duration, labels Labels, pc Prob
Name: "seconds_total", Help: "Total amount of time spent executing the probe", ConstLabels: l,
}, []string{"status"}),
}
-
- prometheus.WrapRegistererWithPrefix(p.namespace+"_", p.metrics).MustRegister(probe.metrics)
+ if p.metrics != nil {
+ prometheus.WrapRegistererWithPrefix(p.namespace+"_", p.metrics).MustRegister(probe.metrics)
+ }
probe.metrics.MustRegister(probe)
-
- p.probes[name] = probe
- go probe.loop()
return probe
}
+// unregister removes a probe from the prober's internal state.
func (p *Prober) unregister(probe *Probe) {
p.mu.Lock()
defer p.mu.Unlock()
@@ -206,6 +223,7 @@ type Probe struct {
ctx context.Context
cancel context.CancelFunc // run to initiate shutdown
stopped chan struct{} // closed when shutdown is complete
+ runMu sync.Mutex // ensures only one probe runs at a time
name string
probeClass ProbeClass
@@ -232,6 +250,10 @@ type Probe struct {
latency time.Duration // last successful probe latency
succeeded bool // whether the last doProbe call succeeded
lastErr error
+
+ // History of recent probe results and latencies.
+ successHist *ring.Ring
+ latencyHist *ring.Ring
}
// Close shuts down the Probe and unregisters it from its Prober.
@@ -278,13 +300,17 @@ func (p *Probe) loop() {
}
}
-// run invokes fun and records the results.
+// run invokes the probe function and records the result. It returns the probe
+// result and an error if the probe failed.
//
-// fun is invoked with a timeout slightly less than interval, so that
-// the probe either succeeds or fails before the next cycle is
-// scheduled to start.
-func (p *Probe) run() {
- start := p.recordStart()
+// The probe function is invoked with a timeout slightly less than interval, so
+// that the probe either succeeds or fails before the next cycle is scheduled to
+// start.
+func (p *Probe) run() (pi ProbeInfo, err error) {
+ p.runMu.Lock()
+ defer p.runMu.Unlock()
+
+ p.recordStart()
defer func() {
// Prevent a panic within one probe function from killing the
// entire prober, so that a single buggy probe doesn't destroy
@@ -293,29 +319,30 @@ func (p *Probe) run() {
// alert for debugging.
if r := recover(); r != nil {
log.Printf("probe %s panicked: %v", p.name, r)
- p.recordEnd(start, errors.New("panic"))
+ err = fmt.Errorf("panic: %v", r)
+ p.recordEnd(err)
}
}()
timeout := time.Duration(float64(p.interval) * 0.8)
ctx, cancel := context.WithTimeout(p.ctx, timeout)
defer cancel()
- err := p.probeClass.Probe(ctx)
- p.recordEnd(start, err)
+ err = p.probeClass.Probe(ctx)
+ p.recordEnd(err)
if err != nil {
log.Printf("probe %s: %v", p.name, err)
}
+ pi = p.probeInfoLocked()
+ return
}
-func (p *Probe) recordStart() time.Time {
- st := p.prober.now()
+func (p *Probe) recordStart() {
p.mu.Lock()
- defer p.mu.Unlock()
- p.start = st
- return st
+ p.start = p.prober.now()
+ p.mu.Unlock()
}
-func (p *Probe) recordEnd(start time.Time, err error) {
+func (p *Probe) recordEnd(err error) {
end := p.prober.now()
p.mu.Lock()
defer p.mu.Unlock()
@@ -327,22 +354,55 @@ func (p *Probe) recordEnd(start time.Time, err error) {
p.latency = latency
p.mAttempts.WithLabelValues("ok").Inc()
p.mSeconds.WithLabelValues("ok").Add(latency.Seconds())
+ p.latencyHist.Value = latency
+ p.latencyHist = p.latencyHist.Next()
} else {
p.latency = 0
p.mAttempts.WithLabelValues("fail").Inc()
p.mSeconds.WithLabelValues("fail").Add(latency.Seconds())
}
+ p.successHist.Value = p.succeeded
+ p.successHist = p.successHist.Next()
}
-// ProbeInfo is the state of a Probe.
+// ProbeInfo is a snapshot of the configuration and state of a Probe.
type ProbeInfo struct {
- Start time.Time
- End time.Time
- Latency string
- Result bool
- Error string
+ Name string
+ Class string
+ Interval time.Duration
+ Labels map[string]string
+ Start time.Time
+ End time.Time
+ Latency time.Duration
+ Result bool
+ Error string
+ RecentResults []bool
+ RecentLatencies []time.Duration
+}
+
+// RecentSuccessRatio returns the success ratio of the probe in the recent history.
+func (pb ProbeInfo) RecentSuccessRatio() float64 {
+ if len(pb.RecentResults) == 0 {
+ return 0
+ }
+ var sum int
+ for _, r := range pb.RecentResults {
+ if r {
+ sum++
+ }
+ }
+ return float64(sum) / float64(len(pb.RecentResults))
}
+// RecentMedianLatency returns the median latency of the probe in the recent history.
+func (pb ProbeInfo) RecentMedianLatency() time.Duration {
+ if len(pb.RecentLatencies) == 0 {
+ return 0
+ }
+ return pb.RecentLatencies[len(pb.RecentLatencies)/2]
+}
+
+// ProbeInfo returns the state of all probes.
func (p *Prober) ProbeInfo() map[string]ProbeInfo {
out := map[string]ProbeInfo{}
@@ -352,24 +412,99 @@ func (p *Prober) ProbeInfo() map[string]ProbeInfo {
probes = append(probes, probe)
}
p.mu.Unlock()
-
for _, probe := range probes {
probe.mu.Lock()
- inf := ProbeInfo{
- Start: probe.start,
- End: probe.end,
- Result: probe.succeeded,
+ out[probe.name] = probe.probeInfoLocked()
+ probe.mu.Unlock()
+ }
+ return out
+}
+
+// probeInfoLocked returns the state of the probe.
+func (probe *Probe) probeInfoLocked() ProbeInfo {
+ inf := ProbeInfo{
+ Name: probe.name,
+ Class: probe.probeClass.Class,
+ Interval: probe.interval,
+ Labels: probe.metricLabels,
+ Start: probe.start,
+ End: probe.end,
+ Result: probe.succeeded,
+ }
+ if probe.lastErr != nil {
+ inf.Error = probe.lastErr.Error()
+ }
+ if probe.latency > 0 {
+ inf.Latency = probe.latency
+ }
+ probe.latencyHist.Do(func(v any) {
+ if l, ok := v.(time.Duration); ok {
+ inf.RecentLatencies = append(inf.RecentLatencies, l)
+ }
+ })
+ probe.successHist.Do(func(v any) {
+ if r, ok := v.(bool); ok {
+ inf.RecentResults = append(inf.RecentResults, r)
}
- if probe.lastErr != nil {
- inf.Error = probe.lastErr.Error()
+ })
+ return inf
+}
+
+// RunHandlerResponse is the JSON response format for the RunHandler.
+type RunHandlerResponse struct {
+ ProbeInfo ProbeInfo
+ PreviousSuccessRatio float64
+ PreviousMedianLatency time.Duration
+}
+
+// RunHandler runs a probe by name and returns the result as an HTTP response.
+func (p *Prober) RunHandler(w http.ResponseWriter, r *http.Request) error {
+ // Look up prober by name.
+ name := r.FormValue("name")
+ if name == "" {
+ return tsweb.Error(http.StatusBadRequest, "missing name parameter", nil)
+ }
+ p.mu.Lock()
+ probe, ok := p.probes[name]
+ p.mu.Unlock()
+ if !ok {
+ return tsweb.Error(http.StatusNotFound, fmt.Sprintf("unknown probe %q", name), nil)
+ }
+
+ probe.mu.Lock()
+ prevInfo := probe.probeInfoLocked()
+ probe.mu.Unlock()
+
+ info, err := probe.run()
+ respStatus := http.StatusOK
+ if err != nil {
+ respStatus = http.StatusFailedDependency
+ }
+
+ // Return serialized JSON response if the client requested JSON
+ if r.Header.Get("Accept") == "application/json" {
+ resp := &RunHandlerResponse{
+ ProbeInfo: info,
+ PreviousSuccessRatio: prevInfo.RecentSuccessRatio(),
+ PreviousMedianLatency: prevInfo.RecentMedianLatency(),
}
- if probe.latency > 0 {
- inf.Latency = probe.latency.String()
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(respStatus)
+ if err := json.NewEncoder(w).Encode(resp); err != nil {
+ return tsweb.Error(http.StatusInternalServerError, "error encoding JSON response", err)
}
- out[probe.name] = inf
- probe.mu.Unlock()
+ return nil
}
- return out
+
+ stats := fmt.Sprintf("Last %d probes: success rate %d%%, median latency %v\n",
+ len(prevInfo.RecentResults),
+ int(prevInfo.RecentSuccessRatio()*100), prevInfo.RecentMedianLatency())
+ if err != nil {
+ return tsweb.Error(respStatus, fmt.Sprintf("Probe failed: %s\n%s", err.Error(), stats), err)
+ }
+ w.WriteHeader(respStatus)
+ w.Write([]byte(fmt.Sprintf("Probe succeeded in %v\n%s", info.Latency, stats)))
+ return nil
}
// Describe implements prometheus.Collector.
diff --git a/prober/prober_test.go b/prober/prober_test.go
index af645ef004d92..742a914b24661 100644
--- a/prober/prober_test.go
+++ b/prober/prober_test.go
@@ -5,16 +5,22 @@ package prober
import (
"context"
+ "encoding/json"
"errors"
"fmt"
+ "io"
+ "net/http/httptest"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
"github.com/prometheus/client_golang/prometheus/testutil"
"tailscale.com/tstest"
+ "tailscale.com/tsweb"
)
const (
@@ -292,6 +298,254 @@ func TestOnceMode(t *testing.T) {
}
}
+func TestProberProbeInfo(t *testing.T) {
+ clk := newFakeTime()
+ p := newForTest(clk.Now, clk.NewTicker).WithOnce(true)
+
+ p.Run("probe1", probeInterval, nil, FuncProbe(func(context.Context) error {
+ clk.Advance(500 * time.Millisecond)
+ return nil
+ }))
+ p.Run("probe2", probeInterval, nil, FuncProbe(func(context.Context) error { return fmt.Errorf("error2") }))
+ p.Wait()
+
+ info := p.ProbeInfo()
+ wantInfo := map[string]ProbeInfo{
+ "probe1": {
+ Name: "probe1",
+ Interval: probeInterval,
+ Labels: map[string]string{"class": "", "name": "probe1"},
+ Latency: 500 * time.Millisecond,
+ Result: true,
+ RecentResults: []bool{true},
+ RecentLatencies: []time.Duration{500 * time.Millisecond},
+ },
+ "probe2": {
+ Name: "probe2",
+ Interval: probeInterval,
+ Labels: map[string]string{"class": "", "name": "probe2"},
+ Error: "error2",
+ RecentResults: []bool{false},
+ RecentLatencies: nil, // no latency for failed probes
+ },
+ }
+
+ if diff := cmp.Diff(wantInfo, info, cmpopts.IgnoreFields(ProbeInfo{}, "Start", "End")); diff != "" {
+ t.Fatalf("unexpected ProbeInfo (-want +got):\n%s", diff)
+ }
+}
+
+func TestProbeInfoRecent(t *testing.T) {
+ type probeResult struct {
+ latency time.Duration
+ err error
+ }
+ tests := []struct {
+ name string
+ results []probeResult
+ wantProbeInfo ProbeInfo
+ wantRecentSuccessRatio float64
+ wantRecentMedianLatency time.Duration
+ }{
+ {
+ name: "no_runs",
+ wantProbeInfo: ProbeInfo{},
+ wantRecentSuccessRatio: 0,
+ wantRecentMedianLatency: 0,
+ },
+ {
+ name: "single_success",
+ results: []probeResult{{latency: 100 * time.Millisecond, err: nil}},
+ wantProbeInfo: ProbeInfo{
+ Latency: 100 * time.Millisecond,
+ Result: true,
+ RecentResults: []bool{true},
+ RecentLatencies: []time.Duration{100 * time.Millisecond},
+ },
+ wantRecentSuccessRatio: 1,
+ wantRecentMedianLatency: 100 * time.Millisecond,
+ },
+ {
+ name: "single_failure",
+ results: []probeResult{{latency: 100 * time.Millisecond, err: errors.New("error123")}},
+ wantProbeInfo: ProbeInfo{
+ Result: false,
+ RecentResults: []bool{false},
+ RecentLatencies: nil,
+ Error: "error123",
+ },
+ wantRecentSuccessRatio: 0,
+ wantRecentMedianLatency: 0,
+ },
+ {
+ name: "recent_mix",
+ results: []probeResult{
+ {latency: 10 * time.Millisecond, err: errors.New("error1")},
+ {latency: 20 * time.Millisecond, err: nil},
+ {latency: 30 * time.Millisecond, err: nil},
+ {latency: 40 * time.Millisecond, err: errors.New("error4")},
+ {latency: 50 * time.Millisecond, err: nil},
+ {latency: 60 * time.Millisecond, err: nil},
+ {latency: 70 * time.Millisecond, err: errors.New("error7")},
+ {latency: 80 * time.Millisecond, err: nil},
+ },
+ wantProbeInfo: ProbeInfo{
+ Result: true,
+ Latency: 80 * time.Millisecond,
+ RecentResults: []bool{false, true, true, false, true, true, false, true},
+ RecentLatencies: []time.Duration{
+ 20 * time.Millisecond,
+ 30 * time.Millisecond,
+ 50 * time.Millisecond,
+ 60 * time.Millisecond,
+ 80 * time.Millisecond,
+ },
+ },
+ wantRecentSuccessRatio: 0.625,
+ wantRecentMedianLatency: 50 * time.Millisecond,
+ },
+ {
+ name: "only_last_10",
+ results: []probeResult{
+ {latency: 10 * time.Millisecond, err: errors.New("old_error")},
+ {latency: 20 * time.Millisecond, err: nil},
+ {latency: 30 * time.Millisecond, err: nil},
+ {latency: 40 * time.Millisecond, err: nil},
+ {latency: 50 * time.Millisecond, err: nil},
+ {latency: 60 * time.Millisecond, err: nil},
+ {latency: 70 * time.Millisecond, err: nil},
+ {latency: 80 * time.Millisecond, err: nil},
+ {latency: 90 * time.Millisecond, err: nil},
+ {latency: 100 * time.Millisecond, err: nil},
+ {latency: 110 * time.Millisecond, err: nil},
+ },
+ wantProbeInfo: ProbeInfo{
+ Result: true,
+ Latency: 110 * time.Millisecond,
+ RecentResults: []bool{true, true, true, true, true, true, true, true, true, true},
+ RecentLatencies: []time.Duration{
+ 20 * time.Millisecond,
+ 30 * time.Millisecond,
+ 40 * time.Millisecond,
+ 50 * time.Millisecond,
+ 60 * time.Millisecond,
+ 70 * time.Millisecond,
+ 80 * time.Millisecond,
+ 90 * time.Millisecond,
+ 100 * time.Millisecond,
+ 110 * time.Millisecond,
+ },
+ },
+ wantRecentSuccessRatio: 1,
+ wantRecentMedianLatency: 70 * time.Millisecond,
+ },
+ }
+
+ clk := newFakeTime()
+ p := newForTest(clk.Now, clk.NewTicker).WithOnce(true)
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ probe := newProbe(p, "", probeInterval, nil, FuncProbe(func(context.Context) error { return nil }))
+ for _, r := range tt.results {
+ probe.recordStart()
+ clk.Advance(r.latency)
+ probe.recordEnd(r.err)
+ }
+ info := probe.probeInfoLocked()
+ if diff := cmp.Diff(tt.wantProbeInfo, info, cmpopts.IgnoreFields(ProbeInfo{}, "Start", "End", "Interval")); diff != "" {
+ t.Fatalf("unexpected ProbeInfo (-want +got):\n%s", diff)
+ }
+ if got := info.RecentSuccessRatio(); got != tt.wantRecentSuccessRatio {
+ t.Errorf("recentSuccessRatio() = %v, want %v", got, tt.wantRecentSuccessRatio)
+ }
+ if got := info.RecentMedianLatency(); got != tt.wantRecentMedianLatency {
+ t.Errorf("recentMedianLatency() = %v, want %v", got, tt.wantRecentMedianLatency)
+ }
+ })
+ }
+}
+
+func TestProberRunHandler(t *testing.T) {
+ clk := newFakeTime()
+
+ tests := []struct {
+ name string
+ probeFunc func(context.Context) error
+ wantResponseCode int
+ wantJSONResponse RunHandlerResponse
+ wantPlaintextResponse string
+ }{
+ {
+ name: "success",
+ probeFunc: func(context.Context) error { return nil },
+ wantResponseCode: 200,
+ wantJSONResponse: RunHandlerResponse{
+ ProbeInfo: ProbeInfo{
+ Name: "success",
+ Interval: probeInterval,
+ Result: true,
+ RecentResults: []bool{true, true},
+ },
+ PreviousSuccessRatio: 1,
+ },
+ wantPlaintextResponse: "Probe succeeded",
+ },
+ {
+ name: "failure",
+ probeFunc: func(context.Context) error { return fmt.Errorf("error123") },
+ wantResponseCode: 424,
+ wantJSONResponse: RunHandlerResponse{
+ ProbeInfo: ProbeInfo{
+ Name: "failure",
+ Interval: probeInterval,
+ Result: false,
+ Error: "error123",
+ RecentResults: []bool{false, false},
+ },
+ },
+ wantPlaintextResponse: "Probe failed",
+ },
+ }
+
+ for _, tt := range tests {
+ for _, reqJSON := range []bool{true, false} {
+ t.Run(fmt.Sprintf("%s_json-%v", tt.name, reqJSON), func(t *testing.T) {
+ p := newForTest(clk.Now, clk.NewTicker).WithOnce(true)
+ probe := p.Run(tt.name, probeInterval, nil, FuncProbe(tt.probeFunc))
+ defer probe.Close()
+ <-probe.stopped // wait for the first run.
+
+ w := httptest.NewRecorder()
+
+ req := httptest.NewRequest("GET", "/prober/run/?name="+tt.name, nil)
+ if reqJSON {
+ req.Header.Set("Accept", "application/json")
+ }
+ tsweb.StdHandler(tsweb.ReturnHandlerFunc(p.RunHandler), tsweb.HandlerOptions{}).ServeHTTP(w, req)
+ if w.Result().StatusCode != tt.wantResponseCode {
+ t.Errorf("unexpected response code: got %d, want %d", w.Code, tt.wantResponseCode)
+ }
+
+ if reqJSON {
+ var gotJSON RunHandlerResponse
+ if err := json.Unmarshal(w.Body.Bytes(), &gotJSON); err != nil {
+ t.Fatalf("failed to unmarshal JSON response: %v; body: %s", err, w.Body.String())
+ }
+ if diff := cmp.Diff(tt.wantJSONResponse, gotJSON, cmpopts.IgnoreFields(ProbeInfo{}, "Start", "End", "Labels", "RecentLatencies")); diff != "" {
+ t.Errorf("unexpected JSON response (-want +got):\n%s", diff)
+ }
+ } else {
+ body, _ := io.ReadAll(w.Result().Body)
+ if !strings.Contains(string(body), tt.wantPlaintextResponse) {
+ t.Errorf("unexpected response body: got %q, want to contain %q", body, tt.wantPlaintextResponse)
+ }
+ }
+ })
+ }
+ }
+
+}
+
type fakeTicker struct {
ch chan time.Time
interval time.Duration
diff --git a/prober/status.go b/prober/status.go
new file mode 100644
index 0000000000000..aa9ef99d05d2c
--- /dev/null
+++ b/prober/status.go
@@ -0,0 +1,124 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package prober
+
+import (
+ "embed"
+ "fmt"
+ "html/template"
+ "net/http"
+ "strings"
+ "time"
+
+ "tailscale.com/tsweb"
+ "tailscale.com/util/mak"
+)
+
+//go:embed status.html
+var statusFiles embed.FS
+var statusTpl = template.Must(template.ParseFS(statusFiles, "status.html"))
+
+type statusHandlerOpt func(*statusHandlerParams)
+type statusHandlerParams struct {
+ title string
+
+ pageLinks map[string]string
+ probeLinks map[string]string
+}
+
+// WithTitle sets the title of the status page.
+func WithTitle(title string) statusHandlerOpt {
+ return func(opts *statusHandlerParams) {
+ opts.title = title
+ }
+}
+
+// WithPageLink adds a top-level link to the status page.
+func WithPageLink(text, url string) statusHandlerOpt {
+ return func(opts *statusHandlerParams) {
+ mak.Set(&opts.pageLinks, text, url)
+ }
+}
+
+// WithProbeLink adds a link to each probe on the status page.
+// The textTpl and urlTpl are Go templates that will be rendered
+// with the respective ProbeInfo struct as the data.
+func WithProbeLink(textTpl, urlTpl string) statusHandlerOpt {
+ return func(opts *statusHandlerParams) {
+ mak.Set(&opts.probeLinks, textTpl, urlTpl)
+ }
+}
+
+// StatusHandler is a handler for the probe overview HTTP endpoint.
+// It shows a list of probes and their current status.
+func (p *Prober) StatusHandler(opts ...statusHandlerOpt) tsweb.ReturnHandlerFunc {
+ params := &statusHandlerParams{
+ title: "Prober Status",
+ }
+ for _, opt := range opts {
+ opt(params)
+ }
+ return func(w http.ResponseWriter, r *http.Request) error {
+ type probeStatus struct {
+ ProbeInfo
+ TimeSinceLast time.Duration
+ Links map[string]template.URL
+ }
+ vars := struct {
+ Title string
+ Links map[string]template.URL
+ TotalProbes int64
+ UnhealthyProbes int64
+ Probes map[string]probeStatus
+ }{
+ Title: params.title,
+ }
+
+ for text, url := range params.pageLinks {
+ mak.Set(&vars.Links, text, template.URL(url))
+ }
+
+ for name, info := range p.ProbeInfo() {
+ vars.TotalProbes++
+ if !info.Result {
+ vars.UnhealthyProbes++
+ }
+ s := probeStatus{ProbeInfo: info}
+ if !info.End.IsZero() {
+ s.TimeSinceLast = time.Since(info.End).Truncate(time.Second)
+ }
+ for textTpl, urlTpl := range params.probeLinks {
+ text, err := renderTemplate(textTpl, info)
+ if err != nil {
+ return tsweb.Error(500, err.Error(), err)
+ }
+ url, err := renderTemplate(urlTpl, info)
+ if err != nil {
+ return tsweb.Error(500, err.Error(), err)
+ }
+ mak.Set(&s.Links, text, template.URL(url))
+ }
+ mak.Set(&vars.Probes, name, s)
+ }
+
+ if err := statusTpl.ExecuteTemplate(w, "status", vars); err != nil {
+ return tsweb.HTTPError{Code: 500, Err: err, Msg: "error rendering status page"}
+ }
+ return nil
+ }
+}
+
+// renderTemplate renders the given Go template with the provided data
+// and returns the result as a string.
+func renderTemplate(tpl string, data any) (string, error) {
+ t, err := template.New("").Parse(tpl)
+ if err != nil {
+ return "", fmt.Errorf("error parsing template %q: %w", tpl, err)
+ }
+ var buf strings.Builder
+ if err := t.ExecuteTemplate(&buf, "", data); err != nil {
+ return "", fmt.Errorf("error rendering template %q with data %v: %w", tpl, data, err)
+ }
+ return buf.String(), nil
+}
diff --git a/prober/status.html b/prober/status.html
new file mode 100644
index 0000000000000..ff0f06c13fe62
--- /dev/null
+++ b/prober/status.html
@@ -0,0 +1,132 @@
+{{define "status"}}
+
+ {{.Title}}
+
+
+
{{.Title}}
+
+
Prober Status:
+ {{if .UnhealthyProbes }}
+ {{.UnhealthyProbes}}
+ out of {{.TotalProbes}} probes failed or never ran.
+ {{else}}
+ All {{.TotalProbes}} probes are healthy
+ {{end}}
+