diff --git a/Dockerfile b/Dockerfile index 8534563f6cf57..8166000eda6fc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,17 +1,13 @@ # Copyright (c) Tailscale Inc & AUTHORS # SPDX-License-Identifier: BSD-3-Clause -############################################################################ +# Note that this Dockerfile is currently NOT used to build any of the published +# Tailscale container images and may have drifted from the image build mechanism +# we use. +# Tailscale images are currently built using https://github.com/tailscale/mkctr, +# and the build script can be found in ./build_docker.sh. # -# WARNING: Tailscale is not yet officially supported in container -# environments, such as Docker and Kubernetes. Though it should work, we -# don't regularly test it, and we know there are some feature limitations. # -# See current bugs tagged "containers": -# https://github.com/tailscale/tailscale/labels/containers -# -############################################################################ - # This Dockerfile includes all the tailscale binaries. # # To build the Dockerfile: @@ -46,7 +42,7 @@ RUN go install \ gvisor.dev/gvisor/pkg/tcpip/stack \ golang.org/x/crypto/ssh \ golang.org/x/crypto/acme \ - nhooyr.io/websocket \ + github.com/coder/websocket \ github.com/mdlayher/netlink COPY . . diff --git a/VERSION.txt b/VERSION.txt index 832e9afb6c139..0834888f55808 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.70.0 +1.72.0 diff --git a/build_docker.sh b/build_docker.sh index 43665172a60ec..1cbdc4b9ef8e8 100755 --- a/build_docker.sh +++ b/build_docker.sh @@ -1,21 +1,11 @@ #!/usr/bin/env sh - -# -# Runs `go build` with flags configured for docker distribution. All -# it does differently from `go build` is burn git commit and version -# information into the binaries inside docker, so that we can track down user -# issues. 
-# -# -############################################################################ -# -# WARNING: Tailscale is not yet officially supported in container -# environments, such as Docker and Kubernetes. Though it should work, we -# don't regularly test it, and we know there are some feature limitations. -# -# See current bugs tagged "containers": -# https://github.com/tailscale/tailscale/labels/containers # -############################################################################ +# This script builds Tailscale container images using +# github.com/tailscale/mkctr. +# By default the images will be tagged with the current version and git +# hash of this repository as produced by ./cmd/mkversion. +# This is the image build mechanism used to build the official Tailscale +# container images. set -eu @@ -49,7 +39,7 @@ case "$TARGET" in -X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \ --base="${BASE}" \ --tags="${TAGS}" \ - --gotags="ts_kube" \ + --gotags="ts_kube,ts_package_container" \ --repos="${REPOS}" \ --push="${PUSH}" \ --target="${PLATFORM}" \ diff --git a/client/tailscale/acl.go b/client/tailscale/acl.go index 48a030a8bd7b5..fc672ded881b4 100644 --- a/client/tailscale/acl.go +++ b/client/tailscale/acl.go @@ -161,7 +161,12 @@ func (c *Client) ACLHuJSON(ctx context.Context) (acl *ACLHuJSON, err error) { // ACLTestFailureSummary specifies the JSON format sent to the // JavaScript client to be rendered in the HTML. type ACLTestFailureSummary struct { - User string `json:"user,omitempty"` + // User is the source ("src") value of the ACL test that failed. + // The name "user" is a legacy holdover from the original naming and + // is kept for compatibility but it may also contain any value + // that's valid in an ACL test "src" field. 
+ User string `json:"user,omitempty"` + Errors []string `json:"errors,omitempty"` Warnings []string `json:"warnings,omitempty"` } @@ -281,6 +286,9 @@ type UserRuleMatch struct { Users []string `json:"users"` Ports []string `json:"ports"` LineNumber int `json:"lineNumber"` + // Via is the list of targets through which Users can access Ports. + // See https://tailscale.com/kb/1378/via for more information. + Via []string `json:"via,omitempty"` // Postures is a list of posture policies that are // associated with this match. The rules can be looked diff --git a/client/tailscale/localclient.go b/client/tailscale/localclient.go index 67bd0c5cfaba5..6f27e56b86fc4 100644 --- a/client/tailscale/localclient.go +++ b/client/tailscale/localclient.go @@ -69,6 +69,14 @@ type LocalClient struct { // connecting to the GUI client variants. UseSocketOnly bool + // OmitAuth, if true, omits sending the local Tailscale daemon any + // authentication token that might be required by the platform. + // + // As of 2024-08-12, only macOS uses an authentication token. OmitAuth is + // meant for when Dial is set and the LocalAPI is being proxied to a + // different operating system, such as in integration tests. + OmitAuth bool + // tsClient does HTTP requests to the local Tailscale daemon. // It's lazily initialized on first use. tsClient *http.Client @@ -124,8 +132,10 @@ func (lc *LocalClient) DoLocalRequest(req *http.Request) (*http.Response, error) }, } }) - if _, token, err := safesocket.LocalTCPPortAndToken(); err == nil { - req.SetBasicAuth("", token) + if !lc.OmitAuth { + if _, token, err := safesocket.LocalTCPPortAndToken(); err == nil { + req.SetBasicAuth("", token) + } } return lc.tsClient.Do(req) } @@ -933,7 +943,20 @@ func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err e // // API maturity: this is considered a stable API. 
func (lc *LocalClient) CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { - res, err := lc.send(ctx, "GET", "/localapi/v0/cert/"+domain+"?type=pair", 200, nil) + return lc.CertPairWithValidity(ctx, domain, 0) +} + +// CertPairWithValidity returns a cert and private key for the provided DNS +// domain. +// +// It returns a cached certificate from disk if it's still valid. +// When minValidity is non-zero, the returned certificate will be valid for at +// least the given duration, if permitted by the CA. If the certificate is +// valid, but for less than minValidity, it will be synchronously renewed. +// +// API maturity: this is considered a stable API. +func (lc *LocalClient) CertPairWithValidity(ctx context.Context, domain string, minValidity time.Duration) (certPEM, keyPEM []byte, err error) { + res, err := lc.send(ctx, "GET", fmt.Sprintf("/localapi/v0/cert/%s?type=pair&min_validity=%s", domain, minValidity), 200, nil) if err != nil { return nil, nil, err } diff --git a/client/web/package.json b/client/web/package.json index 598ae3e518b1c..4b3afb1df6ef5 100644 --- a/client/web/package.json +++ b/client/web/package.json @@ -3,7 +3,7 @@ "version": "0.0.1", "license": "BSD-3-Clause", "engines": { - "node": "18.16.1", + "node": "18.20.4", "yarn": "1.22.19" }, "type": "module", diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 630ef0bf6bba2..fdf71c3eae398 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -52,6 +52,12 @@ // ${TS_CERT_DOMAIN}, it will be replaced with the value of the available FQDN. // It cannot be used in conjunction with TS_DEST_IP. The file is watched for changes, // and will be re-applied when it changes. +// - TS_HEALTHCHECK_ADDR_PORT: if specified, an HTTP health endpoint will be +// served at /healthz at the provided address, which should be in form [
]:. If not set, no health check will be run. If set to :, addr will default to 0.0.0.0 +// The health endpoint will return 200 OK if this node has at least one tailnet IP address, +// otherwise returns 503. +// NB: the health criteria might change in the future. // - TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR: if specified, a path to a // directory that contains tailscaled config in file. The config file needs to be // named cap-.hujson. If this is set, TS_HOSTNAME, @@ -95,6 +101,7 @@ import ( "log" "math" "net" + "net/http" "net/netip" "os" "os/exec" @@ -158,6 +165,7 @@ func main() { AllowProxyingClusterTrafficViaIngress: defaultBool("EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS", false), PodIP: defaultEnv("POD_IP", ""), EnableForwardingOptimizations: defaultBool("TS_EXPERIMENTAL_ENABLE_FORWARDING_OPTIMIZATIONS", false), + HealthCheckAddrPort: defaultEnv("TS_HEALTHCHECK_ADDR_PORT", ""), } if err := cfg.validate(); err != nil { @@ -349,6 +357,9 @@ authLoop: certDomain = new(atomic.Pointer[string]) certDomainChanged = make(chan bool, 1) + + h = &healthz{} // http server for the healthz endpoint + healthzRunner = sync.OnceFunc(func() { runHealthz(cfg.HealthCheckAddrPort, h) }) ) if cfg.ServeConfigPath != "" { go watchServeConfigChanges(ctx, cfg.ServeConfigPath, certDomainChanged, certDomain, client) @@ -565,6 +576,13 @@ runLoop: log.Fatalf("storing device IPs and FQDN in Kubernetes Secret: %v", err) } } + + if cfg.HealthCheckAddrPort != "" { + h.Lock() + h.hasAddrs = len(addrs) != 0 + h.Unlock() + healthzRunner() + } } if !startupTasksDone { // For containerboot instances that act as TCP @@ -1152,7 +1170,8 @@ type settings struct { // PodIP is the IP of the Pod if running in Kubernetes. This is used // when setting up rules to proxy cluster traffic to cluster ingress // target. 
- PodIP string + PodIP string + HealthCheckAddrPort string } func (s *settings) validate() error { @@ -1201,6 +1220,11 @@ func (s *settings) validate() error { if s.EnableForwardingOptimizations && s.UserspaceMode { return errors.New("TS_EXPERIMENTAL_ENABLE_FORWARDING_OPTIMIZATIONS is not supported in userspace mode") } + if s.HealthCheckAddrPort != "" { + if _, err := netip.ParseAddrPort(s.HealthCheckAddrPort); err != nil { + return fmt.Errorf("error parsing TS_HEALTHCHECK_ADDR_PORT value %q: %w", s.HealthCheckAddrPort, err) + } + } return nil } @@ -1374,3 +1398,41 @@ func tailscaledConfigFilePath() string { log.Printf("Using tailscaled config file %q for capability version %q", maxCompatVer, tailcfg.CurrentCapabilityVersion) return path.Join(dir, kubeutils.TailscaledConfigFileNameForCap(maxCompatVer)) } + +// healthz is a simple health check server, if enabled it returns 200 OK if +// this tailscale node currently has at least one tailnet IP address else +// returns 503. +type healthz struct { + sync.Mutex + hasAddrs bool +} + +func (h *healthz) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.Lock() + defer h.Unlock() + if h.hasAddrs { + w.Write([]byte("ok")) + } else { + http.Error(w, "node currently has no tailscale IPs", http.StatusServiceUnavailable) + } +} + +// runHealthz runs a simple HTTP health endpoint on /healthz, listening on the +// provided address. A containerized tailscale instance is considered healthy if +// it has at least one tailnet IP address. 
+func runHealthz(addr string, h *healthz) { + lis, err := net.Listen("tcp", addr) + if err != nil { + log.Fatalf("error listening on the provided health endpoint address %q: %v", addr, err) + } + mux := http.NewServeMux() + mux.Handle("/healthz", h) + log.Printf("Running healthcheck endpoint at %s/healthz", addr) + hs := &http.Server{Handler: mux} + + go func() { + if err := hs.Serve(lis); err != nil { + log.Fatalf("failed running health endpoint: %v", err) + } + }() +} diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 825b33facd4c1..1492c4ebb4721 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -7,10 +7,14 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus + github.com/coder/websocket from tailscale.com/cmd/derper+ + github.com/coder/websocket/internal/errd from github.com/coder/websocket + github.com/coder/websocket/internal/util from github.com/coder/websocket + github.com/coder/websocket/internal/xsync from github.com/coder/websocket L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw W 💣 github.com/dblohm7/wingoes from tailscale.com/util/winutil github.com/fxamacker/cbor/v2 from tailscale.com/tka - github.com/go-json-experiment/json from tailscale.com/types/opt + github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ @@ -82,10 +86,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa 
google.golang.org/protobuf/runtime/protoiface from google.golang.org/protobuf/internal/impl+ google.golang.org/protobuf/runtime/protoimpl from github.com/prometheus/client_model/go+ google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ - nhooyr.io/websocket from tailscale.com/cmd/derper+ - nhooyr.io/websocket/internal/errd from nhooyr.io/websocket - nhooyr.io/websocket/internal/util from nhooyr.io/websocket - nhooyr.io/websocket/internal/xsync from nhooyr.io/websocket tailscale.com from tailscale.com/version tailscale.com/atomicfile from tailscale.com/cmd/derper+ tailscale.com/client/tailscale from tailscale.com/derp @@ -146,9 +146,11 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/cloudenv from tailscale.com/hostinfo+ W tailscale.com/util/cmpver from tailscale.com/net/tshttpproxy tailscale.com/util/ctxkey from tailscale.com/tsweb+ + 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/hostinfo+ tailscale.com/util/fastuuid from tailscale.com/tsweb + 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale tailscale.com/util/lineread from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/net/netns @@ -159,6 +161,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/singleflight from tailscale.com/net/dnscache tailscale.com/util/slicesx from tailscale.com/cmd/derper+ tailscale.com/util/syspolicy from tailscale.com/ipn + tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting + tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy tailscale.com/util/vizerror from tailscale.com/tailcfg+ W 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+ W 💣 
tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ @@ -180,6 +184,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ W golang.org/x/exp/constraints from tailscale.com/util/winutil + golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting L golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from net/http diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 76151175c5c13..80c9dc44f138f 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -237,7 +237,7 @@ func main() { tsweb.AddBrowserHeaders(w) io.WriteString(w, "User-agent: *\nDisallow: /\n") })) - mux.Handle("/generate_204", http.HandlerFunc(serveNoContent)) + mux.Handle("/generate_204", http.HandlerFunc(derphttp.ServeNoContent)) debug := tsweb.Debugger(mux) debug.KV("TLS hostname", *hostname) debug.KV("Mesh key", s.HasMeshKey()) @@ -337,7 +337,7 @@ func main() { if *httpPort > -1 { go func() { port80mux := http.NewServeMux() - port80mux.HandleFunc("/generate_204", serveNoContent) + port80mux.HandleFunc("/generate_204", derphttp.ServeNoContent) port80mux.Handle("/", certManager.HTTPHandler(tsweb.Port80Handler{Main: mux})) port80srv := &http.Server{ Addr: net.JoinHostPort(listenHost, fmt.Sprintf("%d", *httpPort)), @@ -378,31 +378,6 @@ func main() { } } -const ( - noContentChallengeHeader = "X-Tailscale-Challenge" - noContentResponseHeader = "X-Tailscale-Response" -) - -// For captive portal detection -func serveNoContent(w http.ResponseWriter, r *http.Request) { - if challenge := r.Header.Get(noContentChallengeHeader); challenge != "" { - badChar := strings.IndexFunc(challenge, func(r rune) bool { - return !isChallengeChar(r) - }) != -1 - if len(challenge) <= 64 && !badChar { - w.Header().Set(noContentResponseHeader, "response 
"+challenge) - } - } - w.WriteHeader(http.StatusNoContent) -} - -func isChallengeChar(c rune) bool { - // Semi-randomly chosen as a limited set of valid characters - return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || - ('0' <= c && c <= '9') || - c == '.' || c == '-' || c == '_' -} - var validProdHostname = regexp.MustCompile(`^derp([^.]*)\.tailscale\.com\.?$`) func prodAutocertHostPolicy(_ context.Context, host string) error { diff --git a/cmd/derper/derper_test.go b/cmd/derper/derper_test.go index 1af7c3abeacf1..553a78f9f6426 100644 --- a/cmd/derper/derper_test.go +++ b/cmd/derper/derper_test.go @@ -10,6 +10,7 @@ import ( "strings" "testing" + "tailscale.com/derp/derphttp" "tailscale.com/tstest/deptest" ) @@ -76,20 +77,20 @@ func TestNoContent(t *testing.T) { t.Run(tt.name, func(t *testing.T) { req, _ := http.NewRequest("GET", "https://localhost/generate_204", nil) if tt.input != "" { - req.Header.Set(noContentChallengeHeader, tt.input) + req.Header.Set(derphttp.NoContentChallengeHeader, tt.input) } w := httptest.NewRecorder() - serveNoContent(w, req) + derphttp.ServeNoContent(w, req) resp := w.Result() if tt.want == "" { - if h, found := resp.Header[noContentResponseHeader]; found { + if h, found := resp.Header[derphttp.NoContentResponseHeader]; found { t.Errorf("got %+v; expected no response header", h) } return } - if got := resp.Header.Get(noContentResponseHeader); got != tt.want { + if got := resp.Header.Get(derphttp.NoContentResponseHeader); got != tt.want { t.Errorf("got %q; want %q", got, tt.want) } }) diff --git a/cmd/derper/websocket.go b/cmd/derper/websocket.go index 83ab388765056..05f40deb816d5 100644 --- a/cmd/derper/websocket.go +++ b/cmd/derper/websocket.go @@ -10,7 +10,7 @@ import ( "net/http" "strings" - "nhooyr.io/websocket" + "github.com/coder/websocket" "tailscale.com/derp" "tailscale.com/net/wsconn" ) diff --git a/cmd/derpprobe/derpprobe.go b/cmd/derpprobe/derpprobe.go index 117d735820792..1d0ec32c3c064 100644 --- 
a/cmd/derpprobe/derpprobe.go +++ b/cmd/derpprobe/derpprobe.go @@ -7,8 +7,6 @@ package main import ( "flag" "fmt" - "html" - "io" "log" "net/http" "sort" @@ -70,8 +68,13 @@ func main() { } mux := http.NewServeMux() - tsweb.Debugger(mux) - mux.HandleFunc("/", http.HandlerFunc(serveFunc(p))) + d := tsweb.Debugger(mux) + d.Handle("probe-run", "Run a probe", tsweb.StdHandler(tsweb.ReturnHandlerFunc(p.RunHandler), tsweb.HandlerOptions{Logf: log.Printf})) + mux.Handle("/", tsweb.StdHandler(p.StatusHandler( + prober.WithTitle("DERP Prober"), + prober.WithPageLink("Prober metrics", "/debug/varz"), + prober.WithProbeLink("Run Probe", "/debug/probe-run?name={{.Name}}"), + ), tsweb.HandlerOptions{Logf: log.Printf})) log.Printf("Listening on %s", *listen) log.Fatal(http.ListenAndServe(*listen, mux)) } @@ -105,26 +108,3 @@ func getOverallStatus(p *prober.Prober) (o overallStatus) { sort.Strings(o.good) return } - -func serveFunc(p *prober.Prober) func(w http.ResponseWriter, r *http.Request) { - return func(w http.ResponseWriter, r *http.Request) { - st := getOverallStatus(p) - summary := "All good" - if (float64(len(st.bad)) / float64(len(st.bad)+len(st.good))) > 0.25 { - // Returning a 500 allows monitoring this server externally and configuring - // an alert on HTTP response code. - w.WriteHeader(500) - summary = fmt.Sprintf("%d problems", len(st.bad)) - } - - io.WriteString(w, "\n") - fmt.Fprintf(w, "

derp probe

\n%s:
    ", summary) - for _, s := range st.bad { - fmt.Fprintf(w, "
  • %s
  • \n", html.EscapeString(s)) - } - for _, s := range st.good { - fmt.Fprintf(w, "
  • %s
  • \n", html.EscapeString(s)) - } - io.WriteString(w, "
\n") - } -} diff --git a/cmd/gitops-pusher/gitops-pusher.go b/cmd/gitops-pusher/gitops-pusher.go index 60bee6064d439..c33937ef24959 100644 --- a/cmd/gitops-pusher/gitops-pusher.go +++ b/cmd/gitops-pusher/gitops-pusher.go @@ -28,19 +28,20 @@ import ( ) var ( - rootFlagSet = flag.NewFlagSet("gitops-pusher", flag.ExitOnError) - policyFname = rootFlagSet.String("policy-file", "./policy.hujson", "filename for policy file") - cacheFname = rootFlagSet.String("cache-file", "./version-cache.json", "filename for the previous known version hash") - timeout = rootFlagSet.Duration("timeout", 5*time.Minute, "timeout for the entire CI run") - githubSyntax = rootFlagSet.Bool("github-syntax", true, "use GitHub Action error syntax (https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-error-message)") - apiServer = rootFlagSet.String("api-server", "api.tailscale.com", "API server to contact") + rootFlagSet = flag.NewFlagSet("gitops-pusher", flag.ExitOnError) + policyFname = rootFlagSet.String("policy-file", "./policy.hujson", "filename for policy file") + cacheFname = rootFlagSet.String("cache-file", "./version-cache.json", "filename for the previous known version hash") + timeout = rootFlagSet.Duration("timeout", 5*time.Minute, "timeout for the entire CI run") + githubSyntax = rootFlagSet.Bool("github-syntax", true, "use GitHub Action error syntax (https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-error-message)") + apiServer = rootFlagSet.String("api-server", "api.tailscale.com", "API server to contact") + failOnManualEdits = rootFlagSet.Bool("fail-on-manual-edits", false, "fail if manual edits to the ACLs in the admin panel are detected; when set to false (the default) only a warning is printed") ) -func modifiedExternallyError() { +func modifiedExternallyError() error { if *githubSyntax { - fmt.Printf("::warning file=%s,line=1,col=1,title=Policy File Modified Externally::The policy file 
was modified externally in the admin console.\n", *policyFname) + return fmt.Errorf("::warning file=%s,line=1,col=1,title=Policy File Modified Externally::The policy file was modified externally in the admin console.", *policyFname) } else { - fmt.Printf("The policy file was modified externally in the admin console.\n") + return fmt.Errorf("The policy file was modified externally in the admin console.") } } @@ -65,16 +66,22 @@ func apply(cache *Cache, client *http.Client, tailnet, apiKey string) func(conte log.Printf("local: %s", localEtag) log.Printf("cache: %s", cache.PrevETag) - if cache.PrevETag != controlEtag { - modifiedExternallyError() - } - if controlEtag == localEtag { cache.PrevETag = localEtag log.Println("no update needed, doing nothing") return nil } + if cache.PrevETag != controlEtag { + if err := modifiedExternallyError(); err != nil { + if *failOnManualEdits { + return err + } else { + fmt.Println(err) + } + } + } + if err := applyNewACL(ctx, client, tailnet, apiKey, *policyFname, controlEtag); err != nil { return err } @@ -106,15 +113,21 @@ func test(cache *Cache, client *http.Client, tailnet, apiKey string) func(contex log.Printf("local: %s", localEtag) log.Printf("cache: %s", cache.PrevETag) - if cache.PrevETag != controlEtag { - modifiedExternallyError() - } - if controlEtag == localEtag { log.Println("no updates found, doing nothing") return nil } + if cache.PrevETag != controlEtag { + if err := modifiedExternallyError(); err != nil { + if *failOnManualEdits { + return err + } else { + fmt.Println(err) + } + } + } + if err := testNewACLs(ctx, client, tailnet, apiKey, *policyFname); err != nil { return err } diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index b5c0ed517945f..45048e52ed533 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -5,7 +5,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ W 💣 github.com/alexbrainman/sspi from 
github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy - LD github.com/anmitsu/go-shlex from tailscale.com/tempfork/gliderlabs/ssh L github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore L github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ @@ -81,8 +80,11 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus github.com/bits-and-blooms/bitset from github.com/gaissmai/bart 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus + github.com/coder/websocket from tailscale.com/control/controlhttp+ + github.com/coder/websocket/internal/errd from github.com/coder/websocket + github.com/coder/websocket/internal/util from github.com/coder/websocket + github.com/coder/websocket/internal/xsync from github.com/coder/websocket L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw - LD 💣 github.com/creack/pty from tailscale.com/ssh/tailssh 💣 github.com/davecgh/go-spew/spew from k8s.io/apimachinery/pkg/util/dump W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ @@ -98,7 +100,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 github.com/fsnotify/fsnotify from sigs.k8s.io/controller-runtime/pkg/certwatcher github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/gaissmai/bart from tailscale.com/net/ipset+ - github.com/go-json-experiment/json from tailscale.com/types/opt + github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from 
github.com/go-json-experiment/json/internal/jsonflags+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json/internal/jsonopts+ github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json/jsontext+ @@ -113,7 +115,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/go-openapi/jsonreference from k8s.io/kube-openapi/pkg/internal+ github.com/go-openapi/jsonreference/internal from github.com/go-openapi/jsonreference github.com/go-openapi/swag from github.com/go-openapi/jsonpointer+ - L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns+ + L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns 💣 github.com/gogo/protobuf/proto from k8s.io/api/admission/v1+ github.com/gogo/protobuf/sortkeys from k8s.io/api/admission/v1+ github.com/golang/groupcache/lru from k8s.io/client-go/tools/record+ @@ -161,7 +163,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd github.com/kortschak/wol from tailscale.com/ipn/ipnlocal - LD github.com/kr/fs from github.com/pkg/sftp github.com/mailru/easyjson/buffer from github.com/mailru/easyjson/jwriter 💣 github.com/mailru/easyjson/jlexer from github.com/go-openapi/swag github.com/mailru/easyjson/jwriter from github.com/go-openapi/swag @@ -183,8 +184,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ L github.com/pierrec/lz4/v4/internal/lz4stream from github.com/pierrec/lz4/v4 L github.com/pierrec/lz4/v4/internal/xxh32 from github.com/pierrec/lz4/v4/internal/lz4stream github.com/pkg/errors from github.com/evanphx/json-patch/v5+ - LD github.com/pkg/sftp from tailscale.com/ssh/tailssh - LD github.com/pkg/sftp/internal/encoding/ssh/filexfer from github.com/pkg/sftp D 
github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack 💣 github.com/prometheus/client_golang/prometheus from github.com/prometheus/client_golang/prometheus/collectors+ github.com/prometheus/client_golang/prometheus/collectors from sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics @@ -207,7 +206,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ github.com/tailscale/golang-x-crypto/acme from tailscale.com/ipn/ipnlocal LD github.com/tailscale/golang-x-crypto/internal/poly1305 from github.com/tailscale/golang-x-crypto/ssh - LD github.com/tailscale/golang-x-crypto/ssh from tailscale.com/ipn/ipnlocal+ + LD github.com/tailscale/golang-x-crypto/ssh from tailscale.com/ipn/ipnlocal LD github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf from github.com/tailscale/golang-x-crypto/ssh github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+ github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper @@ -230,7 +229,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ github.com/tcnksm/go-httpstat from tailscale.com/net/netcheck - LD github.com/u-root/u-root/pkg/termios from tailscale.com/ssh/tailssh L github.com/u-root/uio/rand from github.com/insomniacslk/dhcp/dhcpv4 L github.com/u-root/uio/uio from github.com/insomniacslk/dhcp/dhcpv4+ L 💣 github.com/vishvananda/netlink/nl from github.com/tailscale/netlink @@ -307,7 +305,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ gvisor.dev/gvisor/pkg/tcpip/header from gvisor.dev/gvisor/pkg/tcpip/header/parse+ gvisor.dev/gvisor/pkg/tcpip/header/parse from 
gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ gvisor.dev/gvisor/pkg/tcpip/internal/tcp from gvisor.dev/gvisor/pkg/tcpip/stack+ - gvisor.dev/gvisor/pkg/tcpip/link/channel from tailscale.com/wgengine/netstack gvisor.dev/gvisor/pkg/tcpip/network/hash from gvisor.dev/gvisor/pkg/tcpip/network/ipv4 gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ gvisor.dev/gvisor/pkg/tcpip/network/internal/ip from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ @@ -317,6 +314,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+ 💣 gvisor.dev/gvisor/pkg/tcpip/stack from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ + gvisor.dev/gvisor/pkg/tcpip/stack/gro from tailscale.com/wgengine/netstack gvisor.dev/gvisor/pkg/tcpip/transport from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ gvisor.dev/gvisor/pkg/tcpip/transport/icmp from tailscale.com/wgengine/netstack gvisor.dev/gvisor/pkg/tcpip/transport/internal/network from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ @@ -423,6 +421,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/apimachinery/pkg/util/naming from k8s.io/apimachinery/pkg/runtime+ k8s.io/apimachinery/pkg/util/net from k8s.io/apimachinery/pkg/watch+ k8s.io/apimachinery/pkg/util/rand from k8s.io/apiserver/pkg/storage/names + k8s.io/apimachinery/pkg/util/remotecommand from tailscale.com/k8s-operator/sessionrecording/ws k8s.io/apimachinery/pkg/util/runtime from k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme+ k8s.io/apimachinery/pkg/util/sets from k8s.io/apimachinery/pkg/api/meta+ k8s.io/apimachinery/pkg/util/strategicpatch from k8s.io/client-go/tools/record+ @@ -600,10 +599,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/utils/ptr from k8s.io/client-go/tools/cache+ 
k8s.io/utils/strings/slices from k8s.io/apimachinery/pkg/labels k8s.io/utils/trace from k8s.io/client-go/tools/cache - nhooyr.io/websocket from tailscale.com/control/controlhttp+ - nhooyr.io/websocket/internal/errd from nhooyr.io/websocket - nhooyr.io/websocket/internal/util from nhooyr.io/websocket - nhooyr.io/websocket/internal/xsync from nhooyr.io/websocket sigs.k8s.io/controller-runtime/pkg/builder from tailscale.com/cmd/k8s-operator sigs.k8s.io/controller-runtime/pkg/cache from sigs.k8s.io/controller-runtime/pkg/cluster+ sigs.k8s.io/controller-runtime/pkg/cache/internal from sigs.k8s.io/controller-runtime/pkg/cache @@ -660,7 +655,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ tailscale.com/clientupdate/distsign from tailscale.com/clientupdate - LD tailscale.com/cmd/tailscaled/childproc from tailscale.com/ssh/tailssh tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnlocal+ tailscale.com/control/controlhttp from tailscale.com/control/controlclient @@ -692,6 +686,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/k8s-operator from tailscale.com/cmd/k8s-operator tailscale.com/k8s-operator/apis from tailscale.com/k8s-operator/apis/v1alpha1 tailscale.com/k8s-operator/apis/v1alpha1 from tailscale.com/cmd/k8s-operator+ + tailscale.com/k8s-operator/sessionrecording from tailscale.com/cmd/k8s-operator + tailscale.com/k8s-operator/sessionrecording/spdy from tailscale.com/k8s-operator/sessionrecording + tailscale.com/k8s-operator/sessionrecording/tsrecorder from tailscale.com/k8s-operator/sessionrecording+ + tailscale.com/k8s-operator/sessionrecording/ws from tailscale.com/k8s-operator/sessionrecording tailscale.com/kube from tailscale.com/cmd/k8s-operator+ tailscale.com/licenses from 
tailscale.com/client/web tailscale.com/log/filelogger from tailscale.com/logpolicy @@ -701,6 +699,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ @@ -743,16 +742,15 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/tailscale+ - 💣 tailscale.com/ssh/tailssh from tailscale.com/cmd/k8s-operator + tailscale.com/sessionrecording from tailscale.com/k8s-operator/sessionrecording+ tailscale.com/syncs from tailscale.com/control/controlknobs+ tailscale.com/tailcfg from tailscale.com/client/tailscale+ tailscale.com/taildrop from tailscale.com/ipn/ipnlocal+ - LD tailscale.com/tempfork/gliderlabs/ssh from tailscale.com/ssh/tailssh tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock tailscale.com/tka from tailscale.com/client/tailscale+ - W tailscale.com/tsconst from tailscale.com/net/netmon+ + tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tsd from tailscale.com/ipn/ipnlocal+ - tailscale.com/tsnet from tailscale.com/cmd/k8s-operator + tailscale.com/tsnet from tailscale.com/cmd/k8s-operator+ tailscale.com/tstime from tailscale.com/cmd/k8s-operator+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ tailscale.com/tstime/rate from tailscale.com/derp+ @@ -807,6 +805,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/singleflight from 
tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ tailscale.com/util/syspolicy from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting + tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ @@ -837,7 +837,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ - LD golang.org/x/crypto/blowfish from github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf+ + LD golang.org/x/crypto/blowfish from github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/chacha20 from github.com/tailscale/golang-x-crypto/ssh+ golang.org/x/crypto/chacha20poly1305 from crypto/tls+ golang.org/x/crypto/cryptobyte from crypto/ecdsa+ @@ -848,7 +848,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - LD golang.org/x/crypto/ssh from github.com/pkg/sftp+ golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ golang.org/x/exp/maps from sigs.k8s.io/controller-runtime/pkg/cache+ golang.org/x/exp/slices from tailscale.com/cmd/k8s-operator+ @@ -865,6 +864,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/net/ipv6 from github.com/miekg/dns+ golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from net+ + 
golang.org/x/net/websocket from tailscale.com/k8s-operator/sessionrecording/ws golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials+ golang.org/x/oauth2/clientcredentials from tailscale.com/cmd/k8s-operator golang.org/x/oauth2/internal from golang.org/x/oauth2+ @@ -953,7 +953,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ log/internal from log+ log/slog from github.com/go-logr/logr+ log/slog/internal from log/slog - LD log/syslog from tailscale.com/ssh/tailssh maps from sigs.k8s.io/controller-runtime/pkg/predicate+ math from archive/tar+ math/big from crypto/dsa+ diff --git a/cmd/k8s-operator/dnsrecords.go b/cmd/k8s-operator/dnsrecords.go index 2fa6d69510ed9..bba87bf255910 100644 --- a/cmd/k8s-operator/dnsrecords.go +++ b/cmd/k8s-operator/dnsrecords.go @@ -24,6 +24,7 @@ import ( operatorutils "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/util/mak" + "tailscale.com/util/set" ) const ( @@ -167,36 +168,49 @@ func (dnsRR *dnsRecordsReconciler) maybeProvision(ctx context.Context, headlessS } } - // Get the Pod IP addresses for the proxy from the EndpointSlice for the - // headless Service. + // Get the Pod IP addresses for the proxy from the EndpointSlices for + // the headless Service. The Service can have multiple EndpointSlices + // associated with it, for example in dual-stack clusters. 
labels := map[string]string{discoveryv1.LabelServiceName: headlessSvc.Name} // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership - eps, err := getSingleObject[discoveryv1.EndpointSlice](ctx, dnsRR.Client, dnsRR.tsNamespace, labels) - if err != nil { - return fmt.Errorf("error getting the EndpointSlice for the proxy's headless Service: %w", err) + var eps = new(discoveryv1.EndpointSliceList) + if err := dnsRR.List(ctx, eps, client.InNamespace(dnsRR.tsNamespace), client.MatchingLabels(labels)); err != nil { + return fmt.Errorf("error listing EndpointSlices for the proxy's headless Service: %w", err) } - if eps == nil { + if len(eps.Items) == 0 { logger.Debugf("proxy's headless Service EndpointSlice does not yet exist. We will reconcile again once it's created") return nil } - // An EndpointSlice for a Service can have a list of endpoints that each + // Each EndpointSlice for a Service can have a list of endpoints that each // can have multiple addresses - these are the IP addresses of any Pods // selected by that Service. Pick all the IPv4 addresses. - ips := make([]string, 0) - for _, ep := range eps.Endpoints { - for _, ip := range ep.Addresses { - if !net.IsIPv4String(ip) { - logger.Infof("EndpointSlice contains IP address %q that is not IPv4, ignoring. Currently only IPv4 is supported", ip) - } else { - ips = append(ips, ip) + // It is also possible that multiple EndpointSlices have overlapping addresses. 
+ // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#duplicate-endpoints + ips := make(set.Set[string], 0) + for _, slice := range eps.Items { + if slice.AddressType != discoveryv1.AddressTypeIPv4 { + logger.Infof("EndpointSlice is for AddressType %s, currently only IPv4 address type is supported", slice.AddressType) + continue + } + for _, ep := range slice.Endpoints { + if !epIsReady(&ep) { + logger.Debugf("Endpoint with addresses %v appears not ready to receive traffic %v", ep.Addresses, ep.Conditions.String()) + continue + } + for _, ip := range ep.Addresses { + if !net.IsIPv4String(ip) { + logger.Infof("EndpointSlice contains IP address %q that is not IPv4, ignoring. Currently only IPv4 is supported", ip) + } else { + ips.Add(ip) + } } } } - if len(ips) == 0 { + if ips.Len() == 0 { logger.Debugf("EndpointSlice for the Service contains no IPv4 addresses. We will reconcile again once they are created.") return nil } updateFunc := func(rec *operatorutils.Records) { - mak.Set(&rec.IP4, fqdn, ips) + mak.Set(&rec.IP4, fqdn, ips.Slice()) } if err = dnsRR.updateDNSConfig(ctx, updateFunc); err != nil { return fmt.Errorf("error updating DNS records: %w", err) @@ -204,6 +218,17 @@ func (dnsRR *dnsRecordsReconciler) maybeProvision(ctx context.Context, headlessS return nil } +// epIsReady reports whether the endpoint is currently in a state to receive new +// traffic. As per kube docs, only explicitly set 'false' for 'Ready' or +// 'Serving' conditions or explicitly set 'true' for 'Terminating' condition +// means that the Endpoint is NOT ready. 
+// https://github.com/kubernetes/kubernetes/blob/60c4c2b2521fb454ce69dee737e3eb91a25e0535/pkg/apis/discovery/types.go#L109-L131 +func epIsReady(ep *discoveryv1.Endpoint) bool { + return (ep.Conditions.Ready == nil || *ep.Conditions.Ready) && + (ep.Conditions.Serving == nil || *ep.Conditions.Serving) && + (ep.Conditions.Terminating == nil || !*ep.Conditions.Terminating) +} + // maybeCleanup ensures that the DNS record for the proxy has been removed from // dnsrecords ConfigMap and the tailscale.com/dns-records-reconciler finalizer // has been removed from the Service. If the record is not found in the diff --git a/cmd/k8s-operator/dnsrecords_test.go b/cmd/k8s-operator/dnsrecords_test.go index 67016e2c6d83b..389461b85f340 100644 --- a/cmd/k8s-operator/dnsrecords_test.go +++ b/cmd/k8s-operator/dnsrecords_test.go @@ -8,6 +8,7 @@ package main import ( "context" "encoding/json" + "fmt" "testing" "github.com/google/go-cmp/cmp" @@ -87,13 +88,16 @@ func TestDNSRecordsReconciler(t *testing.T) { }, } headlessForEgressSvcFQDN := headlessSvcForParent(egressSvcFQDN, "svc") // create the proxy headless Service - ep := endpointSliceForService(headlessForEgressSvcFQDN, "10.9.8.7") + ep := endpointSliceForService(headlessForEgressSvcFQDN, "10.9.8.7", discoveryv1.AddressTypeIPv4) + epv6 := endpointSliceForService(headlessForEgressSvcFQDN, "2600:1900:4011:161:0:d:0:d", discoveryv1.AddressTypeIPv6) + mustCreate(t, fc, egressSvcFQDN) mustCreate(t, fc, headlessForEgressSvcFQDN) mustCreate(t, fc, ep) + mustCreate(t, fc, epv6) expectReconciled(t, dnsRR, "tailscale", "egress-fqdn") // dns-records-reconciler reconcile the headless Service // ConfigMap should now have a record for foo.bar.ts.net -> 10.8.8.7 - wantHosts := map[string][]string{"foo.bar.ts.net": {"10.9.8.7"}} + wantHosts := map[string][]string{"foo.bar.ts.net": {"10.9.8.7"}} // IPv6 endpoint is currently ignored expectHostsRecords(t, fc, wantHosts) // 2. 
DNS record is updated if tailscale.com/tailnet-fqdn annotation's @@ -106,7 +110,7 @@ func TestDNSRecordsReconciler(t *testing.T) { expectHostsRecords(t, fc, wantHosts) // 3. DNS record is updated if the IP address of the proxy Pod changes. - ep = endpointSliceForService(headlessForEgressSvcFQDN, "10.6.5.4") + ep = endpointSliceForService(headlessForEgressSvcFQDN, "10.6.5.4", discoveryv1.AddressTypeIPv4) mustUpdate(t, fc, ep.Namespace, ep.Name, func(ep *discoveryv1.EndpointSlice) { ep.Endpoints[0].Addresses = []string{"10.6.5.4"} }) @@ -116,7 +120,7 @@ func TestDNSRecordsReconciler(t *testing.T) { // 4. DNS record is created for an ingress proxy configured via Ingress headlessForIngress := headlessSvcForParent(ing, "ingress") - ep = endpointSliceForService(headlessForIngress, "10.9.8.7") + ep = endpointSliceForService(headlessForIngress, "10.9.8.7", discoveryv1.AddressTypeIPv4) mustCreate(t, fc, headlessForIngress) mustCreate(t, fc, ep) expectReconciled(t, dnsRR, "tailscale", "ts-ingress") // dns-records-reconciler should reconcile the headless Service @@ -140,6 +144,17 @@ func TestDNSRecordsReconciler(t *testing.T) { expectReconciled(t, dnsRR, "tailscale", "ts-ingress") wantHosts["another.ingress.ts.net"] = []string{"7.8.9.10"} expectHostsRecords(t, fc, wantHosts) + + // 7. A not-ready Endpoint is removed from DNS config. 
+ mustUpdate(t, fc, ep.Namespace, ep.Name, func(ep *discoveryv1.EndpointSlice) { + ep.Endpoints[0].Conditions.Ready = ptr.To(false) + ep.Endpoints = append(ep.Endpoints, discoveryv1.Endpoint{ + Addresses: []string{"1.2.3.4"}, + }) + }) + expectReconciled(t, dnsRR, "tailscale", "ts-ingress") + wantHosts["another.ingress.ts.net"] = []string{"1.2.3.4"} + expectHostsRecords(t, fc, wantHosts) } func headlessSvcForParent(o client.Object, typ string) *corev1.Service { @@ -162,15 +177,21 @@ func headlessSvcForParent(o client.Object, typ string) *corev1.Service { } } -func endpointSliceForService(svc *corev1.Service, ip string) *discoveryv1.EndpointSlice { +func endpointSliceForService(svc *corev1.Service, ip string, fam discoveryv1.AddressType) *discoveryv1.EndpointSlice { return &discoveryv1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ - Name: svc.Name, + Name: fmt.Sprintf("%s-%s", svc.Name, string(fam)), Namespace: svc.Namespace, Labels: map[string]string{discoveryv1.LabelServiceName: svc.Name}, }, + AddressType: fam, Endpoints: []discoveryv1.Endpoint{{ Addresses: []string{ip}, + Conditions: discoveryv1.EndpointConditions{ + Ready: ptr.To(true), + Serving: ptr.To(true), + Terminating: ptr.To(false), + }, }}, } } diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 114fd563abe81..18665bd8f028c 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -51,8 +51,8 @@ import ( // Generate static manifests for deploying Tailscale operator on Kubernetes from the operator's Helm chart. //go:generate go run tailscale.com/cmd/k8s-operator/generate staticmanifests -// Generate CRD docs from the yamls -//go:generate go run fybrik.io/crdoc --resources=./deploy/crds --output=../../k8s-operator/api.md +// Generate CRD API docs. 
+//go:generate go run github.com/elastic/crd-ref-docs --renderer=markdown --source-path=../../k8s-operator/apis/ --config=../../k8s-operator/api-docs-config.yaml --output-path=../../k8s-operator/api.md func main() { // Required to use our client API. We're fine with the instability since the diff --git a/cmd/k8s-operator/proxy.go b/cmd/k8s-operator/proxy.go index 258a958fa04c3..3d092fe34775f 100644 --- a/cmd/k8s-operator/proxy.go +++ b/cmd/k8s-operator/proxy.go @@ -22,8 +22,8 @@ import ( "k8s.io/client-go/transport" "tailscale.com/client/tailscale" "tailscale.com/client/tailscale/apitype" + ksr "tailscale.com/k8s-operator/sessionrecording" tskube "tailscale.com/kube" - "tailscale.com/ssh/tailssh" "tailscale.com/tailcfg" "tailscale.com/tsnet" "tailscale.com/util/clientmetric" @@ -36,12 +36,6 @@ var whoIsKey = ctxkey.New("", (*apitype.WhoIsResponse)(nil)) var ( // counterNumRequestsproxies counts the number of API server requests proxied via this proxy. counterNumRequestsProxied = clientmetric.NewCounter("k8s_auth_proxy_requests_proxied") - - // counterSessionRecordingsAttempted counts the number of session recording attempts. - counterSessionRecordingsAttempted = clientmetric.NewCounter("k8s_auth_proxy__session_recordings_attempted") - - // counterSessionRecordingsUploaded counts the number of successfully uploaded session recordings. 
- counterSessionRecordingsUploaded = clientmetric.NewCounter("k8s_auth_proxy_session_recordings_uploaded") ) type apiServerProxyMode int @@ -173,7 +167,8 @@ func runAPIServerProxy(ts *tsnet.Server, rt http.RoundTripper, log *zap.SugaredL mux := http.NewServeMux() mux.HandleFunc("/", ap.serveDefault) - mux.HandleFunc("/api/v1/namespaces/{namespace}/pods/{pod}/exec", ap.serveExec) + mux.HandleFunc("POST /api/v1/namespaces/{namespace}/pods/{pod}/exec", ap.serveExecSPDY) + mux.HandleFunc("GET /api/v1/namespaces/{namespace}/pods/{pod}/exec", ap.serveExecWS) hs := &http.Server{ // Kubernetes uses SPDY for exec and port-forward, however SPDY is @@ -214,9 +209,19 @@ func (ap *apiserverProxy) serveDefault(w http.ResponseWriter, r *http.Request) { ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) } -// serveExec serves 'kubectl exec' requests, optionally configuring the kubectl -// exec sessions to be recorded. -func (ap *apiserverProxy) serveExec(w http.ResponseWriter, r *http.Request) { +// serveExecSPDY serves 'kubectl exec' requests for sessions streamed over SPDY, +// optionally configuring the kubectl exec sessions to be recorded. +func (ap *apiserverProxy) serveExecSPDY(w http.ResponseWriter, r *http.Request) { + ap.execForProto(w, r, ksr.SPDYProtocol) +} + +// serveExecWS serves 'kubectl exec' requests for sessions streamed over WebSocket, +// optionally configuring the kubectl exec sessions to be recorded. 
+func (ap *apiserverProxy) serveExecWS(w http.ResponseWriter, r *http.Request) { + ap.execForProto(w, r, ksr.WSProtocol) +} + +func (ap *apiserverProxy) execForProto(w http.ResponseWriter, r *http.Request, proto ksr.Protocol) { who, err := ap.whoIs(r) if err != nil { ap.authError(w, err) @@ -232,15 +237,17 @@ func (ap *apiserverProxy) serveExec(w http.ResponseWriter, r *http.Request) { ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) return } - counterSessionRecordingsAttempted.Add(1) // at this point we know that users intended for this session to be recorded + ksr.CounterSessionRecordingsAttempted.Add(1) // at this point we know that users intended for this session to be recorded if !failOpen && len(addrs) == 0 { msg := "forbidden: 'kubectl exec' session must be recorded, but no recorders are available." ap.log.Error(msg) http.Error(w, msg, http.StatusForbidden) return } - if r.Method != "POST" || r.Header.Get("Upgrade") != "SPDY/3.1" { - msg := "'kubectl exec' session recording is configured, but the request is not over SPDY. Session recording is currently only supported for SPDY based clients" + + wantsHeader := upgradeHeaderForProto[proto] + if h := r.Header.Get("Upgrade"); h != wantsHeader { + msg := fmt.Sprintf("[unexpected] unable to verify that streaming protocol is %s, wants Upgrade header %q, got: %q", proto, wantsHeader, h) if failOpen { msg = msg + "; failure mode is 'fail open'; continuing session without recording." 
ap.log.Warn(msg) @@ -252,20 +259,22 @@ func (ap *apiserverProxy) serveExec(w http.ResponseWriter, r *http.Request) { http.Error(w, msg, http.StatusForbidden) return } - spdyH := &spdyHijacker{ - ts: ap.ts, - req: r, - who: who, - ResponseWriter: w, - log: ap.log, - pod: r.PathValue("pod"), - ns: r.PathValue("namespace"), - addrs: addrs, - failOpen: failOpen, - connectToRecorder: tailssh.ConnectToRecorder, + + opts := ksr.HijackerOpts{ + Req: r, + W: w, + Proto: proto, + TS: ap.ts, + Who: who, + Addrs: addrs, + FailOpen: failOpen, + Pod: r.PathValue("pod"), + Namespace: r.PathValue("namespace"), + Log: ap.log, } + h := ksr.New(opts) - ap.rp.ServeHTTP(spdyH, r.WithContext(whoIsKey.WithValue(r.Context(), who))) + ap.rp.ServeHTTP(h, r.WithContext(whoIsKey.WithValue(r.Context(), who))) } func (h *apiserverProxy) addImpersonationHeadersAsRequired(r *http.Request) { @@ -398,3 +407,8 @@ func determineRecorderConfig(who *apitype.WhoIsResponse) (failOpen bool, recorde } return failOpen, recorderAddresses, nil } + +var upgradeHeaderForProto = map[ksr.Protocol]string{ + ksr.SPDYProtocol: "SPDY/3.1", + ksr.WSProtocol: "websocket", +} diff --git a/cmd/stunstamp/stunstamp.go b/cmd/stunstamp/stunstamp.go index a4d25922a29f6..950fdc2cddb84 100644 --- a/cmd/stunstamp/stunstamp.go +++ b/cmd/stunstamp/stunstamp.go @@ -1,13 +1,14 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// The stunstamp binary measures STUN round-trip latency with DERPs. +// The stunstamp binary measures round-trip latency with DERPs. 
package main import ( "bytes" "cmp" "context" + "crypto/tls" "encoding/json" "errors" "flag" @@ -22,6 +23,7 @@ import ( "net/url" "os" "os/signal" + "runtime" "slices" "strconv" "strings" @@ -31,8 +33,10 @@ import ( "github.com/golang/snappy" "github.com/prometheus/prometheus/prompb" + "github.com/tcnksm/go-httpstat" "tailscale.com/logtail/backoff" "tailscale.com/net/stun" + "tailscale.com/net/tcpinfo" "tailscale.com/tailcfg" ) @@ -42,7 +46,10 @@ var ( flagIPv6 = flag.Bool("ipv6", false, "probe IPv6 addresses") flagRemoteWriteURL = flag.String("rw-url", "", "prometheus remote write URL") flagInstance = flag.String("instance", "", "instance label value; defaults to hostname if unspecified") - flagDstPorts = flag.String("dst-ports", "", "comma-separated list of destination ports to monitor") + flagSTUNDstPorts = flag.String("stun-dst-ports", "", "comma-separated list of STUN destination ports to monitor") + flagHTTPSDstPorts = flag.String("https-dst-ports", "", "comma-separated list of HTTPS destination ports to monitor") + flagTCPDstPorts = flag.String("tcp-dst-ports", "", "comma-separated list of TCP destination ports to monitor") + flagICMP = flag.Bool("icmp", false, "probe ICMP") ) const ( @@ -89,12 +96,22 @@ func (t timestampSource) String() string { } } +type protocol string + +const ( + protocolSTUN protocol = "stun" + protocolICMP protocol = "icmp" + protocolHTTPS protocol = "https" + protocolTCP protocol = "tcp" +) + // resultKey contains the stable dimensions and their values for a given // timeseries, i.e. not time and not rtt/timeout. type resultKey struct { meta nodeMeta timestampSource timestampSource connStability connStability + protocol protocol dstPort int } @@ -104,7 +121,203 @@ type result struct { rtt *time.Duration // nil signifies failure, e.g. 
timeout } -func measureRTT(conn io.ReadWriteCloser, dst *net.UDPAddr) (rtt time.Duration, err error) { +type lportsPool struct { + sync.Mutex + ports []int +} + +func (l *lportsPool) get() int { + l.Lock() + defer l.Unlock() + ret := l.ports[0] + l.ports = append(l.ports[:0], l.ports[1:]...) + return ret +} + +func (l *lportsPool) put(i int) { + l.Lock() + defer l.Unlock() + l.ports = append(l.ports, int(i)) +} + +var ( + lports *lportsPool +) + +const ( + lportPoolSize = 16000 + lportBase = 2048 +) + +func init() { + lports = &lportsPool{ + ports: make([]int, 0, lportPoolSize), + } + for i := lportBase; i < lportBase+lportPoolSize; i++ { + lports.ports = append(lports.ports, i) + } +} + +// lportForTCPConn satisfies io.ReadWriteCloser, but is really just used to pass +// around a persistent laddr for stableConn purposes. The underlying TCP +// connection is not created until measurement time as in some cases we need to +// measure dial time. +type lportForTCPConn int + +func (l *lportForTCPConn) Close() error { + if *l == 0 { + return nil + } + lports.put(int(*l)) + return nil +} + +func (l *lportForTCPConn) Write([]byte) (int, error) { + return 0, errors.New("unimplemented") +} + +func (l *lportForTCPConn) Read([]byte) (int, error) { + return 0, errors.New("unimplemented") +} + +func addrInUse(err error, lport *lportForTCPConn) bool { + if errors.Is(err, syscall.EADDRINUSE) { + old := int(*lport) + // abandon port, don't return it to pool + *lport = lportForTCPConn(lports.get()) // get a new port + log.Printf("EADDRINUSE: %v old: %d new: %d", err, old, *lport) + return true + } + return false +} + +func tcpDial(ctx context.Context, lport *lportForTCPConn, dst netip.AddrPort) (net.Conn, error) { + for { + var opErr error + dialer := &net.Dialer{ + LocalAddr: &net.TCPAddr{ + Port: int(*lport), + }, + Control: func(network, address string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + // we may restart faster than TIME_WAIT can clear + opErr = 
setSOReuseAddr(fd) + }) + }, + } + if opErr != nil { + panic(opErr) + } + tcpConn, err := dialer.DialContext(ctx, "tcp", dst.String()) + if err != nil { + if addrInUse(err, lport) { + continue + } + return nil, err + } + return tcpConn, nil + } +} + +type tempError struct { + error +} + +func (t tempError) Temporary() bool { + return true +} + +func measureTCPRTT(conn io.ReadWriteCloser, _ string, dst netip.AddrPort) (rtt time.Duration, err error) { + lport, ok := conn.(*lportForTCPConn) + if !ok { + return 0, fmt.Errorf("unexpected conn type: %T", conn) + } + // Set a dial timeout < 1s (TCP_TIMEOUT_INIT on Linux) as a means to avoid + // SYN retries, which can contribute to tcpi->rtt below. This simply limits + // retries from the initiator, but SYN+ACK on the reverse path can also + // time out and be retransmitted. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*750) + defer cancel() + tcpConn, err := tcpDial(ctx, lport, dst) + if err != nil { + return 0, tempError{err} + } + defer tcpConn.Close() + // This is an unreliable method to measure TCP RTT. The Linux kernel + // describes it as such in tcp_rtt_estimator(). We take some care in how we + // hold tcp_info->rtt here, e.g. clamping dial timeout, but if we are to + // actually use this elsewhere as an input to some decision it warrants a + // deeper study and consideration for alternative methods. Its usefulness + // here is as a point of comparison against the other methods. + rtt, err = tcpinfo.RTT(tcpConn) + if err != nil { + return 0, tempError{err} + } + return rtt, nil +} + +func measureHTTPSRTT(conn io.ReadWriteCloser, hostname string, dst netip.AddrPort) (rtt time.Duration, err error) { + lport, ok := conn.(*lportForTCPConn) + if !ok { + return 0, fmt.Errorf("unexpected conn type: %T", conn) + } + var httpResult httpstat.Result + // 5s mirrors net/netcheck.overallProbeTimeout used in net/netcheck.Client.measureHTTPSLatency. 
+ reqCtx, cancel := context.WithTimeout(httpstat.WithHTTPStat(context.Background(), &httpResult), time.Second*5) + defer cancel() + reqURL := "https://" + dst.String() + "/derp/latency-check" + req, err := http.NewRequestWithContext(reqCtx, "GET", reqURL, nil) + if err != nil { + return 0, err + } + client := &http.Client{} + // 1.5s mirrors derp/derphttp.dialnodeTimeout used in derp/derphttp.DialNode(). + dialCtx, dialCancel := context.WithTimeout(reqCtx, time.Millisecond*1500) + defer dialCancel() + tcpConn, err := tcpDial(dialCtx, lport, dst) + if err != nil { + return 0, tempError{err} + } + defer tcpConn.Close() + tlsConn := tls.Client(tcpConn, &tls.Config{ + ServerName: hostname, + }) + // Mirror client/netcheck behavior, which handshakes before handing the + // tlsConn over to the http.Client via http.Transport + err = tlsConn.Handshake() + if err != nil { + return 0, tempError{err} + } + tlsConnCh := make(chan net.Conn, 1) + tlsConnCh <- tlsConn + tr := &http.Transport{ + DialTLSContext: func(ctx context.Context, network string, addr string) (net.Conn, error) { + select { + case tlsConn := <-tlsConnCh: + return tlsConn, nil + default: + return nil, errors.New("unexpected second call of DialTLSContext") + } + }, + } + client.Transport = tr + resp, err := client.Do(req) + if err != nil { + return 0, tempError{err} + } + if resp.StatusCode/100 != 2 { + return 0, tempError{fmt.Errorf("unexpected status code: %d", resp.StatusCode)} + } + defer resp.Body.Close() + _, err = io.Copy(io.Discard, io.LimitReader(resp.Body, 8<<10)) + if err != nil { + return 0, tempError{err} + } + httpResult.End(time.Now()) + return httpResult.ServerProcessing, nil +} + +func measureSTUNRTT(conn io.ReadWriteCloser, _ string, dst netip.AddrPort) (rtt time.Duration, err error) { uconn, ok := conn.(*net.UDPConn) if !ok { return 0, fmt.Errorf("unexpected conn type: %T", conn) @@ -116,7 +329,10 @@ func measureRTT(conn io.ReadWriteCloser, dst *net.UDPAddr) (rtt time.Duration, e txID := 
stun.NewTxID() req := stun.Request(txID) txAt := time.Now() - _, err = uconn.WriteToUDP(req, dst) + _, err = uconn.WriteToUDP(req, &net.UDPAddr{ + IP: dst.Addr().AsSlice(), + Port: int(dst.Port()), + }) if err != nil { return 0, fmt.Errorf("error writing to udp socket: %w", err) } @@ -153,20 +369,19 @@ type nodeMeta struct { addr netip.Addr } -type measureFn func(conn io.ReadWriteCloser, dst *net.UDPAddr) (rtt time.Duration, err error) +type measureFn func(conn io.ReadWriteCloser, hostname string, dst netip.AddrPort) (rtt time.Duration, err error) -// probe measures STUN round trip time for the node described by meta over -// conn against dstPort. It may return a nil duration and nil error if the -// STUN request timed out. A non-nil error indicates an unrecoverable or -// non-temporary error. -func probe(meta nodeMeta, conn io.ReadWriteCloser, fn measureFn, dstPort int) (*time.Duration, error) { +// probe measures round trip time for the node described by meta over cf against +// dstPort. It may return a nil duration and nil error in the event of a +// timeout. A non-nil error indicates an unrecoverable or non-temporary error. 
+func probe(meta nodeMeta, cf *connAndMeasureFn, dstPort int) (*time.Duration, error) { ua := &net.UDPAddr{ IP: net.IP(meta.addr.AsSlice()), Port: dstPort, } time.Sleep(rand.N(200 * time.Millisecond)) // jitter across tx - rtt, err := fn(conn, ua) + rtt, err := cf.fn(cf.conn, meta.hostname, netip.AddrPortFrom(meta.addr, uint16(dstPort))) if err != nil { if isTemporaryOrTimeoutErr(err) { log.Printf("temp error measuring RTT to %s(%s): %v", meta.hostname, ua.String(), err) @@ -237,43 +452,138 @@ func nodeMetaFromDERPMap(dm *tailcfg.DERPMap, nodeMetaByAddr map[netip.Addr]node return stale, nil } -func getStableConns(stableConns map[netip.Addr]map[int][2]io.ReadWriteCloser, addr netip.Addr, dstPort int) ([2]io.ReadWriteCloser, error) { - conns := [2]io.ReadWriteCloser{} - byDstPort, ok := stableConns[addr] - if ok { - conns, ok = byDstPort[dstPort] - if ok { - return conns, nil +type connAndMeasureFn struct { + conn io.ReadWriteCloser + fn measureFn +} + +// newConnAndMeasureFn returns a connAndMeasureFn or an error. It may return +// nil for both if some combination of the supplied timestampSource, protocol, +// or connStability is unsupported. 
+func newConnAndMeasureFn(source timestampSource, protocol protocol, stable connStability) (*connAndMeasureFn, error) { + info := getProtocolSupportInfo(protocol) + if !info.stableConn && bool(stable) { + return nil, nil + } + if !info.userspaceTS && source == timestampSourceUserspace { + return nil, nil + } + if !info.kernelTS && source == timestampSourceKernel { + return nil, nil + } + switch protocol { + case protocolSTUN: + if source == timestampSourceKernel { + conn, err := getUDPConnKernelTimestamp() + if err != nil { + return nil, err + } + return &connAndMeasureFn{ + conn: conn, + fn: measureSTUNRTTKernel, + }, nil + } else { + conn, err := net.ListenUDP("udp", &net.UDPAddr{}) + if err != nil { + return nil, err + } + return &connAndMeasureFn{ + conn: conn, + fn: measureSTUNRTT, + }, nil + } + case protocolICMP: + // TODO(jwhited): implement + return nil, nil + case protocolHTTPS: + localPort := 0 + if stable { + localPort = lports.get() + } + conn := lportForTCPConn(localPort) + return &connAndMeasureFn{ + conn: &conn, + fn: measureHTTPSRTT, + }, nil + case protocolTCP: + localPort := 0 + if stable { + localPort = lports.get() } + conn := lportForTCPConn(localPort) + return &connAndMeasureFn{ + conn: &conn, + fn: measureTCPRTT, + }, nil } - if supportsKernelTS() { - kconn, err := getConnKernelTimestamp() + return nil, errors.New("unknown protocol") +} + +type stableConnKey struct { + node netip.Addr + protocol protocol + port int +} + +type protocolSupportInfo struct { + kernelTS bool + userspaceTS bool + stableConn bool +} + +func getConns( + stableConns map[stableConnKey][2]*connAndMeasureFn, + addr netip.Addr, + protocol protocol, + dstPort int, +) (stable, unstable [2]*connAndMeasureFn, err error) { + key := stableConnKey{addr, protocol, dstPort} + defer func() { if err != nil { - return conns, err + for _, source := range []timestampSource{timestampSourceUserspace, timestampSourceKernel} { + c := stable[source] + if c != nil { + c.conn.Close() + } + c 
= unstable[source] + if c != nil { + c.conn.Close() + } + } } - conns[timestampSourceKernel] = kconn - } - uconn, err := net.ListenUDP("udp", &net.UDPAddr{}) - if err != nil { - if supportsKernelTS() { - conns[timestampSourceKernel].Close() + }() + + var ok bool + stable, ok = stableConns[key] + if !ok { + for _, source := range []timestampSource{timestampSourceUserspace, timestampSourceKernel} { + var cf *connAndMeasureFn + cf, err = newConnAndMeasureFn(source, protocol, stableConn) + if err != nil { + return + } + stable[source] = cf } - return conns, err + stableConns[key] = stable } - conns[timestampSourceUserspace] = uconn - if byDstPort == nil { - byDstPort = make(map[int][2]io.ReadWriteCloser) + + for _, source := range []timestampSource{timestampSourceUserspace, timestampSourceKernel} { + var cf *connAndMeasureFn + cf, err = newConnAndMeasureFn(source, protocol, unstableConn) + if err != nil { + return + } + unstable[source] = cf } - byDstPort[dstPort] = conns - stableConns[addr] = byDstPort - return conns, nil + return stable, unstable, nil } -// probeNodes measures the round-trip time for STUN binding requests against the -// DERP nodes described by nodeMetaByAddr while using/updating stableConns for -// UDP sockets that should be recycled across runs. It returns the results or -// an error if one occurs. -func probeNodes(nodeMetaByAddr map[netip.Addr]nodeMeta, stableConns map[netip.Addr]map[int][2]io.ReadWriteCloser, dstPorts []int) ([]result, error) { +// probeNodes measures the round-trip time for the protocols and ports described +// by portsByProtocol against the DERP nodes described by nodeMetaByAddr. +// stableConns are used to recycle connections across calls to probeNodes. +// probeNodes is also responsible for trimming stableConns based on node +// lifetime in nodeMetaByAddr. It returns the results or an error if one occurs. 
+func probeNodes(nodeMetaByAddr map[netip.Addr]nodeMeta, stableConns map[stableConnKey][2]*connAndMeasureFn, portsByProtocol map[protocol][]int) ([]result, error) { wg := sync.WaitGroup{} results := make([]result, 0) resultsCh := make(chan result) @@ -283,40 +593,19 @@ func probeNodes(nodeMetaByAddr map[netip.Addr]nodeMeta, stableConns map[netip.Ad at := time.Now() addrsToProbe := make(map[netip.Addr]bool) - doProbe := func(conn io.ReadWriteCloser, meta nodeMeta, source timestampSource, dstPort int) { + doProbe := func(cf *connAndMeasureFn, meta nodeMeta, source timestampSource, stable connStability, protocol protocol, dstPort int) { defer wg.Done() r := result{ key: resultKey{ meta: meta, timestampSource: source, + connStability: stable, dstPort: dstPort, + protocol: protocol, }, at: at, } - if conn == nil { - var err error - if source == timestampSourceKernel { - conn, err = getConnKernelTimestamp() - } else { - conn, err = net.ListenUDP("udp", &net.UDPAddr{}) - } - if err != nil { - select { - case <-doneCh: - return - case errCh <- err: - return - } - } - defer conn.Close() - } else { - r.key.connStability = stableConn - } - fn := measureRTT - if source == timestampSourceKernel { - fn = measureRTTKernel - } - rtt, err := probe(meta, conn, fn, dstPort) + rtt, err := probe(meta, cf, dstPort) if err != nil { select { case <-doneCh: @@ -334,37 +623,42 @@ func probeNodes(nodeMetaByAddr map[netip.Addr]nodeMeta, stableConns map[netip.Ad for _, meta := range nodeMetaByAddr { addrsToProbe[meta.addr] = true - for _, port := range dstPorts { - stable, err := getStableConns(stableConns, meta.addr, port) - if err != nil { - close(doneCh) - wg.Wait() - return nil, err - } + for p, ports := range portsByProtocol { + for _, port := range ports { + stable, unstable, err := getConns(stableConns, meta.addr, p, port) + if err != nil { + close(doneCh) + wg.Wait() + return nil, err + } + + for i, cf := range stable { + if cf != nil { + wg.Add(1) + numProbes++ + go doProbe(cf, meta, 
timestampSource(i), stableConn, p, port) + } + } - wg.Add(2) - numProbes += 2 - go doProbe(stable[timestampSourceUserspace], meta, timestampSourceUserspace, port) - go doProbe(nil, meta, timestampSourceUserspace, port) - if supportsKernelTS() { - wg.Add(2) - numProbes += 2 - go doProbe(stable[timestampSourceKernel], meta, timestampSourceKernel, port) - go doProbe(nil, meta, timestampSourceKernel, port) + for i, cf := range unstable { + if cf != nil { + wg.Add(1) + numProbes++ + go doProbe(cf, meta, timestampSource(i), unstableConn, p, port) + } + } } } } // cleanup conns we no longer need - for k, byDstPort := range stableConns { - if !addrsToProbe[k] { - for _, conns := range byDstPort { - if conns[timestampSourceKernel] != nil { - conns[timestampSourceKernel].Close() - } - conns[timestampSourceUserspace].Close() - delete(stableConns, k) + for k, cf := range stableConns { + if !addrsToProbe[k.node] { + if cf[timestampSourceKernel] != nil { + cf[timestampSourceKernel].conn.Close() } + cf[timestampSourceUserspace].conn.Close() + delete(stableConns, k) } } @@ -391,11 +685,11 @@ const ( ) const ( - rttMetricName = "stunstamp_derp_stun_rtt_ns" - timeoutsMetricName = "stunstamp_derp_stun_timeouts_total" + rttMetricName = "stunstamp_derp_rtt_ns" + timeoutsMetricName = "stunstamp_derp_timeouts_total" ) -func timeSeriesLabels(metricName string, meta nodeMeta, instance string, source timestampSource, stability connStability, dstPort int) []prompb.Label { +func timeSeriesLabels(metricName string, meta nodeMeta, instance string, source timestampSource, stability connStability, protocol protocol, dstPort int) []prompb.Label { addressFamily := "ipv4" if meta.addr.Is6() { addressFamily = "ipv6" @@ -425,6 +719,10 @@ func timeSeriesLabels(metricName string, meta nodeMeta, instance string, source Name: "hostname", Value: meta.hostname, }) + labels = append(labels, prompb.Label{ + Name: "protocol", + Value: string(protocol), + }) labels = append(labels, prompb.Label{ Name: 
"dst_port", Value: strconv.Itoa(dstPort), @@ -453,53 +751,35 @@ const ( staleNaN uint64 = 0x7ff0000000000002 ) -func staleMarkersFromNodeMeta(stale []nodeMeta, instance string, dstPorts []int) []prompb.TimeSeries { +func staleMarkersFromNodeMeta(stale []nodeMeta, instance string, portsByProtocol map[protocol][]int) []prompb.TimeSeries { staleMarkers := make([]prompb.TimeSeries, 0) now := time.Now() - for _, s := range stale { - for _, dstPort := range dstPorts { - samples := []prompb.Sample{ - { - Timestamp: now.UnixMilli(), - Value: math.Float64frombits(staleNaN), - }, - } - staleMarkers = append(staleMarkers, prompb.TimeSeries{ - Labels: timeSeriesLabels(rttMetricName, s, instance, timestampSourceUserspace, unstableConn, dstPort), - Samples: samples, - }) - staleMarkers = append(staleMarkers, prompb.TimeSeries{ - Labels: timeSeriesLabels(rttMetricName, s, instance, timestampSourceUserspace, stableConn, dstPort), - Samples: samples, - }) - staleMarkers = append(staleMarkers, prompb.TimeSeries{ - Labels: timeSeriesLabels(timeoutsMetricName, s, instance, timestampSourceUserspace, unstableConn, dstPort), - Samples: samples, - }) - staleMarkers = append(staleMarkers, prompb.TimeSeries{ - Labels: timeSeriesLabels(timeoutsMetricName, s, instance, timestampSourceUserspace, stableConn, dstPort), - Samples: samples, - }) - if supportsKernelTS() { - staleMarkers = append(staleMarkers, prompb.TimeSeries{ - Labels: timeSeriesLabels(rttMetricName, s, instance, timestampSourceKernel, unstableConn, dstPort), - Samples: samples, - }) - staleMarkers = append(staleMarkers, prompb.TimeSeries{ - Labels: timeSeriesLabels(rttMetricName, s, instance, timestampSourceKernel, stableConn, dstPort), - Samples: samples, - }) - staleMarkers = append(staleMarkers, prompb.TimeSeries{ - Labels: timeSeriesLabels(timeoutsMetricName, s, instance, timestampSourceKernel, unstableConn, dstPort), - Samples: samples, - }) - staleMarkers = append(staleMarkers, prompb.TimeSeries{ - Labels: 
timeSeriesLabels(timeoutsMetricName, s, instance, timestampSourceKernel, stableConn, dstPort), - Samples: samples, - }) + + for p, ports := range portsByProtocol { + for _, port := range ports { + for _, s := range stale { + samples := []prompb.Sample{ + { + Timestamp: now.UnixMilli(), + Value: math.Float64frombits(staleNaN), + }, + } + // We send stale markers for all combinations in the interest + // of simplicity. + for _, name := range []string{rttMetricName, timeoutsMetricName} { + for _, source := range []timestampSource{timestampSourceUserspace, timestampSourceKernel} { + for _, stable := range []connStability{unstableConn, stableConn} { + staleMarkers = append(staleMarkers, prompb.TimeSeries{ + Labels: timeSeriesLabels(name, s, instance, source, stable, p, port), + Samples: samples, + }) + } + } + } } } } + return staleMarkers } @@ -513,7 +793,7 @@ func resultsToPromTimeSeries(results []result, instance string, timeouts map[res for _, r := range results { timeoutsCount := timeouts[r.key] // a non-existent key will return a zero val seenKeys[r.key] = true - rttLabels := timeSeriesLabels(rttMetricName, r.key.meta, instance, r.key.timestampSource, r.key.connStability, r.key.dstPort) + rttLabels := timeSeriesLabels(rttMetricName, r.key.meta, instance, r.key.timestampSource, r.key.connStability, r.key.protocol, r.key.dstPort) rttSamples := make([]prompb.Sample, 1) rttSamples[0].Timestamp = r.at.UnixMilli() if r.rtt != nil { @@ -528,7 +808,7 @@ func resultsToPromTimeSeries(results []result, instance string, timeouts map[res } all = append(all, rttTS) timeouts[r.key] = timeoutsCount - timeoutsLabels := timeSeriesLabels(timeoutsMetricName, r.key.meta, instance, r.key.timestampSource, r.key.connStability, r.key.dstPort) + timeoutsLabels := timeSeriesLabels(timeoutsMetricName, r.key.meta, instance, r.key.timestampSource, r.key.connStability, r.key.protocol, r.key.dstPort) timeoutsSamples := make([]prompb.Sample, 1) timeoutsSamples[0].Timestamp = r.at.UnixMilli() 
timeoutsSamples[0].Value = float64(timeoutsCount) @@ -620,22 +900,66 @@ func remoteWriteTimeSeries(client *remoteWriteClient, tsCh chan []prompb.TimeSer } } +func getPortsFromFlag(f string) ([]int, error) { + if len(f) == 0 { + return nil, nil + } + split := strings.Split(f, ",") + slices.Sort(split) + split = slices.Compact(split) + ports := make([]int, 0) + for _, portStr := range split { + port, err := strconv.ParseUint(portStr, 10, 16) + if err != nil { + return nil, err + } + ports = append(ports, int(port)) + } + return ports, nil +} + func main() { + if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { + log.Fatal("unsupported platform") + } flag.Parse() - if len(*flagDstPorts) == 0 { - log.Fatal("dst-ports flag is unset") - } - dstPortsSplit := strings.Split(*flagDstPorts, ",") - slices.Sort(dstPortsSplit) - dstPortsSplit = slices.Compact(dstPortsSplit) - dstPorts := make([]int, 0, len(dstPortsSplit)) - for _, d := range dstPortsSplit { - i, err := strconv.ParseUint(d, 10, 16) - if err != nil { - log.Fatal("invalid dst-ports") + + portsByProtocol := make(map[protocol][]int) + stunPorts, err := getPortsFromFlag(*flagSTUNDstPorts) + if err != nil { + log.Fatalf("invalid stun-dst-ports flag value: %v", err) + } + if len(stunPorts) > 0 { + portsByProtocol[protocolSTUN] = stunPorts + } + httpsPorts, err := getPortsFromFlag(*flagHTTPSDstPorts) + if err != nil { + log.Fatalf("invalid https-dst-ports flag value: %v", err) + } + if len(httpsPorts) > 0 { + portsByProtocol[protocolHTTPS] = httpsPorts + } + tcpPorts, err := getPortsFromFlag(*flagTCPDstPorts) + if err != nil { + log.Fatalf("invalid tcp-dst-ports flag value: %v", err) + } + if len(tcpPorts) > 0 { + portsByProtocol[protocolTCP] = tcpPorts + } + if *flagICMP { + portsByProtocol[protocolICMP] = []int{0} + } + if len(portsByProtocol) == 0 { + log.Fatal("nothing to probe") + } + + // TODO(jwhited): remove protocol restriction + for k := range portsByProtocol { + if k != protocolSTUN && k != protocolHTTPS 
&& k != protocolTCP { + log.Fatal("ICMP is not yet supported") } - dstPorts = append(dstPorts, int(i)) } + if len(*flagDERPMap) < 1 { log.Fatal("derp-map flag is unset") } @@ -645,7 +969,7 @@ func main() { if len(*flagRemoteWriteURL) < 1 { log.Fatal("rw-url flag is unset") } - _, err := url.Parse(*flagRemoteWriteURL) + _, err = url.Parse(*flagRemoteWriteURL) if err != nil { log.Fatalf("invalid rw-url flag value: %v", err) } @@ -707,7 +1031,7 @@ func main() { for _, v := range nodeMetaByAddr { staleMeta = append(staleMeta, v) } - staleMarkers := staleMarkersFromNodeMeta(staleMeta, *flagInstance, dstPorts) + staleMarkers := staleMarkersFromNodeMeta(staleMeta, *flagInstance, portsByProtocol) if len(staleMarkers) > 0 { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) rwc.write(ctx, staleMarkers) @@ -723,8 +1047,8 @@ func main() { // in a higher probability of the packets traversing the same underlay path. // Comparison of stable and unstable 5-tuple results can shed light on // differences between paths where hashing (multipathing/load balancing) - // comes into play. - stableConns := make(map[netip.Addr]map[int][2]io.ReadWriteCloser) + // comes into play. The inner 2 element array index is timestampSource. + stableConns := make(map[stableConnKey][2]*connAndMeasureFn) // timeouts holds counts of timeout events. Values are persisted for the // lifetime of the related node in the DERP map. 
@@ -738,7 +1062,7 @@ func main() { for { select { case <-probeTicker.C: - results, err := probeNodes(nodeMetaByAddr, stableConns, dstPorts) + results, err := probeNodes(nodeMetaByAddr, stableConns, portsByProtocol) if err != nil { log.Printf("unrecoverable error while probing: %v", err) shutdown() @@ -761,7 +1085,7 @@ func main() { log.Printf("error parsing DERP map, continuing with stale map: %v", err) continue } - staleMarkers := staleMarkersFromNodeMeta(staleMeta, *flagInstance, dstPorts) + staleMarkers := staleMarkersFromNodeMeta(staleMeta, *flagInstance, portsByProtocol) if len(staleMarkers) < 1 { continue } @@ -780,7 +1104,7 @@ func main() { ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() updatedDM, err := getDERPMap(ctx, *flagDERPMap) - if err != nil { + if err == nil { dmCh <- updatedDM } }() diff --git a/cmd/stunstamp/stunstamp_default.go b/cmd/stunstamp/stunstamp_default.go index 2fb69dc68ee98..36afdbb8fc044 100644 --- a/cmd/stunstamp/stunstamp_default.go +++ b/cmd/stunstamp/stunstamp_default.go @@ -8,18 +8,42 @@ package main import ( "errors" "io" - "net" + "net/netip" "time" ) -func getConnKernelTimestamp() (io.ReadWriteCloser, error) { +func getUDPConnKernelTimestamp() (io.ReadWriteCloser, error) { return nil, errors.New("unimplemented") } -func measureRTTKernel(conn io.ReadWriteCloser, dst *net.UDPAddr) (rtt time.Duration, err error) { +func measureSTUNRTTKernel(conn io.ReadWriteCloser, hostname string, dst netip.AddrPort) (rtt time.Duration, err error) { return 0, errors.New("unimplemented") } -func supportsKernelTS() bool { - return false +func getProtocolSupportInfo(p protocol) protocolSupportInfo { + switch p { + case protocolSTUN: + return protocolSupportInfo{ + kernelTS: false, + userspaceTS: true, + stableConn: true, + } + case protocolHTTPS: + return protocolSupportInfo{ + kernelTS: false, + userspaceTS: true, + stableConn: true, + } + case protocolTCP: + return protocolSupportInfo{ + kernelTS: true, + 
userspaceTS: false, + stableConn: true, + } + } + return protocolSupportInfo{} +} + +func setSOReuseAddr(fd uintptr) error { + return nil } diff --git a/cmd/stunstamp/stunstamp_linux.go b/cmd/stunstamp/stunstamp_linux.go index 898ab19f12273..e73d1ee3c59ea 100644 --- a/cmd/stunstamp/stunstamp_linux.go +++ b/cmd/stunstamp/stunstamp_linux.go @@ -10,7 +10,8 @@ import ( "errors" "fmt" "io" - "net" + "net/netip" + "syscall" "time" "github.com/mdlayher/socket" @@ -24,7 +25,7 @@ const ( unix.SOF_TIMESTAMPING_SOFTWARE // report software timestamps ) -func getConnKernelTimestamp() (io.ReadWriteCloser, error) { +func getUDPConnKernelTimestamp() (io.ReadWriteCloser, error) { sconn, err := socket.Socket(unix.AF_INET6, unix.SOCK_DGRAM, unix.IPPROTO_UDP, "udp", nil) if err != nil { return nil, err @@ -56,24 +57,23 @@ func parseTimestampFromCmsgs(oob []byte) (time.Time, error) { return time.Time{}, errors.New("failed to parse timestamp from cmsgs") } -func measureRTTKernel(conn io.ReadWriteCloser, dst *net.UDPAddr) (rtt time.Duration, err error) { +func measureSTUNRTTKernel(conn io.ReadWriteCloser, hostname string, dst netip.AddrPort) (rtt time.Duration, err error) { sconn, ok := conn.(*socket.Conn) if !ok { return 0, fmt.Errorf("conn of unexpected type: %T", conn) } var to unix.Sockaddr - to4 := dst.IP.To4() - if to4 != nil { + if dst.Addr().Is4() { to = &unix.SockaddrInet4{ - Port: dst.Port, + Port: int(dst.Port()), } - copy(to.(*unix.SockaddrInet4).Addr[:], to4) + copy(to.(*unix.SockaddrInet4).Addr[:], dst.Addr().AsSlice()) } else { to = &unix.SockaddrInet6{ - Port: dst.Port, + Port: int(dst.Port()), } - copy(to.(*unix.SockaddrInet6).Addr[:], dst.IP) + copy(to.(*unix.SockaddrInet6).Addr[:], dst.Addr().AsSlice()) } txID := stun.NewTxID() @@ -138,6 +138,32 @@ func measureRTTKernel(conn io.ReadWriteCloser, dst *net.UDPAddr) (rtt time.Durat } -func supportsKernelTS() bool { - return true +func getProtocolSupportInfo(p protocol) protocolSupportInfo { + switch p { + case 
protocolSTUN: + return protocolSupportInfo{ + kernelTS: true, + userspaceTS: true, + stableConn: true, + } + case protocolHTTPS: + return protocolSupportInfo{ + kernelTS: false, + userspaceTS: true, + stableConn: true, + } + case protocolTCP: + return protocolSupportInfo{ + kernelTS: true, + userspaceTS: false, + stableConn: true, + } + // TODO(jwhited): add ICMP + } + return protocolSupportInfo{} +} + +func setSOReuseAddr(fd uintptr) error { + // we may restart faster than TIME_WAIT can clear + return syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1) } diff --git a/cmd/tailscale/cli/cert.go b/cmd/tailscale/cli/cert.go index db0f057cee596..9c8eca5b7d7d0 100644 --- a/cmd/tailscale/cli/cert.go +++ b/cmd/tailscale/cli/cert.go @@ -16,6 +16,7 @@ import ( "net/http" "os" "strings" + "time" "github.com/peterbourgon/ff/v3/ffcli" "software.sslmate.com/src/go-pkcs12" @@ -34,14 +35,16 @@ var certCmd = &ffcli.Command{ fs.StringVar(&certArgs.certFile, "cert-file", "", "output cert file or \"-\" for stdout; defaults to DOMAIN.crt if --cert-file and --key-file are both unset") fs.StringVar(&certArgs.keyFile, "key-file", "", "output key file or \"-\" for stdout; defaults to DOMAIN.key if --cert-file and --key-file are both unset") fs.BoolVar(&certArgs.serve, "serve-demo", false, "if true, serve on port :443 using the cert as a demo, instead of writing out the files to disk") + fs.DurationVar(&certArgs.minValidity, "min-validity", 0, "ensure the certificate is valid for at least this duration; the output certificate is never expired if this flag is unset or 0, but the lifetime may vary; the maximum allowed min-validity depends on the CA") return fs })(), } var certArgs struct { - certFile string - keyFile string - serve bool + certFile string + keyFile string + serve bool + minValidity time.Duration } func runCert(ctx context.Context, args []string) error { @@ -102,7 +105,7 @@ func runCert(ctx context.Context, args []string) error { certArgs.certFile = 
domain + ".crt" certArgs.keyFile = domain + ".key" } - certPEM, keyPEM, err := localClient.CertPair(ctx, domain) + certPEM, keyPEM, err := localClient.CertPairWithValidity(ctx, domain, certArgs.minValidity) if err != nil { return err } diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 929b639589ad8..efbdd3e40680a 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -84,6 +84,13 @@ var localClient = tailscale.LocalClient{ // Run runs the CLI. The args do not include the binary name. func Run(args []string) (err error) { + if runtime.GOOS == "linux" && os.Getenv("GOKRAZY_FIRST_START") == "1" && distro.Get() == distro.Gokrazy && os.Getppid() == 1 { + // We're running on gokrazy and it's the first start. + // Don't run the tailscale CLI as a service; just exit. + // See https://gokrazy.org/development/process-interface/ + os.Exit(0) + } + args = CleanUpArgs(args) if len(args) == 1 && (args[0] == "-V" || args[0] == "--version") { diff --git a/cmd/tailscale/cli/netcheck.go b/cmd/tailscale/cli/netcheck.go index 2fbc9d9195529..682cd99a3c6e4 100644 --- a/cmd/tailscale/cli/netcheck.go +++ b/cmd/tailscale/cli/netcheck.go @@ -52,9 +52,15 @@ func runNetcheck(ctx context.Context, args []string) error { if err != nil { return err } + + // Ensure that we close the portmapper after running a netcheck; this + // will release any port mappings created. 
+ pm := portmapper.NewClient(logf, netMon, nil, nil, nil) + defer pm.Close() + c := &netcheck.Client{ NetMon: netMon, - PortMapper: portmapper.NewClient(logf, netMon, nil, nil, nil), + PortMapper: pm, UseDNSCache: false, // always resolve, don't cache } if netcheckArgs.verbose { diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index 6d9dd35f13f71..7bea1f7249e19 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -20,6 +20,7 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/ipn/ipnstate" "tailscale.com/tka" + "tailscale.com/tsconst" "tailscale.com/types/key" "tailscale.com/types/tkatype" ) @@ -443,15 +444,33 @@ func runNetworkLockModify(ctx context.Context, addArgs, removeArgs []string) err var nlSignCmd = &ffcli.Command{ Name: "sign", - ShortUsage: "tailscale lock sign [] or sign ", + ShortUsage: "tailscale lock sign []\ntailscale lock sign ", ShortHelp: "Signs a node or pre-approved auth key", LongHelp: `Either: - - signs a node key and transmits the signature to the coordination server, or - - signs a pre-approved auth key, printing it in a form that can be used to bring up nodes under tailnet lock`, + - signs a node key and transmits the signature to the coordination + server, or + - signs a pre-approved auth key, printing it in a form that can be + used to bring up nodes under tailnet lock + +If any of the key arguments begin with "file:", the key is retrieved from +the file at the path specified in the argument suffix.`, Exec: runNetworkLockSign, } func runNetworkLockSign(ctx context.Context, args []string) error { + // If any of the arguments start with "file:", replace that argument + // with the contents of the file. We do this early, before the check + // to see if the first argument is an auth key. 
+ for i, arg := range args { + if filename, ok := strings.CutPrefix(arg, "file:"); ok { + b, err := os.ReadFile(filename) + if err != nil { + return err + } + args[i] = strings.TrimSpace(string(b)) + } + } + if len(args) > 0 && strings.HasPrefix(args[0], "tskey-auth-") { return runTskeyWrapCmd(ctx, args) } @@ -476,7 +495,7 @@ func runNetworkLockSign(ctx context.Context, args []string) error { err := localClient.NetworkLockSign(ctx, nodeKey, []byte(rotationKey.Verifier())) // Provide a better help message for when someone clicks through the signing flow // on the wrong device. - if err != nil && strings.Contains(err.Error(), "this node is not trusted by network lock") { + if err != nil && strings.Contains(err.Error(), tsconst.TailnetLockNotTrustedMsg) { fmt.Fprintln(Stderr, "Error: Signing is not available on this device because it does not have a trusted tailnet lock key.") fmt.Fprintln(Stderr) fmt.Fprintln(Stderr, "Try again on a signing device instead. Tailnet admins can see signing devices on the admin panel.") @@ -789,7 +808,7 @@ func runNetworkLockRevokeKeys(ctx context.Context, args []string) error { } fmt.Printf(`Run the following command on another machine with a trusted tailnet lock key: - %s lock recover-compromised-key --cosign %X + %s lock revoke-keys --cosign %X `, os.Args[0], aumBytes) return nil } @@ -813,10 +832,10 @@ func runNetworkLockRevokeKeys(ctx context.Context, args []string) error { fmt.Printf(`Co-signing completed successfully. 
To accumulate an additional signature, run the following command on another machine with a trusted tailnet lock key: - %s lock recover-compromised-key --cosign %X + %s lock revoke-keys --cosign %X Alternatively if you are done with co-signing, complete recovery by running the following command: - %s lock recover-compromised-key --finish %X + %s lock revoke-keys --finish %X `, os.Args[0], aumBytes, os.Args[0], aumBytes) } diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 80b011d04a908..ed43a51c4658e 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -5,11 +5,15 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy + github.com/coder/websocket from tailscale.com/control/controlhttp+ + github.com/coder/websocket/internal/errd from github.com/coder/websocket + github.com/coder/websocket/internal/util from github.com/coder/websocket + github.com/coder/websocket/internal/xsync from github.com/coder/websocket L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/pe+ W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/winutil/authenticode github.com/fxamacker/cbor/v2 from tailscale.com/tka - github.com/go-json-experiment/json from tailscale.com/types/opt + github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ @@ -66,10 +70,6 @@ tailscale.com/cmd/tailscale dependencies: (generated 
by github.com/tailscale/dep go4.org/netipx from tailscale.com/net/tsaddr W 💣 golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/net/netmon+ k8s.io/client-go/util/homedir from tailscale.com/cmd/tailscale/cli - nhooyr.io/websocket from tailscale.com/control/controlhttp+ - nhooyr.io/websocket/internal/errd from nhooyr.io/websocket - nhooyr.io/websocket/internal/util from nhooyr.io/websocket - nhooyr.io/websocket/internal/xsync from nhooyr.io/websocket sigs.k8s.io/yaml from tailscale.com/cmd/tailscale/cli sigs.k8s.io/yaml/goyaml.v2 from sigs.k8s.io/yaml software.sslmate.com/src/go-pkcs12 from tailscale.com/cmd/tailscale/cli @@ -100,9 +100,10 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/ipn/ipnstate from tailscale.com/client/tailscale+ tailscale.com/licenses from tailscale.com/client/web+ tailscale.com/metrics from tailscale.com/derp + tailscale.com/net/captivedetection from tailscale.com/net/netcheck tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dnscache from tailscale.com/control/controlhttp+ - tailscale.com/net/dnsfallback from tailscale.com/control/controlhttp + tailscale.com/net/dnsfallback from tailscale.com/control/controlhttp+ tailscale.com/net/flowtrack from tailscale.com/net/packet tailscale.com/net/netaddr from tailscale.com/ipn+ tailscale.com/net/netcheck from tailscale.com/cmd/tailscale/cli @@ -127,7 +128,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/tailcfg from tailscale.com/client/tailscale+ tailscale.com/tempfork/spf13/cobra from tailscale.com/cmd/tailscale/cli/ffcomplete+ tailscale.com/tka from tailscale.com/client/tailscale+ - W tailscale.com/tsconst from tailscale.com/net/netmon+ + tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tstime from tailscale.com/control/controlhttp+ tailscale.com/tstime/mono from tailscale.com/tstime/rate tailscale.com/tstime/rate from 
tailscale.com/cmd/tailscale/cli+ @@ -151,9 +152,11 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/cloudenv from tailscale.com/net/dnscache+ tailscale.com/util/cmpver from tailscale.com/net/tshttpproxy+ tailscale.com/util/ctxkey from tailscale.com/types/logger + 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/cmd/tailscale/cli+ tailscale.com/util/groupmember from tailscale.com/client/web + 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale+ tailscale.com/util/lineread from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/net/netns @@ -166,6 +169,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/singleflight from tailscale.com/net/dnscache+ tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ tailscale.com/util/syspolicy from tailscale.com/ipn + tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting + tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy tailscale.com/util/testenv from tailscale.com/cmd/tailscale/cli tailscale.com/util/truncate from tailscale.com/cmd/tailscale/cli tailscale.com/util/vizerror from tailscale.com/tailcfg+ @@ -190,7 +195,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/crypto/pbkdf2 from software.sslmate.com/src/go-pkcs12 golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ W golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ - golang.org/x/exp/maps from tailscale.com/cmd/tailscale/cli + golang.org/x/exp/maps from tailscale.com/cmd/tailscale/cli+ golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from 
net/http+ @@ -307,7 +312,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep reflect from archive/tar+ regexp from github.com/coreos/go-iptables/iptables+ regexp/syntax from regexp - runtime/debug from nhooyr.io/websocket/internal/xsync+ + runtime/debug from github.com/coder/websocket/internal/xsync+ slices from tailscale.com/client/web+ sort from archive/tar+ strconv from archive/tar+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 5b37778f8fcdc..b67533cfb57dc 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -79,6 +79,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm github.com/bits-and-blooms/bitset from github.com/gaissmai/bart + github.com/coder/websocket from tailscale.com/control/controlhttp+ + github.com/coder/websocket/internal/errd from github.com/coder/websocket + github.com/coder/websocket/internal/util from github.com/coder/websocket + github.com/coder/websocket/internal/xsync from github.com/coder/websocket L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw LD 💣 github.com/creack/pty from tailscale.com/ssh/tailssh W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ @@ -90,7 +94,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 github.com/djherbis/times from tailscale.com/drive/driveimpl github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/gaissmai/bart from tailscale.com/net/tstun+ - github.com/go-json-experiment/json from tailscale.com/types/opt + github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json/internal/jsonflags+ 
github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json/internal/jsonopts+ github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json/jsontext+ @@ -212,7 +216,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de gvisor.dev/gvisor/pkg/tcpip/header from gvisor.dev/gvisor/pkg/tcpip/header/parse+ gvisor.dev/gvisor/pkg/tcpip/header/parse from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ gvisor.dev/gvisor/pkg/tcpip/internal/tcp from gvisor.dev/gvisor/pkg/tcpip/stack+ - gvisor.dev/gvisor/pkg/tcpip/link/channel from tailscale.com/wgengine/netstack gvisor.dev/gvisor/pkg/tcpip/network/hash from gvisor.dev/gvisor/pkg/tcpip/network/ipv4 gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ gvisor.dev/gvisor/pkg/tcpip/network/internal/ip from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ @@ -222,6 +225,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+ 💣 gvisor.dev/gvisor/pkg/tcpip/stack from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ + gvisor.dev/gvisor/pkg/tcpip/stack/gro from tailscale.com/wgengine/netstack gvisor.dev/gvisor/pkg/tcpip/transport from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ gvisor.dev/gvisor/pkg/tcpip/transport/icmp from tailscale.com/wgengine/netstack gvisor.dev/gvisor/pkg/tcpip/transport/internal/network from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ @@ -232,10 +236,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack from gvisor.dev/gvisor/pkg/tcpip/stack gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ - nhooyr.io/websocket from 
tailscale.com/control/controlhttp+ - nhooyr.io/websocket/internal/errd from nhooyr.io/websocket - nhooyr.io/websocket/internal/util from nhooyr.io/websocket - nhooyr.io/websocket/internal/xsync from nhooyr.io/websocket tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal tailscale.com/atomicfile from tailscale.com/ipn+ @@ -288,6 +288,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logtail/backoff from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ @@ -329,6 +330,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/tailscale+ + LD tailscale.com/sessionrecording from tailscale.com/ssh/tailssh LD 💣 tailscale.com/ssh/tailssh from tailscale.com/cmd/tailscaled tailscale.com/syncs from tailscale.com/cmd/tailscaled+ tailscale.com/tailcfg from tailscale.com/client/tailscale+ @@ -336,7 +338,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de LD tailscale.com/tempfork/gliderlabs/ssh from tailscale.com/ssh/tailssh tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock tailscale.com/tka from tailscale.com/client/tailscale+ - W tailscale.com/tsconst from tailscale.com/net/netmon+ + tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tsd from tailscale.com/cmd/tailscaled+ tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ @@ -394,6 +396,8 @@ 
tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ tailscale.com/util/syspolicy from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting + tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index a2ab7271cb7f5..6dbf6c98231de 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -35,6 +35,7 @@ import ( "tailscale.com/control/controlclient" "tailscale.com/drive/driveimpl" "tailscale.com/envknob" + "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/conffile" "tailscale.com/ipn/ipnlocal" @@ -154,9 +155,11 @@ var beCLI func() // non-nil if CLI is linked in func main() { envknob.PanicIfAnyEnvCheckedInInit() envknob.ApplyDiskConfig() + applyIntegrationTestEnvKnob() + defaultVerbosity := envknob.RegisterInt("TS_LOG_VERBOSITY") printVersion := false - flag.IntVar(&args.verbose, "verbose", 0, "log verbosity level; 0 is default, 1 or higher are increasingly verbose") + flag.IntVar(&args.verbose, "verbose", defaultVerbosity(), "log verbosity level; 0 is default, 1 or higher are increasingly verbose") flag.BoolVar(&args.cleanUp, "cleanup", false, "clean up system state and exit") flag.StringVar(&args.debug, "debug", "", "listen address ([ip]:port) of optional debug server") flag.StringVar(&args.socksAddr, "socks5-server", "", `optional [ip]:port to run a SOCK5 server (e.g. "localhost:1080")`) @@ -394,7 +397,7 @@ func run() (err error) { // Always clean up, even if we're going to run the server. 
This covers cases // such as when a system was rebooted without shutting down, or tailscaled // crashed, and would for example restore system DNS configuration. - dns.CleanUp(logf, netMon, args.tunname) + dns.CleanUp(logf, netMon, sys.HealthTracker(), args.tunname) router.CleanUp(logf, netMon, args.tunname) // If the cleanUp flag was passed, then exit. if args.cleanUp { @@ -895,3 +898,24 @@ func dieOnPipeReadErrorOfFD(fd int) { f.Read(make([]byte, 1)) os.Exit(1) } + +// applyIntegrationTestEnvKnob applies the tailscaled.env=... environment +// variables specified on the Linux kernel command line, if the VM is being +// run in NATLab integration tests. +// +// They're specified as: tailscaled.env=FOO=bar tailscaled.env=BAR=baz +func applyIntegrationTestEnvKnob() { + if runtime.GOOS != "linux" || !hostinfo.IsNATLabGuestVM() { + return + } + cmdLine, _ := os.ReadFile("/proc/cmdline") + for _, s := range strings.Fields(string(cmdLine)) { + suf, ok := strings.CutPrefix(s, "tailscaled.env=") + if !ok { + continue + } + if k, v, ok := strings.Cut(suf, "="); ok { + envknob.Setenv(k, v) + } + } +} diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 43a3ae94b3fa9..3e077f9fdf782 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -7,6 +7,7 @@ package main import ( + "bytes" "context" crand "crypto/rand" "crypto/rsa" @@ -16,6 +17,7 @@ import ( "encoding/binary" "encoding/json" "encoding/pem" + "errors" "flag" "fmt" "io" @@ -25,6 +27,7 @@ import ( "net/netip" "net/url" "os" + "os/signal" "strconv" "strings" "sync" @@ -35,6 +38,7 @@ import ( "tailscale.com/client/tailscale" "tailscale.com/client/tailscale/apitype" "tailscale.com/envknob" + "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" "tailscale.com/tsnet" @@ -44,13 +48,22 @@ import ( "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/util/rands" + "tailscale.com/version" ) +// ctxConn is a key to look up a net.Conn stored in an HTTP request's context. 
+type ctxConn struct{} + +// funnelClientsFile is the file where client IDs and secrets for OIDC clients +// accessing the IDP over Funnel are persisted. +const funnelClientsFile = "oidc-funnel-clients.json" + var ( flagVerbose = flag.Bool("verbose", false, "be verbose") flagPort = flag.Int("port", 443, "port to listen on") flagLocalPort = flag.Int("local-port", -1, "allow requests from localhost") flagUseLocalTailscaled = flag.Bool("use-local-tailscaled", false, "use local tailscaled instead of tsnet") + flagFunnel = flag.Bool("funnel", false, "use Tailscale Funnel to make tsidp available on the public internet") ) func main() { @@ -61,9 +74,11 @@ func main() { } var ( - lc *tailscale.LocalClient - st *ipnstate.Status - err error + lc *tailscale.LocalClient + st *ipnstate.Status + err error + watcherChan chan error + cleanup func() lns []net.Listener ) @@ -90,6 +105,18 @@ func main() { if !anySuccess { log.Fatalf("failed to listen on any of %v", st.TailscaleIPs) } + + // tailscaled needs to be setting an HTTP header for funneled requests + // that older versions don't provide. + // TODO(naman): is this the correct check? + if *flagFunnel && !version.AtLeast(st.Version, "1.71.0") { + log.Fatalf("Local tailscaled not new enough to support -funnel. 
Update Tailscale or use tsnet mode.") + } + cleanup, watcherChan, err = serveOnLocalTailscaled(ctx, lc, st, uint16(*flagPort), *flagFunnel) + if err != nil { + log.Fatalf("could not serve on local tailscaled: %v", err) + } + defer cleanup() } else { ts := &tsnet.Server{ Hostname: "idp", @@ -105,7 +132,15 @@ func main() { if err != nil { log.Fatalf("getting local client: %v", err) } - ln, err := ts.ListenTLS("tcp", fmt.Sprintf(":%d", *flagPort)) + var ln net.Listener + if *flagFunnel { + if err := ipn.CheckFunnelAccess(uint16(*flagPort), st.Self); err != nil { + log.Fatalf("%v", err) + } + ln, err = ts.ListenFunnel("tcp", fmt.Sprintf(":%d", *flagPort)) + } else { + ln, err = ts.ListenTLS("tcp", fmt.Sprintf(":%d", *flagPort)) + } if err != nil { log.Fatal(err) } @@ -113,13 +148,26 @@ func main() { } srv := &idpServer{ - lc: lc, + lc: lc, + funnel: *flagFunnel, + localTSMode: *flagUseLocalTailscaled, } if *flagPort != 443 { srv.serverURL = fmt.Sprintf("https://%s:%d", strings.TrimSuffix(st.Self.DNSName, "."), *flagPort) } else { srv.serverURL = fmt.Sprintf("https://%s", strings.TrimSuffix(st.Self.DNSName, ".")) } + if *flagFunnel { + f, err := os.Open(funnelClientsFile) + if err == nil { + srv.funnelClients = make(map[string]*funnelClient) + if err := json.NewDecoder(f).Decode(&srv.funnelClients); err != nil { + log.Fatalf("could not parse %s: %v", funnelClientsFile, err) + } + } else if !errors.Is(err, os.ErrNotExist) { + log.Fatalf("could not open %s: %v", funnelClientsFile, err) + } + } log.Printf("Running tsidp at %s ...", srv.serverURL) @@ -134,35 +182,129 @@ func main() { } for _, ln := range lns { - go http.Serve(ln, srv) + server := http.Server{ + Handler: srv, + ConnContext: func(ctx context.Context, c net.Conn) context.Context { + return context.WithValue(ctx, ctxConn{}, c) + }, + } + go server.Serve(ln) + } + // need to catch os.Interrupt, otherwise deferred cleanup code doesn't run + exitChan := make(chan os.Signal, 1) + signal.Notify(exitChan, 
os.Interrupt) + select { + case <-exitChan: + log.Printf("interrupt, exiting") + return + case <-watcherChan: + if errors.Is(err, io.EOF) || errors.Is(err, context.Canceled) { + log.Printf("watcher closed, exiting") + return + } + log.Fatalf("watcher error: %v", err) + return + } +} + +// serveOnLocalTailscaled starts a serve session using an already-running +// tailscaled instead of starting a fresh tsnet server, making something +// listening on clientDNSName:dstPort accessible over serve/funnel. +func serveOnLocalTailscaled(ctx context.Context, lc *tailscale.LocalClient, st *ipnstate.Status, dstPort uint16, shouldFunnel bool) (cleanup func(), watcherChan chan error, err error) { + // In order to support funneling out in local tailscaled mode, we need + // to add a serve config to forward the listeners we bound above and + // allow those forwarders to be funneled out. + sc, err := lc.GetServeConfig(ctx) + if err != nil { + return nil, nil, fmt.Errorf("could not get serve config: %v", err) + } + if sc == nil { + sc = new(ipn.ServeConfig) + } + + // We watch the IPN bus just to get a session ID. The session expires + // when we stop watching the bus, and that auto-deletes the foreground + // serve/funnel configs we are creating below. 
+ watcher, err := lc.WatchIPNBus(ctx, ipn.NotifyInitialState|ipn.NotifyNoPrivateKeys) + if err != nil { + return nil, nil, fmt.Errorf("could not set up ipn bus watcher: %v", err) + } + defer func() { + if err != nil { + watcher.Close() + } + }() + n, err := watcher.Next() + if err != nil { + return nil, nil, fmt.Errorf("could not get initial state from ipn bus watcher: %v", err) + } + if n.SessionID == "" { + err = fmt.Errorf("missing sessionID in ipn.Notify") + return nil, nil, err + } + watcherChan = make(chan error) + go func() { + for { + _, err = watcher.Next() + if err != nil { + watcherChan <- err + return + } + } + }() + + // Create a foreground serve config that gets cleaned up when tsidp + // exits and the session ID associated with this config is invalidated. + foregroundSc := new(ipn.ServeConfig) + mak.Set(&sc.Foreground, n.SessionID, foregroundSc) + serverURL := strings.TrimSuffix(st.Self.DNSName, ".") + fmt.Printf("setting funnel for %s:%v\n", serverURL, dstPort) + + foregroundSc.SetFunnel(serverURL, dstPort, shouldFunnel) + foregroundSc.SetWebHandler(&ipn.HTTPHandler{ + Proxy: fmt.Sprintf("https://%s", net.JoinHostPort(serverURL, strconv.Itoa(int(dstPort)))), + }, serverURL, uint16(*flagPort), "/", true) + err = lc.SetServeConfig(ctx, sc) + if err != nil { + return nil, watcherChan, fmt.Errorf("could not set serve config: %v", err) } - select {} + + return func() { watcher.Close() }, watcherChan, nil } type idpServer struct { lc *tailscale.LocalClient loopbackURL string serverURL string // "https://foo.bar.ts.net" + funnel bool + localTSMode bool lazyMux lazy.SyncValue[*http.ServeMux] lazySigningKey lazy.SyncValue[*signingKey] lazySigner lazy.SyncValue[jose.Signer] - mu sync.Mutex // guards the fields below - code map[string]*authRequest // keyed by random hex - accessToken map[string]*authRequest // keyed by random hex + mu sync.Mutex // guards the fields below + code map[string]*authRequest // keyed by random hex + accessToken 
map[string]*authRequest // keyed by random hex + funnelClients map[string]*funnelClient // keyed by client ID } type authRequest struct { // localRP is true if the request is from a relying party running on the - // same machine as the idp server. It is mutually exclusive with rpNodeID. + // same machine as the idp server. It is mutually exclusive with rpNodeID + // and funnelRP. localRP bool // rpNodeID is the NodeID of the relying party (who requested the auth, such // as Proxmox or Synology), not the user node who is being authenticated. It - // is mutually exclusive with localRP. + // is mutually exclusive with localRP and funnelRP. rpNodeID tailcfg.NodeID + // funnelRP is non-nil if the request is from a relying party outside the + // tailnet, via Tailscale Funnel. It is mutually exclusive with rpNodeID + // and localRP. + funnelRP *funnelClient + // clientID is the "client_id" sent in the authorized request. clientID string @@ -181,9 +323,12 @@ type authRequest struct { validTill time.Time } -func (ar *authRequest) allowRelyingParty(ctx context.Context, remoteAddr string, lc *tailscale.LocalClient) error { +// allowRelyingParty validates that a relying party identified either by a +// known remoteAddr or a valid client ID/secret pair is allowed to proceed +// with the authorization flow associated with this authRequest. 
+func (ar *authRequest) allowRelyingParty(r *http.Request, lc *tailscale.LocalClient) error { if ar.localRP { - ra, err := netip.ParseAddrPort(remoteAddr) + ra, err := netip.ParseAddrPort(r.RemoteAddr) if err != nil { return err } @@ -192,7 +337,18 @@ func (ar *authRequest) allowRelyingParty(ctx context.Context, remoteAddr string, } return nil } - who, err := lc.WhoIs(ctx, remoteAddr) + if ar.funnelRP != nil { + clientID, clientSecret, ok := r.BasicAuth() + if !ok { + clientID = r.FormValue("client_id") + clientSecret = r.FormValue("client_secret") + } + if ar.funnelRP.ID != clientID || ar.funnelRP.Secret != clientSecret { + return fmt.Errorf("tsidp: invalid client credentials") + } + return nil + } + who, err := lc.WhoIs(r.Context(), r.RemoteAddr) if err != nil { return fmt.Errorf("tsidp: error getting WhoIs: %w", err) } @@ -203,24 +359,60 @@ func (ar *authRequest) allowRelyingParty(ctx context.Context, remoteAddr string, } func (s *idpServer) authorize(w http.ResponseWriter, r *http.Request) { - who, err := s.lc.WhoIs(r.Context(), r.RemoteAddr) + // This URL is visited by the user who is being authenticated. If they are + // visiting the URL over Funnel, that means they are not part of the + // tailnet that they are trying to be authenticated for. + if isFunnelRequest(r) { + http.Error(w, "tsidp: unauthorized", http.StatusUnauthorized) + return + } + + uq := r.URL.Query() + + redirectURI := uq.Get("redirect_uri") + if redirectURI == "" { + http.Error(w, "tsidp: must specify redirect_uri", http.StatusBadRequest) + return + } + + var remoteAddr string + if s.localTSMode { + // in local tailscaled mode, the local tailscaled is forwarding us + // HTTP requests, so reading r.RemoteAddr will just get us our own + // address. 
+ remoteAddr = r.Header.Get("X-Forwarded-For") + } else { + remoteAddr = r.RemoteAddr + } + who, err := s.lc.WhoIs(r.Context(), remoteAddr) if err != nil { log.Printf("Error getting WhoIs: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } - uq := r.URL.Query() - code := rands.HexString(32) ar := &authRequest{ nonce: uq.Get("nonce"), remoteUser: who, - redirectURI: uq.Get("redirect_uri"), + redirectURI: redirectURI, clientID: uq.Get("client_id"), } - if r.URL.Path == "/authorize/localhost" { + if r.URL.Path == "/authorize/funnel" { + s.mu.Lock() + c, ok := s.funnelClients[ar.clientID] + s.mu.Unlock() + if !ok { + http.Error(w, "tsidp: invalid client ID", http.StatusBadRequest) + return + } + if ar.redirectURI != c.RedirectURI { + http.Error(w, "tsidp: redirect_uri mismatch", http.StatusBadRequest) + return + } + ar.funnelRP = c + } else if r.URL.Path == "/authorize/localhost" { ar.localRP = true } else { var ok bool @@ -237,8 +429,10 @@ func (s *idpServer) authorize(w http.ResponseWriter, r *http.Request) { q := make(url.Values) q.Set("code", code) - q.Set("state", uq.Get("state")) - u := uq.Get("redirect_uri") + "?" + q.Encode() + if state := uq.Get("state"); state != "" { + q.Set("state", state) + } + u := redirectURI + "?" + q.Encode() log.Printf("Redirecting to %q", u) http.Redirect(w, r, u, http.StatusFound) @@ -251,6 +445,7 @@ func (s *idpServer) newMux() *http.ServeMux { mux.HandleFunc("/authorize/", s.authorize) mux.HandleFunc("/userinfo", s.serveUserInfo) mux.HandleFunc("/token", s.serveToken) + mux.HandleFunc("/clients/", s.serveClients) mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/" { io.WriteString(w, "

Tailscale OIDC IdP

") @@ -284,11 +479,6 @@ func (s *idpServer) serveUserInfo(w http.ResponseWriter, r *http.Request) { http.Error(w, "tsidp: invalid token", http.StatusBadRequest) return } - if err := ar.allowRelyingParty(r.Context(), r.RemoteAddr, s.lc); err != nil { - log.Printf("Error allowing relying party: %v", err) - http.Error(w, err.Error(), http.StatusForbidden) - return - } if ar.validTill.Before(time.Now()) { http.Error(w, "tsidp: token expired", http.StatusBadRequest) @@ -348,7 +538,7 @@ func (s *idpServer) serveToken(w http.ResponseWriter, r *http.Request) { http.Error(w, "tsidp: code not found", http.StatusBadRequest) return } - if err := ar.allowRelyingParty(r.Context(), r.RemoteAddr, s.lc); err != nil { + if err := ar.allowRelyingParty(r, s.lc); err != nil { log.Printf("Error allowing relying party: %v", err) http.Error(w, err.Error(), http.StatusForbidden) return @@ -581,7 +771,9 @@ func (s *idpServer) serveOpenIDConfig(w http.ResponseWriter, r *http.Request) { } var authorizeEndpoint string rpEndpoint := s.serverURL - if who, err := s.lc.WhoIs(r.Context(), r.RemoteAddr); err == nil { + if isFunnelRequest(r) { + authorizeEndpoint = fmt.Sprintf("%s/authorize/funnel", s.serverURL) + } else if who, err := s.lc.WhoIs(r.Context(), r.RemoteAddr); err == nil { authorizeEndpoint = fmt.Sprintf("%s/authorize/%d", s.serverURL, who.Node.ID) } else if ap.Addr().IsLoopback() { rpEndpoint = s.loopbackURL @@ -611,6 +803,148 @@ func (s *idpServer) serveOpenIDConfig(w http.ResponseWriter, r *http.Request) { } } +// funnelClient represents an OIDC client/relying party that is accessing the +// IDP over Funnel. +type funnelClient struct { + ID string `json:"client_id"` + Secret string `json:"client_secret,omitempty"` + Name string `json:"name,omitempty"` + RedirectURI string `json:"redirect_uri"` +} + +// /clients is a privileged endpoint that allows the visitor to create new +// Funnel-capable OIDC clients, so it is only accessible over the tailnet. 
+func (s *idpServer) serveClients(w http.ResponseWriter, r *http.Request) { + if isFunnelRequest(r) { + http.Error(w, "tsidp: not found", http.StatusNotFound) + return + } + + path := strings.TrimPrefix(r.URL.Path, "/clients/") + + if path == "new" { + s.serveNewClient(w, r) + return + } + + if path == "" { + s.serveGetClientsList(w, r) + return + } + + s.mu.Lock() + c, ok := s.funnelClients[path] + s.mu.Unlock() + if !ok { + http.Error(w, "tsidp: not found", http.StatusNotFound) + return + } + + switch r.Method { + case "DELETE": + s.serveDeleteClient(w, r, path) + case "GET": + json.NewEncoder(w).Encode(&funnelClient{ + ID: c.ID, + Name: c.Name, + Secret: "", + RedirectURI: c.RedirectURI, + }) + default: + http.Error(w, "tsidp: method not allowed", http.StatusMethodNotAllowed) + } +} + +func (s *idpServer) serveNewClient(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + http.Error(w, "tsidp: method not allowed", http.StatusMethodNotAllowed) + return + } + redirectURI := r.FormValue("redirect_uri") + if redirectURI == "" { + http.Error(w, "tsidp: must provide redirect_uri", http.StatusBadRequest) + return + } + clientID := rands.HexString(32) + clientSecret := rands.HexString(64) + newClient := funnelClient{ + ID: clientID, + Secret: clientSecret, + Name: r.FormValue("name"), + RedirectURI: redirectURI, + } + s.mu.Lock() + defer s.mu.Unlock() + mak.Set(&s.funnelClients, clientID, &newClient) + if err := s.storeFunnelClientsLocked(); err != nil { + log.Printf("could not write funnel clients db: %v", err) + http.Error(w, "tsidp: could not write funnel clients to db", http.StatusInternalServerError) + // delete the new client to avoid inconsistent state between memory + // and disk + delete(s.funnelClients, clientID) + return + } + json.NewEncoder(w).Encode(newClient) +} + +func (s *idpServer) serveGetClientsList(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + http.Error(w, "tsidp: method not allowed", 
http.StatusMethodNotAllowed) + return + } + s.mu.Lock() + redactedClients := make([]funnelClient, 0, len(s.funnelClients)) + for _, c := range s.funnelClients { + redactedClients = append(redactedClients, funnelClient{ + ID: c.ID, + Name: c.Name, + Secret: "", + RedirectURI: c.RedirectURI, + }) + } + s.mu.Unlock() + json.NewEncoder(w).Encode(redactedClients) +} + +func (s *idpServer) serveDeleteClient(w http.ResponseWriter, r *http.Request, clientID string) { + if r.Method != "DELETE" { + http.Error(w, "tsidp: method not allowed", http.StatusMethodNotAllowed) + return + } + s.mu.Lock() + defer s.mu.Unlock() + if s.funnelClients == nil { + http.Error(w, "tsidp: client not found", http.StatusNotFound) + return + } + if _, ok := s.funnelClients[clientID]; !ok { + http.Error(w, "tsidp: client not found", http.StatusNotFound) + return + } + deleted := s.funnelClients[clientID] + delete(s.funnelClients, clientID) + if err := s.storeFunnelClientsLocked(); err != nil { + log.Printf("could not write funnel clients db: %v", err) + http.Error(w, "tsidp: could not write funnel clients to db", http.StatusInternalServerError) + // restore the deleted value to avoid inconsistent state between memory + // and disk + s.funnelClients[clientID] = deleted + return + } + w.WriteHeader(http.StatusNoContent) +} + +// storeFunnelClientsLocked writes the current mapping of OIDC client ID/secret +// pairs for RPs that access the IDP over funnel. s.mu must be held while +// calling this. +func (s *idpServer) storeFunnelClientsLocked() error { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(s.funnelClients); err != nil { + return err + } + return os.WriteFile(funnelClientsFile, buf.Bytes(), 0600) +} + const ( minimumRSAKeySize = 2048 ) @@ -700,3 +1034,24 @@ func parseID[T ~int64](input string) (_ T, ok bool) { } return T(i), true } + +// isFunnelRequest checks if an HTTP request is coming over Tailscale Funnel. 
+func isFunnelRequest(r *http.Request) bool { + // If we're funneling through the local tailscaled, it will set this HTTP + // header. + if r.Header.Get("Tailscale-Funnel-Request") != "" { + return true + } + + // If the funneled connection is from tsnet, then the net.Conn will be of + // type ipn.FunnelConn. + netConn := r.Context().Value(ctxConn{}) + // if the conn is wrapped inside TLS, unwrap it + if tlsConn, ok := netConn.(*tls.Conn); ok { + netConn = tlsConn.NetConn() + } + if _, ok := netConn.(*ipn.FunnelConn); ok { + return true + } + return false +} diff --git a/cmd/tta/fw_linux.go b/cmd/tta/fw_linux.go new file mode 100644 index 0000000000000..a4ceabad8bc05 --- /dev/null +++ b/cmd/tta/fw_linux.go @@ -0,0 +1,128 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "encoding/binary" + + "github.com/google/nftables" + "github.com/google/nftables/expr" + "tailscale.com/types/ptr" +) + +func init() { + addFirewall = addFirewallLinux +} + +func addFirewallLinux() error { + c, err := nftables.New() + if err != nil { + return err + } + + // Create a new table + table := &nftables.Table{ + Family: nftables.TableFamilyIPv4, // TableFamilyINet doesn't work (why?. oh well.) 
+ Name: "filter", + } + c.AddTable(table) + + // Create a new chain for incoming traffic + inputChain := &nftables.Chain{ + Name: "input", + Table: table, + Type: nftables.ChainTypeFilter, + Hooknum: nftables.ChainHookInput, + Priority: nftables.ChainPriorityFilter, + Policy: ptr.To(nftables.ChainPolicyDrop), + } + c.AddChain(inputChain) + + // Allow traffic from the loopback interface + c.AddRule(&nftables.Rule{ + Table: table, + Chain: inputChain, + Exprs: []expr.Any{ + &expr.Meta{Key: expr.MetaKeyIIFNAME, Register: 1}, + &expr.Cmp{ + Op: expr.CmpOpEq, + Register: 1, + Data: []byte("lo"), + }, + &expr.Verdict{ + Kind: expr.VerdictAccept, + }, + }, + }) + + // Accept established and related connections + c.AddRule(&nftables.Rule{ + Table: table, + Chain: inputChain, + Exprs: []expr.Any{ + &expr.Ct{ + Register: 1, + Key: expr.CtKeySTATE, + }, + &expr.Bitwise{ + SourceRegister: 1, + DestRegister: 1, + Len: 4, + Mask: binary.NativeEndian.AppendUint32(nil, 0x06), // CT_STATE_BIT_ESTABLISHED | CT_STATE_BIT_RELATED + Xor: binary.NativeEndian.AppendUint32(nil, 0), + }, + &expr.Cmp{ + Op: expr.CmpOpNeq, + Register: 1, + Data: binary.NativeEndian.AppendUint32(nil, 0x00), + }, + &expr.Verdict{ + Kind: expr.VerdictAccept, + }, + }, + }) + + // Allow TCP packets in that don't have the SYN bit set, even if they're not + // ESTABLISHED or RELATED. This is because the test suite gets TCP + // connections up & idle (for HTTP) before it conditionally installs these + // firewall rules. But because conntrack wasn't previously active, existing + // TCP flows aren't ESTABLISHED and get dropped. So this rule allows + // previously established TCP connections that predates the firewall rules + // to continue working, as they don't have conntrack state. 
+	c.AddRule(&nftables.Rule{
+		Table: table,
+		Chain: inputChain,
+		Exprs: []expr.Any{
+			&expr.Meta{Key: expr.MetaKeyL4PROTO, Register: 1},
+			&expr.Cmp{
+				Op:       expr.CmpOpEq,
+				Register: 1,
+				Data:     []byte{0x06}, // TCP
+			},
+			&expr.Payload{ // get TCP flags
+				DestRegister: 1,
+				Base:         2,
+				Offset:       13, // flags
+				Len:          1,
+			},
+			&expr.Bitwise{
+				SourceRegister: 1,
+				DestRegister:   1,
+				Len:            1,
+				Mask:           []byte{2}, // TCP_SYN
+				Xor:            []byte{0},
+			},
+			&expr.Cmp{
+				Op:       expr.CmpOpNeq,
+				Register: 1,
+				Data:     []byte{2}, // TCP_SYN
+			},
+			&expr.Verdict{
+				Kind: expr.VerdictAccept,
+			},
+		},
+	})
+
+	return c.Flush()
+}
diff --git a/cmd/tta/tta.go b/cmd/tta/tta.go
new file mode 100644
index 0000000000000..6a676b0d20889
--- /dev/null
+++ b/cmd/tta/tta.go
@@ -0,0 +1,238 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+// The tta server is the Tailscale Test Agent.
+//
+// It runs on each Tailscale node being integration tested and permits the test
+// harness to control the node. It connects out to the test driver (rather than
+// accepting any TCP connections inbound, which might be blocked depending on
+// the scenario being tested) and then the test driver turns the TCP connection
+// around and sends requests back.
+package main + +import ( + "context" + "errors" + "flag" + "io" + "log" + "net" + "net/http" + "net/http/httputil" + "net/url" + "os" + "os/exec" + "regexp" + "strings" + "sync" + "time" + + "tailscale.com/client/tailscale" + "tailscale.com/hostinfo" + "tailscale.com/util/must" + "tailscale.com/util/set" + "tailscale.com/version/distro" +) + +var ( + driverAddr = flag.String("driver", "test-driver.tailscale:8008", "address of the test driver; by default we use the DNS name test-driver.tailscale which is special cased in the emulated network's DNS server") +) + +func absify(cmd string) string { + if distro.Get() == distro.Gokrazy && !strings.Contains(cmd, "/") { + return "/user/" + cmd + } + return cmd +} + +func serveCmd(w http.ResponseWriter, cmd string, args ...string) { + log.Printf("Got serveCmd for %q %v", cmd, args) + out, err := exec.Command(absify(cmd), args...).CombinedOutput() + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + if err != nil { + w.Header().Set("Exec-Err", err.Error()) + w.WriteHeader(500) + log.Printf("Err on serveCmd for %q %v, %d bytes of output: %v", cmd, args, len(out), err) + } else { + log.Printf("Did serveCmd for %q %v, %d bytes of output", cmd, args, len(out)) + } + w.Write(out) +} + +type localClientRoundTripper struct { + lc tailscale.LocalClient +} + +func (rt *localClientRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + req = req.Clone(req.Context()) + req.RequestURI = "" + return rt.lc.DoLocalRequest(req) +} + +func main() { + if distro.Get() == distro.Gokrazy { + if !hostinfo.IsNATLabGuestVM() { + // "Exiting immediately with status code 0 when the + // GOKRAZY_FIRST_START=1 environment variable is set means “don’t + // start the program on boot”" + return + } + } + flag.Parse() + + if distro.Get() == distro.Gokrazy { + nsRx := regexp.MustCompile(`(?m)^nameserver (.*)`) + for t := time.Now(); time.Since(t) < 10*time.Second; time.Sleep(10 * time.Millisecond) { + all, _ := 
os.ReadFile("/etc/resolv.conf") + if nsRx.Match(all) { + break + } + } + } + + logc, err := net.Dial("tcp", "9.9.9.9:124") + if err == nil { + log.SetOutput(logc) + } + + log.Printf("Tailscale Test Agent running.") + + gokRP := httputil.NewSingleHostReverseProxy(must.Get(url.Parse("http://gokrazy"))) + gokRP.Transport = &http.Transport{ + DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { + if network != "tcp" { + return nil, errors.New("unexpected network") + } + if addr != "gokrazy:80" { + return nil, errors.New("unexpected addr") + } + var d net.Dialer + return d.DialContext(ctx, "unix", "/run/gokrazy-http.sock") + }, + } + + var ttaMux http.ServeMux // agent mux + var serveMux http.ServeMux + serveMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("X-TTA-GoKrazy") == "1" { + gokRP.ServeHTTP(w, r) + return + } + ttaMux.ServeHTTP(w, r) + }) + var hs http.Server + hs.Handler = &serveMux + var ( + stMu sync.Mutex + newSet = set.Set[net.Conn]{} // conns in StateNew + ) + needConnCh := make(chan bool, 1) + hs.ConnState = func(c net.Conn, s http.ConnState) { + stMu.Lock() + defer stMu.Unlock() + oldLen := len(newSet) + switch s { + case http.StateNew: + newSet.Add(c) + default: + newSet.Delete(c) + } + if oldLen != 0 && len(newSet) == 0 { + select { + case needConnCh <- true: + default: + } + } + } + conns := make(chan net.Conn, 1) + + lcRP := httputil.NewSingleHostReverseProxy(must.Get(url.Parse("http://local-tailscaled.sock"))) + lcRP.Transport = new(localClientRoundTripper) + ttaMux.HandleFunc("/localapi/", func(w http.ResponseWriter, r *http.Request) { + log.Printf("Got localapi request: %v", r.URL) + t0 := time.Now() + lcRP.ServeHTTP(w, r) + log.Printf("Did localapi request in %v: %v", time.Since(t0).Round(time.Millisecond), r.URL) + }) + + ttaMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "TTA\n") + return + }) + ttaMux.HandleFunc("/up", func(w 
http.ResponseWriter, r *http.Request) { + serveCmd(w, "tailscale", "up", "--login-server=http://control.tailscale") + }) + ttaMux.HandleFunc("/fw", addFirewallHandler) + + go hs.Serve(chanListener(conns)) + + // For doing agent operations locally from gokrazy: + // (e.g. with "wget -O - localhost:8123/fw") + go func() { + err := http.ListenAndServe("127.0.0.1:8123", &ttaMux) + if err != nil { + log.Fatalf("ListenAndServe: %v", err) + } + }() + + var lastErr string + needConnCh <- true + for { + <-needConnCh + c, err := connect() + if err != nil { + s := err.Error() + if s != lastErr { + log.Printf("Connect failure: %v", s) + } + lastErr = s + time.Sleep(time.Second) + continue + } + conns <- c + } +} + +func connect() (net.Conn, error) { + c, err := net.Dial("tcp", *driverAddr) + if err != nil { + return nil, err + } + return c, nil +} + +type chanListener <-chan net.Conn + +func (cl chanListener) Accept() (net.Conn, error) { + c, ok := <-cl + if !ok { + return nil, errors.New("closed") + } + return c, nil +} + +func (cl chanListener) Close() error { + return nil +} + +func (cl chanListener) Addr() net.Addr { + return &net.TCPAddr{ + IP: net.ParseIP("52.0.0.34"), // TS..DR(iver) + Port: 123, + } +} + +func addFirewallHandler(w http.ResponseWriter, r *http.Request) { + if addFirewall == nil { + http.Error(w, "firewall not supported", 500) + return + } + err := addFirewall() + if err != nil { + http.Error(w, err.Error(), 500) + return + } + io.WriteString(w, "OK\n") +} + +var addFirewall func() error // set by fw_linux.go diff --git a/cmd/viewer/tests/tests.go b/cmd/viewer/tests/tests.go index ed4d6914a1168..1f1ec05573624 100644 --- a/cmd/viewer/tests/tests.go +++ b/cmd/viewer/tests/tests.go @@ -9,10 +9,11 @@ import ( "net/netip" "golang.org/x/exp/constraints" + "tailscale.com/types/ptr" "tailscale.com/types/views" ) -//go:generate go run tailscale.com/cmd/viewer 
--type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct --clone-only-type=OnlyGetClone +//go:generate go run tailscale.com/cmd/viewer --type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct,StructWithContainers --clone-only-type=OnlyGetClone type StructWithoutPtrs struct { Int int @@ -114,3 +115,90 @@ type GenericCloneableStruct[T views.ViewCloner[T, V], V views.StructView[T]] str PtrValueMap map[string]*T SliceMap map[string][]T } + +// Container is a pre-defined container type, such as a collection, an optional +// value or a generic wrapper. +type Container[T any] struct { + Item T +} + +func (c *Container[T]) Clone() *Container[T] { + if c == nil { + return nil + } + if cloner, ok := any(c.Item).(views.Cloner[T]); ok { + return &Container[T]{cloner.Clone()} + } + if !views.ContainsPointers[T]() { + return ptr.To(*c) + } + panic(fmt.Errorf("%T contains pointers, but is not cloneable", c.Item)) +} + +// ContainerView is a pre-defined readonly view of a Container[T]. +type ContainerView[T views.ViewCloner[T, V], V views.StructView[T]] struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *Container[T] +} + +func (cv ContainerView[T, V]) Item() V { + return cv.ж.Item.View() +} + +func ContainerViewOf[T views.ViewCloner[T, V], V views.StructView[T]](c *Container[T]) ContainerView[T, V] { + return ContainerView[T, V]{c} +} + +// MapContainer is a predefined map-like container type. +// Unlike [Container], it has two type parameters, where the value +// is the second parameter. 
+type MapContainer[K comparable, V views.Cloner[V]] struct {
+	Items map[K]V
+}
+
+func (c *MapContainer[K, V]) Clone() *MapContainer[K, V] {
+	if c == nil {
+		return nil
+	}
+	var m map[K]V
+	if c.Items != nil {
+		m = make(map[K]V, len(c.Items))
+		for i := range c.Items {
+			m[i] = c.Items[i].Clone()
+		}
+	}
+	return &MapContainer[K, V]{m}
+}
+
+// MapContainerView is a pre-defined readonly view of a [MapContainer][K, T].
+type MapContainerView[K comparable, T views.ViewCloner[T, V], V views.StructView[T]] struct {
+	// ж is the underlying mutable value, named with a hard-to-type
+	// character that looks pointy like a pointer.
+	// It is named distinctively to make you think of how dangerous it is to escape
+	// to callers. You must not let callers be able to mutate it.
+	ж *MapContainer[K, T]
+}
+
+func (cv MapContainerView[K, T, V]) Items() views.MapFn[K, T, V] {
+	return views.MapFnOf(cv.ж.Items, func(t T) V { return t.View() })
+}
+
+func MapContainerViewOf[K comparable, T views.ViewCloner[T, V], V views.StructView[T]](c *MapContainer[K, T]) MapContainerView[K, T, V] {
+	return MapContainerView[K, T, V]{c}
+}
+
+type GenericBasicStruct[T BasicType] struct {
+	Value T
+}
+
+type StructWithContainers struct {
+	IntContainer              Container[int]
+	CloneableContainer        Container[*StructWithPtrs]
+	BasicGenericContainer     Container[GenericBasicStruct[int]]
+	CloneableGenericContainer Container[*GenericNoPtrsStruct[int]]
+	CloneableMap              MapContainer[int, *StructWithPtrs]
+	CloneableGenericMap       MapContainer[int, *GenericNoPtrsStruct[int]]
+}
diff --git a/cmd/viewer/tests/tests_clone.go b/cmd/viewer/tests/tests_clone.go
index ec5631da9f213..53e6bacfb1eea 100644
--- a/cmd/viewer/tests/tests_clone.go
+++ b/cmd/viewer/tests/tests_clone.go
@@ -416,3 +416,28 @@ func _GenericCloneableStructCloneNeedsRegeneration[T views.ViewCloner[T, V], V v
 		SliceMap     map[string][]T
 	}{})
 }
+
+// Clone makes a deep copy of StructWithContainers.
+// The result aliases no memory with the original.
+func (src *StructWithContainers) Clone() *StructWithContainers { + if src == nil { + return nil + } + dst := new(StructWithContainers) + *dst = *src + dst.CloneableContainer = *src.CloneableContainer.Clone() + dst.CloneableGenericContainer = *src.CloneableGenericContainer.Clone() + dst.CloneableMap = *src.CloneableMap.Clone() + dst.CloneableGenericMap = *src.CloneableGenericMap.Clone() + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _StructWithContainersCloneNeedsRegeneration = StructWithContainers(struct { + IntContainer Container[int] + CloneableContainer Container[*StructWithPtrs] + BasicGenericContainer Container[GenericBasicStruct[int]] + CloneableGenericContainer Container[*GenericNoPtrsStruct[int]] + CloneableMap MapContainer[int, *StructWithPtrs] + CloneableGenericMap MapContainer[int, *GenericNoPtrsStruct[int]] +}{}) diff --git a/cmd/viewer/tests/tests_view.go b/cmd/viewer/tests/tests_view.go index 9a337f5aafbb4..cf07dc663bf46 100644 --- a/cmd/viewer/tests/tests_view.go +++ b/cmd/viewer/tests/tests_view.go @@ -14,7 +14,7 @@ import ( "tailscale.com/types/views" ) -//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct +//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct,StructWithContainers // View returns a readonly view of StructWithPtrs. func (p *StructWithPtrs) View() StructWithPtrsView { @@ -604,3 +604,75 @@ func _GenericCloneableStructViewNeedsRegeneration[T views.ViewCloner[T, V], V vi SliceMap map[string][]T }{}) } + +// View returns a readonly view of StructWithContainers. 
+func (p *StructWithContainers) View() StructWithContainersView { + return StructWithContainersView{ж: p} +} + +// StructWithContainersView provides a read-only view over StructWithContainers. +// +// Its methods should only be called if `Valid()` returns true. +type StructWithContainersView struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *StructWithContainers +} + +// Valid reports whether underlying value is non-nil. +func (v StructWithContainersView) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. +func (v StructWithContainersView) AsStruct() *StructWithContainers { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +func (v StructWithContainersView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } + +func (v *StructWithContainersView) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x StructWithContainers + if err := json.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +func (v StructWithContainersView) IntContainer() Container[int] { return v.ж.IntContainer } +func (v StructWithContainersView) CloneableContainer() ContainerView[*StructWithPtrs, StructWithPtrsView] { + return ContainerViewOf(&v.ж.CloneableContainer) +} +func (v StructWithContainersView) BasicGenericContainer() Container[GenericBasicStruct[int]] { + return v.ж.BasicGenericContainer +} +func (v StructWithContainersView) CloneableGenericContainer() ContainerView[*GenericNoPtrsStruct[int], GenericNoPtrsStructView[int]] { + return ContainerViewOf(&v.ж.CloneableGenericContainer) +} +func (v StructWithContainersView) CloneableMap() MapContainerView[int, 
*StructWithPtrs, StructWithPtrsView] { + return MapContainerViewOf(&v.ж.CloneableMap) +} +func (v StructWithContainersView) CloneableGenericMap() MapContainerView[int, *GenericNoPtrsStruct[int], GenericNoPtrsStructView[int]] { + return MapContainerViewOf(&v.ж.CloneableGenericMap) +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _StructWithContainersViewNeedsRegeneration = StructWithContainers(struct { + IntContainer Container[int] + CloneableContainer Container[*StructWithPtrs] + BasicGenericContainer Container[GenericBasicStruct[int]] + CloneableGenericContainer Container[*GenericNoPtrsStruct[int]] + CloneableMap MapContainer[int, *StructWithPtrs] + CloneableGenericMap MapContainer[int, *GenericNoPtrsStruct[int]] +}{}) diff --git a/cmd/viewer/viewer.go b/cmd/viewer/viewer.go index d2be6af66db59..2e122a128e2c8 100644 --- a/cmd/viewer/viewer.go +++ b/cmd/viewer/viewer.go @@ -13,9 +13,11 @@ import ( "html/template" "log" "os" + "slices" "strings" "tailscale.com/util/codegen" + "tailscale.com/util/must" ) const viewTemplateStr = `{{define "common"}} @@ -75,6 +77,8 @@ func (v *{{.ViewName}}{{.TypeParamNames}}) UnmarshalJSON(b []byte) error { {{end}} {{define "viewField"}}func (v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() {{.FieldViewName}} { return v.ж.{{.FieldName}}.View() } {{end}} +{{define "makeViewField"}}func (v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() {{.FieldViewName}} { return {{.MakeViewFnName}}(&v.ж.{{.FieldName}}) } +{{end}} {{define "valuePointerField"}}func (v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() {{.FieldType}} { if v.ж.{{.FieldName}} == nil { return nil @@ -144,6 +148,9 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, thi MapValueType string MapValueView string MapFn string + + // MakeViewFnName is the name of the function that accepts a value and returns a readonly view of it. 
+ MakeViewFnName string }{ StructName: typ.Obj().Name(), ViewName: typ.Origin().Obj().Name() + "View", @@ -227,8 +234,18 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, thi strucT := underlying args.FieldType = it.QualifiedName(fieldType) if codegen.ContainsPointers(strucT) { - args.FieldViewName = appendNameSuffix(args.FieldType, "View") - writeTemplate("viewField") + if viewType := viewTypeForValueType(fieldType); viewType != nil { + args.FieldViewName = it.QualifiedName(viewType) + writeTemplate("viewField") + continue + } + if viewType, makeViewFn := viewTypeForContainerType(fieldType); viewType != nil { + args.FieldViewName = it.QualifiedName(viewType) + args.MakeViewFnName = it.PackagePrefix(makeViewFn.Pkg()) + makeViewFn.Name() + writeTemplate("makeViewField") + continue + } + writeTemplate("unsupportedField") continue } writeTemplate("valueField") @@ -388,6 +405,9 @@ func appendNameSuffix(name, suffix string) string { } func viewTypeForValueType(typ types.Type) types.Type { + if ptr, ok := typ.(*types.Pointer); ok { + return viewTypeForValueType(ptr.Elem()) + } viewMethod := codegen.LookupMethod(typ, "View") if viewMethod == nil { return nil @@ -399,12 +419,116 @@ func viewTypeForValueType(typ types.Type) types.Type { return sig.Results().At(0).Type() } +func viewTypeForContainerType(typ types.Type) (*types.Named, *types.Func) { + // The container type should be an instantiated generic type, + // with its first type parameter specifying the element type. + containerType, ok := typ.(*types.Named) + if !ok || containerType.TypeArgs().Len() == 0 { + return nil, nil + } + + // Look up the view type for the container type. + // It must include an additional type parameter specifying the element's view type. + // For example, Container[T] => ContainerView[T, V]. 
+ containerViewTypeName := containerType.Obj().Name() + "View" + containerViewTypeObj, ok := containerType.Obj().Pkg().Scope().Lookup(containerViewTypeName).(*types.TypeName) + if !ok { + return nil, nil + } + containerViewGenericType, ok := containerViewTypeObj.Type().(*types.Named) + if !ok || containerViewGenericType.TypeParams().Len() != containerType.TypeArgs().Len()+1 { + return nil, nil + } + + // Create a list of type arguments for instantiating the container view type. + // Include all type arguments specified for the container type... + containerViewTypeArgs := make([]types.Type, containerViewGenericType.TypeParams().Len()) + for i := range containerType.TypeArgs().Len() { + containerViewTypeArgs[i] = containerType.TypeArgs().At(i) + } + // ...and add the element view type. + // For that, we need to first determine the named elem type... + elemType, ok := baseType(containerType.TypeArgs().At(containerType.TypeArgs().Len() - 1)).(*types.Named) + if !ok { + return nil, nil + } + // ...then infer the view type from it. + var elemViewType *types.Named + elemTypeName := elemType.Obj().Name() + elemViewTypeBaseName := elemType.Obj().Name() + "View" + if elemViewTypeName, ok := elemType.Obj().Pkg().Scope().Lookup(elemViewTypeBaseName).(*types.TypeName); ok { + // The elem's view type is already defined in the same package as the elem type. + elemViewType = elemViewTypeName.Type().(*types.Named) + } else if slices.Contains(typeNames, elemTypeName) { + // The elem's view type has not been generated yet, but we can define + // and use a blank type with the expected view type name. + elemViewTypeName = types.NewTypeName(0, elemType.Obj().Pkg(), elemViewTypeBaseName, nil) + elemViewType = types.NewNamed(elemViewTypeName, types.NewStruct(nil, nil), nil) + if elemTypeParams := elemType.TypeParams(); elemTypeParams != nil { + elemViewType.SetTypeParams(collectTypeParams(elemTypeParams)) + } + } else { + // The elem view type does not exist and won't be generated. 
+ return nil, nil + } + // If elemType is an instantiated generic type, instantiate the elemViewType as well. + if elemTypeArgs := elemType.TypeArgs(); elemTypeArgs != nil { + elemViewType = must.Get(types.Instantiate(nil, elemViewType, collectTypes(elemTypeArgs), false)).(*types.Named) + } + // And finally set the elemViewType as the last type argument. + containerViewTypeArgs[len(containerViewTypeArgs)-1] = elemViewType + + // Instantiate the container view type with the specified type arguments. + containerViewType := must.Get(types.Instantiate(nil, containerViewGenericType, containerViewTypeArgs, false)) + // Look up a function to create a view of a container. + // It should be in the same package as the container type, named {ViewType}Of, + // and have a signature like {ViewType}Of(c *Container[T]) ContainerView[T, V]. + makeContainerView, ok := containerType.Obj().Pkg().Scope().Lookup(containerViewTypeName + "Of").(*types.Func) + if !ok { + return nil, nil + } + return containerViewType.(*types.Named), makeContainerView +} + +func baseType(typ types.Type) types.Type { + if ptr, ok := typ.(*types.Pointer); ok { + return ptr.Elem() + } + return typ +} + +func collectTypes(list *types.TypeList) []types.Type { + // TODO(nickkhyl): use slices.Collect in Go 1.23? 
+ if list.Len() == 0 { + return nil + } + res := make([]types.Type, list.Len()) + for i := range res { + res[i] = list.At(i) + } + return res +} + +func collectTypeParams(list *types.TypeParamList) []*types.TypeParam { + if list.Len() == 0 { + return nil + } + res := make([]*types.TypeParam, list.Len()) + for i := range res { + p := list.At(i) + res[i] = types.NewTypeParam(p.Obj(), p.Constraint()) + } + return res +} + var ( flagTypes = flag.String("type", "", "comma-separated list of types; required") flagBuildTags = flag.String("tags", "", "compiler build tags to apply") flagCloneFunc = flag.Bool("clonefunc", false, "add a top-level Clone func") flagCloneOnlyTypes = flag.String("clone-only-type", "", "comma-separated list of types (a subset of --type) that should only generate a go:generate clone line and not actual views") + + typeNames []string ) func main() { @@ -415,7 +539,7 @@ func main() { flag.Usage() os.Exit(2) } - typeNames := strings.Split(*flagTypes, ",") + typeNames = strings.Split(*flagTypes, ",") var flagArgs []string flagArgs = append(flagArgs, fmt.Sprintf("-clonefunc=%v", *flagCloneFunc)) diff --git a/cmd/vnet/run-krazy.sh b/cmd/vnet/run-krazy.sh new file mode 100755 index 0000000000000..4202521e04f3e --- /dev/null +++ b/cmd/vnet/run-krazy.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +echo "Type 'C-a c' to enter monitor; q to quit." 
+ +set -eux +qemu-system-x86_64 -M microvm,isa-serial=off \ + -m 1G \ + -nodefaults -no-user-config -nographic \ + -kernel $HOME/src/github.com/tailscale/gokrazy-kernel/vmlinuz \ + -append "console=hvc0 root=PARTUUID=60c24cc1-f3f9-427a-8199-76baa2d60001/PARTNROFF=1 ro init=/gokrazy/init panic=10 oops=panic pci=off nousb tsc=unstable clocksource=hpet tailscale-tta=1 tailscaled.env=TS_DEBUG_RAW_DISCO=1" \ + -drive id=blk0,file=$HOME/src/tailscale.com/gokrazy/natlabapp.img,format=raw \ + -device virtio-blk-device,drive=blk0 \ + -device virtio-rng-device \ + -netdev stream,id=net0,addr.type=unix,addr.path=/tmp/qemu.sock \ + -device virtio-serial-device \ + -device virtio-net-device,netdev=net0,mac=52:cc:cc:cc:cc:01 \ + -chardev stdio,id=virtiocon0,mux=on \ + -device virtconsole,chardev=virtiocon0 \ + -mon chardev=virtiocon0,mode=readline \ + -audio none + diff --git a/cmd/vnet/vnet-main.go b/cmd/vnet/vnet-main.go new file mode 100644 index 0000000000000..99eb022a8f56e --- /dev/null +++ b/cmd/vnet/vnet-main.go @@ -0,0 +1,118 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// The vnet binary runs a virtual network stack in userspace for qemu instances +// to connect to and simulate various network conditions. 
+package main + +import ( + "context" + "flag" + "log" + "net" + "net/http" + "net/http/httputil" + "net/url" + "os" + "time" + + "tailscale.com/tstest/natlab/vnet" + "tailscale.com/types/logger" + "tailscale.com/util/must" +) + +var ( + listen = flag.String("listen", "/tmp/qemu.sock", "path to listen on") + nat = flag.String("nat", "easy", "type of NAT to use") + nat2 = flag.String("nat2", "hard", "type of NAT to use for second network") + portmap = flag.Bool("portmap", false, "enable portmapping") + dgram = flag.Bool("dgram", false, "enable datagram mode; for use with macOS Hypervisor.Framework and VZFileHandleNetworkDeviceAttachment") +) + +func main() { + flag.Parse() + + if _, err := os.Stat(*listen); err == nil { + os.Remove(*listen) + } + + var srv net.Listener + var err error + var conn *net.UnixConn + if *dgram { + addr, err := net.ResolveUnixAddr("unixgram", *listen) + if err != nil { + log.Fatalf("ResolveUnixAddr: %v", err) + } + conn, err = net.ListenUnixgram("unixgram", addr) + if err != nil { + log.Fatalf("ListenUnixgram: %v", err) + } + defer conn.Close() + } else { + srv, err = net.Listen("unix", *listen) + } + if err != nil { + log.Fatal(err) + } + + var c vnet.Config + node1 := c.AddNode(c.AddNetwork("2.1.1.1", "192.168.1.1/24", vnet.NAT(*nat))) + c.AddNode(c.AddNetwork("2.2.2.2", "10.2.0.1/16", vnet.NAT(*nat2))) + if *portmap { + node1.Network().AddService(vnet.NATPMP) + } + + s, err := vnet.New(&c) + if err != nil { + log.Fatalf("newServer: %v", err) + } + + if err := s.PopulateDERPMapIPs(); err != nil { + log.Printf("warning: ignoring failure to populate DERP map: %v", err) + } + + s.WriteStartingBanner(os.Stdout) + nc := s.NodeAgentClient(node1) + go func() { + rp := httputil.NewSingleHostReverseProxy(must.Get(url.Parse("http://gokrazy"))) + d := rp.Director + rp.Director = func(r *http.Request) { + d(r) + r.Header.Set("X-TTA-GoKrazy", "1") + } + rp.Transport = nc.HTTPClient.Transport + http.ListenAndServe(":8080", rp) + }() + go func() { + 
getStatus := func() { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + st, err := nc.Status(ctx) + if err != nil { + log.Printf("NodeStatus: %v", err) + return + } + log.Printf("NodeStatus: %v", logger.AsJSON(st)) + } + for { + time.Sleep(5 * time.Second) + //continue + getStatus() + } + }() + + if conn != nil { + s.ServeUnixConn(conn, vnet.ProtocolUnixDGRAM) + return + } + + for { + c, err := srv.Accept() + if err != nil { + log.Printf("Accept: %v", err) + continue + } + go s.ServeUnixConn(c.(*net.UnixConn), vnet.ProtocolQEMU) + } +} diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index b1d991ce6662b..072b43447dc3a 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -333,6 +333,9 @@ func (c *Direct) Close() error { } } c.noiseClient = nil + if tr, ok := c.httpc.Transport.(*http.Transport); ok { + tr.CloseIdleConnections() + } return nil } diff --git a/control/controlhttp/client.go b/control/controlhttp/client.go index c3b2e54b8c4ab..e01cb1f9a4cc5 100644 --- a/control/controlhttp/client.go +++ b/control/controlhttp/client.go @@ -46,6 +46,7 @@ import ( "tailscale.com/net/sockstats" "tailscale.com/net/tlsdial" "tailscale.com/net/tshttpproxy" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime" "tailscale.com/util/multierr" @@ -497,11 +498,9 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, addr netip.Addr, tr.DisableCompression = true // (mis)use httptrace to extract the underlying net.Conn from the - // transport. We make exactly 1 request using this transport, so - // there will be exactly 1 GotConn call. Additionally, the - // transport handles 101 Switching Protocols correctly, such that - // the Conn will not be reused or kept alive by the transport once - // the response has been handed back from RoundTrip. + // transport. 
The transport handles 101 Switching Protocols correctly, + // such that the Conn will not be reused or kept alive by the transport + // once the response has been handed back from RoundTrip. // // In theory, the machinery of net/http should make it such that // the trace callback happens-before we get the response, but @@ -517,10 +516,16 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, addr netip.Addr, // unexpected EOFs...), and we're bound to forget someday and // introduce a protocol optimization at a higher level that starts // eagerly transmitting from the server. - connCh := make(chan net.Conn, 1) + var lastConn syncs.AtomicValue[net.Conn] trace := httptrace.ClientTrace{ + // Even though we only make a single HTTP request which should + // require a single connection, the context (with the attached + // trace configuration) might be used by our custom dialer to + // make other HTTP requests (e.g. BootstrapDNS). We only care + // about the last connection made, which should be the one to + // the control server. GotConn: func(info httptrace.GotConnInfo) { - connCh <- info.Conn + lastConn.Store(info.Conn) }, } ctx = httptrace.WithClientTrace(ctx, &trace) @@ -548,11 +553,7 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, addr netip.Addr, // is still a read buffer attached to it within resp.Body. So, we // must direct I/O through resp.Body, but we can still use the // underlying net.Conn for stuff like deadlines. 
- var switchedConn net.Conn - select { - case switchedConn = <-connCh: - default: - } + switchedConn := lastConn.Load() if switchedConn == nil { resp.Body.Close() return nil, fmt.Errorf("httptrace didn't provide a connection") diff --git a/control/controlhttp/client_js.go b/control/controlhttp/client_js.go index 7ad5963660d74..4b7126b52cf38 100644 --- a/control/controlhttp/client_js.go +++ b/control/controlhttp/client_js.go @@ -10,7 +10,7 @@ import ( "net" "net/url" - "nhooyr.io/websocket" + "github.com/coder/websocket" "tailscale.com/control/controlbase" "tailscale.com/net/wsconn" ) diff --git a/control/controlhttp/http_test.go b/control/controlhttp/http_test.go index ba2767289222b..8c8ed7f5701b0 100644 --- a/control/controlhttp/http_test.go +++ b/control/controlhttp/http_test.go @@ -11,10 +11,12 @@ import ( "log" "net" "net/http" + "net/http/httptest" "net/http/httputil" "net/netip" "net/url" "runtime" + "slices" "strconv" "sync" "testing" @@ -41,6 +43,8 @@ type httpTestParam struct { makeHTTPHangAfterUpgrade bool doEarlyWrite bool + + httpInDial bool } func TestControlHTTP(t *testing.T) { @@ -120,6 +124,12 @@ func TestControlHTTP(t *testing.T) { name: "early_write", doEarlyWrite: true, }, + // Dialer needed to make another HTTP request along the way (e.g. to + // resolve the hostname via BootstrapDNS). + { + name: "http_request_in_dial", + httpInDial: true, + }, } for _, test := range tests { @@ -217,6 +227,29 @@ func testControlHTTP(t *testing.T, param httpTestParam) { Clock: clock, } + if param.httpInDial { + // Spin up a separate server to get a different port on localhost. 
+ secondServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return })) + defer secondServer.Close() + + prev := a.Dialer + a.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + req, err := http.NewRequestWithContext(ctx, "GET", secondServer.URL, nil) + if err != nil { + t.Errorf("http.NewRequest: %v", err) + } + r, err := http.DefaultClient.Do(req) + if err != nil { + t.Errorf("http.Get: %v", err) + } + r.Body.Close() + + return prev(ctx, network, addr) + } + } + if proxy != nil { proxyEnv := proxy.Start(t) defer proxy.Close() @@ -238,6 +271,7 @@ func testControlHTTP(t *testing.T, param httpTestParam) { t.Fatalf("dialing controlhttp: %v", err) } defer conn.Close() + si := <-sch if si.conn != nil { defer si.conn.Close() @@ -266,6 +300,19 @@ func testControlHTTP(t *testing.T, param httpTestParam) { t.Errorf("early write = %q; want %q", buf, earlyWriteMsg) } } + + // When no proxy is used, the RemoteAddr of the returned connection should match + // one of the listeners of the test server. 
+ if proxy == nil { + var expectedAddrs []string + for _, ln := range []net.Listener{httpLn, httpsLn} { + expectedAddrs = append(expectedAddrs, fmt.Sprintf("127.0.0.1:%d", ln.Addr().(*net.TCPAddr).Port)) + expectedAddrs = append(expectedAddrs, fmt.Sprintf("[::1]:%d", ln.Addr().(*net.TCPAddr).Port)) + } + if !slices.Contains(expectedAddrs, conn.RemoteAddr().String()) { + t.Errorf("unexpected remote addr: %s, want %s", conn.RemoteAddr(), expectedAddrs) + } + } } type serverResult struct { diff --git a/control/controlhttp/server.go b/control/controlhttp/server.go index ee469fabda632..6a0d2bc5682a9 100644 --- a/control/controlhttp/server.go +++ b/control/controlhttp/server.go @@ -14,7 +14,7 @@ import ( "strings" "time" - "nhooyr.io/websocket" + "github.com/coder/websocket" "tailscale.com/control/controlbase" "tailscale.com/net/netutil" "tailscale.com/net/wsconn" diff --git a/control/controlknobs/controlknobs.go b/control/controlknobs/controlknobs.go index 7315a10f72ee7..dd76a3abdba5b 100644 --- a/control/controlknobs/controlknobs.go +++ b/control/controlknobs/controlknobs.go @@ -99,6 +99,10 @@ type Knobs struct { // DisableCryptorouting indicates that the node should not use the // magicsock crypto routing feature. DisableCryptorouting atomic.Bool + + // DisableCaptivePortalDetection is whether the node should not perform captive portal detection + // automatically when the network state changes. 
+ DisableCaptivePortalDetection atomic.Bool } // UpdateFromNodeAttributes updates k (if non-nil) based on the provided self @@ -127,6 +131,7 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) { disableSplitDNSWhenNoCustomResolvers = has(tailcfg.NodeAttrDisableSplitDNSWhenNoCustomResolvers) disableLocalDNSOverrideViaNRPT = has(tailcfg.NodeAttrDisableLocalDNSOverrideViaNRPT) disableCryptorouting = has(tailcfg.NodeAttrDisableMagicSockCryptoRouting) + disableCaptivePortalDetection = has(tailcfg.NodeAttrDisableCaptivePortalDetection) ) if has(tailcfg.NodeAttrOneCGNATEnable) { @@ -153,6 +158,7 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) { k.DisableSplitDNSWhenNoCustomResolvers.Store(disableSplitDNSWhenNoCustomResolvers) k.DisableLocalDNSOverrideViaNRPT.Store(disableLocalDNSOverrideViaNRPT) k.DisableCryptorouting.Store(disableCryptorouting) + k.DisableCaptivePortalDetection.Store(disableCaptivePortalDetection) } // AsDebugJSON returns k as something that can be marshalled with json.Marshal @@ -180,5 +186,6 @@ func (k *Knobs) AsDebugJSON() map[string]any { "DisableSplitDNSWhenNoCustomResolvers": k.DisableSplitDNSWhenNoCustomResolvers.Load(), "DisableLocalDNSOverrideViaNRPT": k.DisableLocalDNSOverrideViaNRPT.Load(), "DisableCryptorouting": k.DisableCryptorouting.Load(), + "DisableCaptivePortalDetection": k.DisableCaptivePortalDetection.Load(), } } diff --git a/derp/derphttp/derphttp_client.go b/derp/derphttp/derphttp_client.go index c5d7c95b2751f..b8cce8cdcb4fa 100644 --- a/derp/derphttp/derphttp_client.go +++ b/derp/derphttp/derphttp_client.go @@ -381,6 +381,7 @@ func (c *Client) connect(ctx context.Context, caller string) (client *derp.Clien }() var node *tailcfg.DERPNode // nil when using c.url to dial + var idealNodeInRegion bool switch { case useWebsockets(): var urlStr string @@ -421,6 +422,7 @@ func (c *Client) connect(ctx context.Context, caller string) (client *derp.Clien default: c.logf("%s: connecting to derp-%d 
(%v)", caller, reg.RegionID, reg.RegionCode) tcpConn, node, err = c.dialRegion(ctx, reg) + idealNodeInRegion = err == nil && reg.Nodes[0] == node } if err != nil { return nil, 0, err @@ -494,6 +496,18 @@ func (c *Client) connect(ctx context.Context, caller string) (client *derp.Clien } req.Header.Set("Upgrade", "DERP") req.Header.Set("Connection", "Upgrade") + if !idealNodeInRegion && reg != nil { + // This is purely informative for now (2024-07-06) for stats: + req.Header.Set("Ideal-Node", reg.Nodes[0].Name) + // TODO(bradfitz,raggi): start a time.AfterFunc for 30m-1h or so to + // dialNode(reg.Nodes[0]) and see if we can even TCP connect to it. If + // so, TLS handshake it as well (which is mixed up in this massive + // connect method) and then if it all appears good, grab the mutex, bump + // connGen, finish the Upgrade, close the old one, and set a new field + // on Client that's like "here's the connect result and connGen for the + // next connect that comes in"). Tracking bug for all this is: + // https://github.com/tailscale/tailscale/issues/12724 + } if !serverPub.IsZero() && serverProtoVersion != 0 { // parseMetaCert found the server's public key (no TLS diff --git a/derp/derphttp/derphttp_server.go b/derp/derphttp/derphttp_server.go index d1193f38303a7..41ce86764f66a 100644 --- a/derp/derphttp/derphttp_server.go +++ b/derp/derphttp/derphttp_server.go @@ -18,6 +18,7 @@ import ( // following its HTTP request. const fastStartHeader = "Derp-Fast-Start" +// Handler returns an http.Handler to be mounted at /derp, serving s. func Handler(s *derp.Server) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // These are installed both here and in cmd/derper. The check here @@ -79,3 +80,29 @@ func ProbeHandler(w http.ResponseWriter, r *http.Request) { http.Error(w, "bogus probe method", http.StatusMethodNotAllowed) } } + +// ServeNoContent generates the /generate_204 response used by Tailscale's +// captive portal detection. 
+func ServeNoContent(w http.ResponseWriter, r *http.Request) { + if challenge := r.Header.Get(NoContentChallengeHeader); challenge != "" { + badChar := strings.IndexFunc(challenge, func(r rune) bool { + return !isChallengeChar(r) + }) != -1 + if len(challenge) <= 64 && !badChar { + w.Header().Set(NoContentResponseHeader, "response "+challenge) + } + } + w.WriteHeader(http.StatusNoContent) +} + +func isChallengeChar(c rune) bool { + // Semi-randomly chosen as a limited set of valid characters + return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || + ('0' <= c && c <= '9') || + c == '.' || c == '-' || c == '_' +} + +const ( + NoContentChallengeHeader = "X-Tailscale-Challenge" + NoContentResponseHeader = "X-Tailscale-Response" +) diff --git a/derp/derphttp/websocket.go b/derp/derphttp/websocket.go index 08e40185401a0..6ef47473a2532 100644 --- a/derp/derphttp/websocket.go +++ b/derp/derphttp/websocket.go @@ -10,7 +10,7 @@ import ( "log" "net" - "nhooyr.io/websocket" + "github.com/coder/websocket" "tailscale.com/net/wsconn" ) diff --git a/flake.nix b/flake.nix index d84199141c575..b6c34e9c0c666 100644 --- a/flake.nix +++ b/flake.nix @@ -120,4 +120,4 @@ in flake-utils.lib.eachDefaultSystem (system: flakeForSystem nixpkgs system); } -# nix-direnv cache busting line: sha256-CRzwQpi//TuLU8P66Dh4IdmM96f1YF10XyFfFBF4pQA= +# nix-direnv cache busting line: sha256-M5e5dE1gGW3ly94r3SxCsBmVwbBmhVtaVDW691vxG/8= diff --git a/go.mod b/go.mod index a0ee81395ff95..7002c3410a6ce 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,6 @@ go 1.22.0 require ( filippo.io/mkcert v1.4.4 - fybrik.io/crdoc v0.6.3 github.com/akutz/memconn v0.1.0 github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa github.com/andybalholm/brotli v1.1.0 @@ -16,9 +15,10 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 github.com/bramvdbogaerde/go-scp v1.4.0 github.com/cilium/ebpf v0.15.0 + github.com/coder/websocket v1.8.12 github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf - github.com/creack/pty v1.1.21 + github.com/creack/pty v1.1.23 github.com/dave/courtney v0.4.0 github.com/dave/patsy v0.0.0-20210517141501-957256f50cba github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa @@ -26,6 +26,7 @@ require ( github.com/distribution/reference v0.6.0 github.com/djherbis/times v1.6.0 github.com/dsnet/try v0.0.3 + github.com/elastic/crd-ref-docs v0.0.12 github.com/evanw/esbuild v0.19.11 github.com/frankban/quicktest v1.14.6 github.com/fxamacker/cbor/v2 v2.6.0 @@ -39,6 +40,7 @@ require ( github.com/golangci/golangci-lint v1.52.2 github.com/google/go-cmp v0.6.0 github.com/google/go-containerregistry v0.18.0 + github.com/google/gopacket v1.1.19 github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 github.com/google/uuid v1.6.0 github.com/goreleaser/nfpm/v2 v2.33.1 @@ -80,37 +82,36 @@ require ( github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 - github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1 - github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9 + github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 + github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e github.com/tc-hib/winres v0.2.1 github.com/tcnksm/go-httpstat v0.2.0 github.com/toqueteos/webbrowser v1.2.0 github.com/u-root/u-root v0.12.0 github.com/vishvananda/netlink v1.2.1-beta.2 github.com/vishvananda/netns v0.0.4 - go.uber.org/zap v1.26.0 + go.uber.org/zap v1.27.0 go4.org/mem v0.0.0-20220726221520-4f986261bf13 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba - golang.org/x/crypto v0.24.0 + golang.org/x/crypto v0.25.0 golang.org/x/exp v0.0.0-20240119083558-1b970713d09a - golang.org/x/mod v0.18.0 - golang.org/x/net v0.26.0 + golang.org/x/mod v0.19.0 + golang.org/x/net v0.27.0 golang.org/x/oauth2 v0.16.0 
golang.org/x/sync v0.7.0 - golang.org/x/sys v0.21.0 - golang.org/x/term v0.21.0 + golang.org/x/sys v0.22.0 + golang.org/x/term v0.22.0 golang.org/x/time v0.5.0 - golang.org/x/tools v0.22.0 + golang.org/x/tools v0.23.0 golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 golang.zx2c4.com/wireguard/windows v0.5.3 gopkg.in/square/go-jose.v2 v2.6.0 - gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 + gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 honnef.co/go/tools v0.4.6 - k8s.io/api v0.30.1 - k8s.io/apimachinery v0.30.1 - k8s.io/apiserver v0.30.1 - k8s.io/client-go v0.30.1 - nhooyr.io/websocket v1.8.10 + k8s.io/api v0.30.3 + k8s.io/apimachinery v0.30.3 + k8s.io/apiserver v0.30.3 + k8s.io/client-go v0.30.3 sigs.k8s.io/controller-runtime v0.18.4 sigs.k8s.io/controller-tools v0.15.1-0.20240618033008-7824932b0cab sigs.k8s.io/yaml v1.4.0 @@ -118,6 +119,7 @@ require ( ) require ( + github.com/Masterminds/sprig v2.22.0+incompatible // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/bits-and-blooms/bitset v1.13.0 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect @@ -128,13 +130,16 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gobuffalo/flect v1.0.2 // indirect + github.com/goccy/go-yaml v1.12.0 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd // indirect github.com/gorilla/securecookie v1.1.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect go.opentelemetry.io/otel v1.22.0 // indirect go.opentelemetry.io/otel/metric v1.22.0 // indirect go.opentelemetry.io/otel/trace v1.22.0 // indirect + golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect ) require ( @@ -194,7 +199,7 @@ require ( github.com/denis-tingaikin/go-header v0.4.3 // indirect 
github.com/docker/cli v25.0.0+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v26.1.4+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.1 // indirect github.com/emicklei/go-restful/v3 v3.11.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect @@ -202,7 +207,7 @@ require ( github.com/ettle/strcase v0.1.1 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect - github.com/fatih/color v1.16.0 // indirect + github.com/fatih/color v1.17.0 // indirect github.com/fatih/structtag v1.2.0 // indirect github.com/firefart/nonamedreturns v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 @@ -211,7 +216,7 @@ require ( github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.5.0 // indirect github.com/go-git/go-git/v5 v5.11.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect github.com/go-openapi/swag v0.22.7 // indirect @@ -253,7 +258,7 @@ require ( github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect - github.com/huandu/xstrings v1.4.0 // indirect + github.com/huandu/xstrings v1.5.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect @@ -338,14 +343,14 @@ require ( github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/spf13/afero v1.9.5 // indirect github.com/spf13/cast v1.6.0 // indirect - github.com/spf13/cobra v1.8.0 // indirect + github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman 
v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.16.0 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect - github.com/stretchr/objx v0.5.0 // indirect - github.com/stretchr/testify v1.8.4 + github.com/stretchr/objx v0.5.2 // indirect + github.com/stretchr/testify v1.9.0 github.com/subosito/gotenv v1.4.2 // indirect github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 @@ -380,10 +385,10 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 howett.net/plist v1.0.0 // indirect - k8s.io/apiextensions-apiserver v0.30.1 // indirect - k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/apiextensions-apiserver v0.30.3 // indirect + k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect - k8s.io/utils v0.0.0-20240102154912-e7106e64919e + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 mvdan.cc/gofumpt v0.5.0 // indirect mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect diff --git a/go.mod.sri b/go.mod.sri index e85f9fa991dde..a44020130921e 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-CRzwQpi//TuLU8P66Dh4IdmM96f1YF10XyFfFBF4pQA= +sha256-M5e5dE1gGW3ly94r3SxCsBmVwbBmhVtaVDW691vxG/8= diff --git a/go.sum b/go.sum index 24cd10b7d0ba5..dbdb7168a9739 100644 --- a/go.sum +++ b/go.sum @@ -46,8 +46,6 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc= filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA= -fybrik.io/crdoc v0.6.3 h1:jNNAVINu8up5vrLa0jrV7z7HSlyHF/6lNOrAtrXwYlI= -fybrik.io/crdoc v0.6.3/go.mod 
h1:kvZRt7VAzOyrmDpIqREtcKAVFSJYEBoAyniYebsJGtQ= github.com/Abirdcfly/dupword v0.0.11 h1:z6v8rMETchZXUIuHxYNmlUAuKuB21PeaSymTed16wgU= github.com/Abirdcfly/dupword v0.0.11/go.mod h1:wH8mVGuf3CP5fsBTkfWwwwKTjDnVVCxtU8d8rgeVYXA= github.com/AlekSi/pointer v1.2.0 h1:glcy/gc4h8HnG2Z3ZECSzZ1IX1x2JxRVuDzaJwQE0+w= @@ -75,6 +73,8 @@ github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF0 github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= +github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= @@ -218,6 +218,8 @@ github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBS github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= +github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/stargz-snapshotter/estargz 
v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU= @@ -226,10 +228,10 @@ github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6 github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= -github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= +github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= @@ -262,8 +264,8 @@ github.com/docker/cli v25.0.0+incompatible h1:zaimaQdnX7fYWFqzN88exE9LDEvRslexpF github.com/docker/cli v25.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= 
+github.com/docker/docker v26.1.4+incompatible h1:vuTpXDuoga+Z38m1OZHzl7NKisKWaWlhjQk7IDPSLsU= +github.com/docker/docker v26.1.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo= github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= @@ -272,6 +274,8 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dsnet/try v0.0.3 h1:ptR59SsrcFUYbT/FhAbKTV6iLkeD6O18qfIWRml2fqI= github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq40= +github.com/elastic/crd-ref-docs v0.0.12 h1:F3seyncbzUz3rT3d+caeYWhumb5ojYQ6Bl0Z+zOp16M= +github.com/elastic/crd-ref-docs v0.0.12/go.mod h1:X83mMBdJt05heJUYiS3T0yJ/JkCuliuhSUNav5Gjo/U= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= github.com/emicklei/go-restful/v3 v3.11.2 h1:1onLa9DcsMYO9P+CXaL0dStDqQ2EHHXLiz+BtnqkLAU= @@ -294,8 +298,8 @@ github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0 github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/evanw/esbuild v0.19.11 h1:mbPO1VJ/df//jjUd+p/nRLYCpizXxXb2w/zZMShxa2k= github.com/evanw/esbuild v0.19.11/go.mod h1:D2vIQZqV/vIf/VRHtViaUtViZmG7o+kKmlBfVQuRi48= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod 
h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= @@ -338,8 +342,8 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= @@ -352,6 +356,12 @@ github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdX github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= github.com/go-openapi/swag v0.22.7 h1:JWrc1uc/P9cSomxfnsFSVWoE1FW6bNbrVPmpQYpCcR8= github.com/go-openapi/swag v0.22.7/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod 
h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -381,6 +391,8 @@ github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/goccy/go-yaml v1.12.0 h1:/1WHjnMsI1dlIBQutrvSMGZRQufVO3asrHfTwfACoPM= +github.com/goccy/go-yaml v1.12.0/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= @@ -467,6 +479,8 @@ github.com/google/go-containerregistry v0.18.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/goterm v0.0.0-20200907032337-555d40f16ae2 h1:CVuJwN34x4xM2aT4sIKhmeib40NeBPhRihNjQmpJsA4= 
github.com/google/goterm v0.0.0-20200907032337-555d40f16ae2/go.mod h1:nOFQdrUlIlx6M6ODdSpBj1NVA+VgLC6kmw60mkw34H4= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -542,8 +556,8 @@ github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3s github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= -github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hugelgupf/vmtest v0.0.0-20240102225328-693afabdd27f h1:ov45/OzrJG8EKbGjn7jJZQJTN7Z1t73sFYNIRd64YlI= github.com/hugelgupf/vmtest v0.0.0-20240102225328-693afabdd27f/go.mod h1:JoDrYMZpDPYo6uH9/f6Peqms3zNNWT2XiGgioMOIGuI= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -636,6 +650,8 @@ github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUc github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo= github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4= +github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU= github.com/leonklingele/grouper v1.1.1/go.mod 
h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= @@ -687,6 +703,8 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -859,8 +877,8 @@ github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/ github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -874,8 +892,9 @@ 
github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8L github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -887,8 +906,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/studio-b12/gowebdav v0.9.0 h1:1j1sc9gQnNxbXXM4M/CebPOX4aXYtr7MojAVcN4dHjU= github.com/studio-b12/gowebdav v0.9.0/go.mod h1:bHA7t77X/QFExdeAnDzK6vKM34kEZAcE1OX4MfiwjkE= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= @@ -919,10 +938,10 @@ github.com/tailscale/web-client-prebuilt 
v0.0.0-20240226180453-5db17b287bf1 h1:t github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y= -github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1 h1:ycpNCSYwzZ7x4G4ioPNtKQmIY0G/3o4pVf8wCZq6blY= -github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9 h1:81P7rjnikHKTJ75EkjppvbwUfKHDHYk6LJpO5PZy8pA= -github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= +github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 h1:RNpJrXfI5u6e+uzyIzvmnXbhmhdRkVf//90sMBH3lso= +github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= +github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA= github.com/tc-hib/winres v0.2.1/go.mod h1:C/JaNhH3KBvhNKVbvdlDWkbMDO9H4fKKDaN7/07SSuk= github.com/tcnksm/go-httpstat v0.2.0 h1:rP7T5e5U2HfmOBmZzGgGZjBQ5/GluWUylujl0tJ04I0= @@ -1013,8 +1032,8 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= 
-go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H9rmp5nkDTAst8= go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= @@ -1033,8 +1052,8 @@ golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1083,8 +1102,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod 
h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1129,8 +1148,8 @@ golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1224,8 +1243,8 @@ golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.1-0.20230131160137-e7d7f63158de/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1234,8 +1253,8 @@ golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1330,12 +1349,14 @@ golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.23.0 
h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 h1:LLhsEBxRTBLuKlQxFBYUOU8xyFgXv6cOTp2HASDlsDk= +golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg= golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI= golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE= @@ -1474,8 +1495,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 h1:/8/t5pz/mgdRXhYOIeqqYhFAQLE4DDGegc0Y4ZjyFJM= -gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3/go.mod h1:NQHVAzMwvZ+Qe3ElSiHmq9RUm1MdNHpUZ52fiEqvn+0= +gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8= +gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools 
v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1487,22 +1508,22 @@ honnef.co/go/tools v0.4.6 h1:oFEHCKeID7to/3autwsWfnuv69j3NsfcXbvJKuIcep8= honnef.co/go/tools v0.4.6/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0= howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= -k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY= -k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM= -k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws= -k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4= -k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U= -k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= -k8s.io/apiserver v0.30.1 h1:BEWEe8bzS12nMtDKXzCF5Q5ovp6LjjYkSp8qOPk8LZ8= -k8s.io/apiserver v0.30.1/go.mod h1:i87ZnQ+/PGAmSbD/iEKM68bm1D5reX8fO4Ito4B01mo= -k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= -k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ= +k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04= +k8s.io/apiextensions-apiserver v0.30.3 h1:oChu5li2vsZHx2IvnGP3ah8Nj3KyqG3kRSaKmijhB9U= +k8s.io/apiextensions-apiserver v0.30.3/go.mod h1:uhXxYDkMAvl6CJw4lrDN4CPbONkF3+XL9cacCT44kV4= +k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc= +k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/apiserver v0.30.3 
h1:QZJndA9k2MjFqpnyYv/PH+9PE0SHhx3hBho4X0vE65g= +k8s.io/apiserver v0.30.3/go.mod h1:6Oa88y1CZqnzetd2JdepO0UXzQX4ZnOekx2/PtEjrOg= +k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= +k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= -k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= mvdan.cc/gofumpt v0.5.0 h1:0EQ+Z56k8tXjj/6TQD25BFNKQXpCvT0rnansIc7Ug5E= mvdan.cc/gofumpt v0.5.0/go.mod h1:HBeVDtMKRZpXyxFciAirzdKklDlGu8aAy1wEbH5Y9js= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= @@ -1511,8 +1532,6 @@ mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphD mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= mvdan.cc/unparam v0.0.0-20230312165513-e84e2d14e3b8 h1:VuJo4Mt0EVPychre4fNlDWDuE5AjXtPJpRUWqZDQhaI= mvdan.cc/unparam v0.0.0-20230312165513-e84e2d14e3b8/go.mod h1:Oh/d7dEtzsNHGOq1Cdv8aMm3KdKhVvPbRQcM8WFpBR8= -nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= -nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod 
h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/go.toolchain.rev b/go.toolchain.rev index a3beee04d237a..7d064e9660851 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -2f152a4eff5875655a9a84fce8f8d329f8d9a321 +22ef9eb38e9a2d21b4a45f7adc75addb05f3efb8 diff --git a/gokrazy/.gitignore b/gokrazy/.gitignore index cdd044ae2d806..0a5235f29aa1e 100644 --- a/gokrazy/.gitignore +++ b/gokrazy/.gitignore @@ -1,2 +1,3 @@ -tsapp.img +*.qcow2 +*.img go.work diff --git a/gokrazy/Makefile b/gokrazy/Makefile index f086dd26b11cc..bc55f2a52acb5 100644 --- a/gokrazy/Makefile +++ b/gokrazy/Makefile @@ -6,3 +6,8 @@ image: qemu: image qemu-system-x86_64 -m 1G -drive file=tsapp.img,format=raw -boot d -netdev user,id=user.0 -device virtio-net-pci,netdev=user.0 -serial mon:stdio -audio none + +# For natlab integration tests: +natlab: + go run build.go --build --app=natlabapp + qemu-img convert -O qcow2 natlabapp.img natlabapp.qcow2 diff --git a/gokrazy/build.go b/gokrazy/build.go index 9bfe26d2553b4..a8373d0d0d7e9 100644 --- a/gokrazy/build.go +++ b/gokrazy/build.go @@ -22,10 +22,12 @@ import ( "path/filepath" "regexp" "runtime" + "strings" "time" ) var ( + app = flag.String("app", "tsapp", "appliance name; one of the subdirectories of gokrazy/") bucket = flag.String("bucket", "tskrazy-import", "S3 bucket to upload disk image to while making AMI") build = flag.Bool("build", false, "if true, just build locally and stop, without uploading") ) @@ -53,6 +55,10 @@ func findMkfsExt4() (string, error) { func main() { flag.Parse() + if *app == "" || strings.Contains(*app, "/") { + log.Fatalf("--app must be non-empty name such as 'tsapp' or 'natlabapp'") + } + if err := buildImage(); err != nil { log.Fatalf("build image: %v", err) } @@ -75,7 +81,7 @@ func main() { } log.Printf("snap ID: %v", snapID) - ami, err := makeAMI(fmt.Sprintf("tsapp-%d", time.Now().Unix()), snapID) + ami, err := 
makeAMI(fmt.Sprintf(*app+"-%d", time.Now().Unix()), snapID) if err != nil { log.Fatalf("makeAMI: %v", err) } @@ -92,8 +98,8 @@ func buildImage() error { if err != nil { return err } - if fi, err := os.Stat(filepath.Join(dir, "tsapp")); err != nil || !fi.IsDir() { - return fmt.Errorf("in wrong directorg %v; no tsapp subdirectory found", dir) + if fi, err := os.Stat(filepath.Join(dir, *app)); err != nil || !fi.IsDir() { + return fmt.Errorf("in wrong directorg %v; no %q subdirectory found", dir, *app) } // Build the tsapp.img var buf bytes.Buffer @@ -101,9 +107,9 @@ func buildImage() error { "-exec=env GOOS=linux GOARCH=amd64 ", "github.com/gokrazy/tools/cmd/gok", "--parent_dir="+dir, - "--instance=tsapp", + "--instance="+*app, "overwrite", - "--full", "tsapp.img", + "--full", *app+".img", "--target_storage_bytes=1258299392") cmd.Stdout = io.MultiWriter(os.Stdout, &buf) cmd.Stderr = os.Stderr @@ -135,14 +141,14 @@ func buildImage() error { } func copyToS3() error { - cmd := exec.Command("aws", "s3", "cp", "tsapp.img", "s3://"+*bucket+"/") + cmd := exec.Command("aws", "s3", "cp", *app+".img", "s3://"+*bucket+"/") cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr return cmd.Run() } func startImportSnapshot() (importTaskID string, err error) { - out, err := exec.Command("aws", "ec2", "import-snapshot", "--disk-container", "Url=s3://"+*bucket+"/tsappp.img").CombinedOutput() + out, err := exec.Command("aws", "ec2", "import-snapshot", "--disk-container", "Url=s3://"+*bucket+"/"+*app+".img").CombinedOutput() if err != nil { return "", fmt.Errorf("import snapshot: %v: %s", err, out) } diff --git a/gokrazy/go.mod b/gokrazy/go.mod index 896ff7ecd7baa..8c898d3cab17e 100644 --- a/gokrazy/go.mod +++ b/gokrazy/go.mod @@ -2,12 +2,12 @@ module tailscale.com/gokrazy go 1.22 -require github.com/gokrazy/tools v0.0.0-20240510170341-34b02e215bc2 +require github.com/gokrazy/tools v0.0.0-20240730192548-9f81add3a91e require ( github.com/breml/rootcerts v0.2.10 // indirect 
github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 // indirect - github.com/gokrazy/internal v0.0.0-20240510165500-68dd68393b7a // indirect + github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 // indirect github.com/gokrazy/updater v0.0.0-20230215172637-813ccc7f21e2 // indirect github.com/google/renameio/v2 v2.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -18,8 +18,6 @@ require ( golang.org/x/sys v0.20.0 // indirect ) -replace github.com/gokrazy/gokrazy => github.com/tailscale/gokrazy v0.0.0-20240602215456-7b9b6bbf726a +replace github.com/gokrazy/gokrazy => github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 -replace github.com/gokrazy/tools => github.com/tailscale/gokrazy-tools v0.0.0-20240602210012-933640538dcf - -replace github.com/gokrazy/internal => github.com/tailscale/gokrazy-internal v0.0.0-20240602195241-04c5eda9f6cd +replace github.com/gokrazy/tools => github.com/tailscale/gokrazy-tools v0.0.0-20240730192548-9f81add3a91e diff --git a/gokrazy/go.sum b/gokrazy/go.sum index 04522e46859cf..dfac8ca37d101 100644 --- a/gokrazy/go.sum +++ b/gokrazy/go.sum @@ -3,6 +3,8 @@ github.com/breml/rootcerts v0.2.10/go.mod h1:24FDtzYMpqIeYC7QzaE8VPRQaFZU5TIUDly github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 h1:C7t6eeMaEQVy6e8CarIhscYQlNmw5e3G36y7l7Y21Ao= github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0/go.mod h1:56wL82FO0bfMU5RvfXoIwSOP2ggqqxT+tAfNEIyxuHw= +github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 h1:XDklMxV0pE5jWiNaoo5TzvWfqdoiRRScmr4ZtDzE4Uw= +github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5/go.mod h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= github.com/gokrazy/updater v0.0.0-20230215172637-813ccc7f21e2 h1:kBY5R1tSf+EYZ+QaSrofLaVJtBqYsVNVBWkdMq3Smcg= github.com/gokrazy/updater v0.0.0-20230215172637-813ccc7f21e2/go.mod 
h1:PYOvzGOL4nlBmuxu7IyKQTFLaxr61+WPRNRzVtuYOHw= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= @@ -17,10 +19,8 @@ github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/tailscale/gokrazy-internal v0.0.0-20240602195241-04c5eda9f6cd h1:ZJplHHhYSzxYmrXuDPCNChGRZbLkPqRkYqRBM7KNyng= -github.com/tailscale/gokrazy-internal v0.0.0-20240602195241-04c5eda9f6cd/go.mod h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= -github.com/tailscale/gokrazy-tools v0.0.0-20240602210012-933640538dcf h1:lmAGqLbIVoMK1TYWqJvxKFsu+Tb1OecgvXTmypZGAZY= -github.com/tailscale/gokrazy-tools v0.0.0-20240602210012-933640538dcf/go.mod h1:+PSix9a8BHqAz6RV/9+tiE3C1ou0GA1ViR8pqAZVfwI= +github.com/tailscale/gokrazy-tools v0.0.0-20240730192548-9f81add3a91e h1:3/xIc1QCvnKL7BCLng9od98HEvxCadjvqiI/bN+Twso= +github.com/tailscale/gokrazy-tools v0.0.0-20240730192548-9f81add3a91e/go.mod h1:eTZ0QsugEPFU5UAQ/87bKMkPxQuTNa7+iFAIahOFwRg= golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= diff --git a/gokrazy/natlabapp/README.md b/gokrazy/natlabapp/README.md new file mode 100644 index 0000000000000..7fc9dd15901d8 --- /dev/null +++ b/gokrazy/natlabapp/README.md @@ -0,0 +1,6 @@ +# NATLab Linux test Appliance + +This is the definition of the NATLab Linux test appliance image. +It's similar to ../tsapp, but optimized for running in qemu in NATLab. + +See ../tsapp/README.md for more info. 
diff --git a/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod b/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod new file mode 100644 index 0000000000000..c56dede46ed65 --- /dev/null +++ b/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod @@ -0,0 +1,18 @@ +module gokrazy/build/tsapp + +go 1.22.2 + +require ( + github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 // indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/google/renameio/v2 v2.0.0 // indirect + github.com/josharian/native v1.0.0 // indirect + github.com/mdlayher/packet v1.0.0 // indirect + github.com/mdlayher/socket v0.2.3 // indirect + github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46 // indirect + github.com/vishvananda/netlink v1.1.0 // indirect + github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect + golang.org/x/sys v0.20.0 // indirect +) diff --git a/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum b/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum new file mode 100644 index 0000000000000..3cd002ae782b1 --- /dev/null +++ b/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum @@ -0,0 +1,39 @@ +github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 h1:gdGRW/wXHPJuZgZD931Lh75mdJfzEEXrL+Dvi97Ck3A= +github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= +github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= +github.com/josharian/native v1.0.0 
h1:Ts/E8zCSEsG17dUqv7joXJFybuMLjQfWE04tsBODTxk= +github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/mdlayher/packet v1.0.0 h1:InhZJbdShQYt6XV2GPj5XHxChzOfhJJOMbvnGAmOfQ8= +github.com/mdlayher/packet v1.0.0/go.mod h1:eE7/ctqDhoiRhQ44ko5JZU2zxB88g+JH/6jmnjzPjOU= +github.com/mdlayher/socket v0.2.3 h1:XZA2X2TjdOwNoNPVPclRCURoX/hokBY8nkTmRZFEheM= +github.com/mdlayher/socket v0.2.3/go.mod h1:bz12/FozYNH/VbvC3q7TRIK/Y6dH1kCKsXaUeXi/FmY= +github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46 h1:3psQveH4RUiv5yc3p7kRySilf1nSXLQhAvJFwg4fgnE= +github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46/go.mod h1:Ng1F/s+z0zCMsbEFEneh+30LJa9DrTfmA+REbEqcTPk= +github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg= +github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net 
v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/go.mod b/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/go.mod new file mode 100644 index 0000000000000..33656efeea7d7 --- /dev/null +++ b/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/go.mod @@ -0,0 +1,15 @@ +module gokrazy/build/tsapp + +go 1.22.2 + +require ( + github.com/gokrazy/gokrazy v0.0.0-20240802144848-676865a4e84f // indirect + github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 // indirect + github.com/google/renameio/v2 v2.0.0 // indirect + github.com/kenshaw/evdev v0.1.0 // indirect + github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b // indirect + github.com/spf13/pflag v1.0.5 // indirect + 
golang.org/x/sys v0.20.0 // indirect +) + +replace github.com/gokrazy/gokrazy => github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/heartbeat/go.sum b/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/go.sum similarity index 56% rename from gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/heartbeat/go.sum rename to gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/go.sum index 4c510f0492937..479eb1cef1ca7 100644 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/heartbeat/go.sum +++ b/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/go.sum @@ -2,6 +2,8 @@ github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 h1:gdGRW/wXHPJuZgZ github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= github.com/gokrazy/internal v0.0.0-20240510165500-68dd68393b7a h1:FKeN678rNpKTpWRdFbAhYL9mWzPu57R5XPXCR3WmXdI= github.com/gokrazy/internal v0.0.0-20240510165500-68dd68393b7a/go.mod h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= +github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 h1:XDklMxV0pE5jWiNaoo5TzvWfqdoiRRScmr4ZtDzE4Uw= +github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5/go.mod h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= github.com/kenshaw/evdev v0.1.0 h1:wmtceEOFfilChgdNT+c/djPJ2JineVsQ0N14kGzFRUo= @@ -10,5 +12,12 @@ github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b h1:7tUBfsEEBWfFe github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b/go.mod h1:bmoJUS6qOA3uKFvF3KVuhf7mU1KQirzQMeHXtPyKEqg= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/tailscale/gokrazy 
v0.0.0-20240602215456-7b9b6bbf726a h1:7dnA8x14JihQmKbPr++Y5CCN/XSyDmOB6cXUxcIj6VQ= +github.com/tailscale/gokrazy v0.0.0-20240602215456-7b9b6bbf726a/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= +github.com/tailscale/gokrazy v0.0.0-20240802144848-676865a4e84f h1:ZSAGWpgs+6dK2oIz5OR+HUul3oJbnhFn8YNgcZ3d9SQ= +github.com/tailscale/gokrazy v0.0.0-20240802144848-676865a4e84f/go.mod h1:+/WWMckeuQt+DG6690A6H8IgC+HpBFq2fmwRKcSbxdk= +github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 h1:2B8/FbIRqmVgRUulQ4iu1EojniufComYe5Yj4BtIn1c= +github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678/go.mod h1:+/WWMckeuQt+DG6690A6H8IgC+HpBFq2fmwRKcSbxdk= +golang.org/x/sys v0.0.0-20201005065044-765f4ea38db3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/gokrazy/natlabapp/builddir/github.com/gokrazy/serial-busybox/go.mod b/gokrazy/natlabapp/builddir/github.com/gokrazy/serial-busybox/go.mod new file mode 100644 index 0000000000000..de52e181b9c3c --- /dev/null +++ b/gokrazy/natlabapp/builddir/github.com/gokrazy/serial-busybox/go.mod @@ -0,0 +1,5 @@ +module gokrazy/build/tsapp + +go 1.22.2 + +require github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca // indirect diff --git a/gokrazy/natlabapp/builddir/github.com/gokrazy/serial-busybox/go.sum b/gokrazy/natlabapp/builddir/github.com/gokrazy/serial-busybox/go.sum new file mode 100644 index 0000000000000..8135f60c3e791 --- /dev/null +++ b/gokrazy/natlabapp/builddir/github.com/gokrazy/serial-busybox/go.sum @@ -0,0 +1,26 @@ +github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gokrazy/gokrazy v0.0.0-20200501080617-f3445e01a904 h1:eqfH4A/LLgxv5RvqEXwVoFvfmpRa8+TokRjB5g6xBkk= +github.com/gokrazy/gokrazy 
v0.0.0-20200501080617-f3445e01a904/go.mod h1:pq6rGHqxMRPSaTXaCMzIZy0wLDusAJyoVNyNo05RLs0= +github.com/gokrazy/internal v0.0.0-20200407075822-660ad467b7c9 h1:x5jR/nNo4/kMSoNo/nwa2xbL7PN1an8S3oIn4OZJdec= +github.com/gokrazy/internal v0.0.0-20200407075822-660ad467b7c9/go.mod h1:LA5TQy7LcvYGQOy75tkrYkFUhbV2nl5qEBP47PSi2JA= +github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca h1:x0eSjuFy8qsRctVHeWm3EC474q3xm4h3OOOrYpcqyyA= +github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca/go.mod h1:OYcG5tSb+QrelmUOO4EZVUFcIHyyZb0QDbEbZFUp1TA= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gopacket v1.1.16/go.mod h1:UCLx9mCmAwsVbn6qQl1WIEt2SO7Nd2fD0th1TBAsqBw= +github.com/mdlayher/raw v0.0.0-20190303161257-764d452d77af/go.mod h1:rC/yE65s/DoHB6BzVOUBNYBGTg772JVytyAytffIZkY= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rtr7/dhcp4 v0.0.0-20181120124042-778e8c2e24a5/go.mod h1:FwstIpm6vX98QgtR8KEwZcVjiRn2WP76LjXAHj84fK0= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200406155108-e3b113bbe6a4 h1:c1Sgqkh8v6ZxafNGG64r8C8UisIW2TKMJN8P86tKjr0= +golang.org/x/sys v0.0.0-20200406155108-e3b113bbe6a4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod b/gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod new file mode 100644 index 0000000000000..ec4d9c64fc93e --- /dev/null +++ b/gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod @@ -0,0 +1,5 @@ +module gokrazy/build/tsapp + +go 1.22.2 + +require github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e // indirect diff --git a/gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum b/gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum new file mode 100644 index 0000000000000..d32d5460bf29c --- /dev/null +++ b/gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum @@ -0,0 +1,4 @@ +github.com/tailscale/gokrazy-kernel v0.0.0-20240530042707-3f95c886bcf2 h1:xzf+cMvBJBcA/Av7OTWBa0Tjrbfcy00TeatJeJt6zrY= +github.com/tailscale/gokrazy-kernel v0.0.0-20240530042707-3f95c886bcf2/go.mod h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= +github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e h1:tyUUgeRPGHjCZWycRnhdx8Lx9DRkjl3WsVUxYMrVBOw= +github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e/go.mod h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= diff --git a/gokrazy/natlabapp/builddir/tailscale.com/go.mod b/gokrazy/natlabapp/builddir/tailscale.com/go.mod new file mode 100644 index 0000000000000..6231e86de24f5 --- /dev/null +++ b/gokrazy/natlabapp/builddir/tailscale.com/go.mod @@ -0,0 +1,9 @@ +module gokrazy/build/tsapp + +go 1.22.0 + +toolchain go1.22.2 + +replace tailscale.com => ../../../.. 
+ +require tailscale.com v0.0.0-00010101000000-000000000000 // indirect diff --git a/gokrazy/tsapp/builddir/tailscale.com/cmd/tailscaled/go.sum b/gokrazy/natlabapp/builddir/tailscale.com/go.sum similarity index 71% rename from gokrazy/tsapp/builddir/tailscale.com/cmd/tailscaled/go.sum rename to gokrazy/natlabapp/builddir/tailscale.com/go.sum index 1a62f9f18636c..b3b73e2d0e764 100644 --- a/gokrazy/tsapp/builddir/tailscale.com/cmd/tailscaled/go.sum +++ b/gokrazy/natlabapp/builddir/tailscale.com/go.sum @@ -40,10 +40,10 @@ github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yez github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= -github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= -github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= -github.com/gaissmai/bart v0.4.1 h1:G1t58voWkNmT47lBDawH5QhtTDsdqRIO+ftq5x4P9Ls= -github.com/gaissmai/bart v0.4.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg= +github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= +github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc= +github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg= github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= @@ -54,8 +54,8 @@ github.com/google/btree v1.1.2 
h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI= github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/csrf v1.7.2 h1:oTUjx0vyf2T+wkrx09Trsev1TE+/EbDAeHtSTbtC2eI= github.com/gorilla/csrf v1.7.2/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= @@ -74,12 +74,18 @@ github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0 github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86/go.mod h1:aFAMtuldEgx/4q7iSGazk22+IcgvtiC+HIimFO9XlS8= github.com/jsimonetti/rtnetlink v1.4.0 h1:Z1BF0fRgcETPEa0Kt0MRk3yV5+kF1FWTni6KUFKrq2I= github.com/jsimonetti/rtnetlink v1.4.0/go.mod h1:5W1jDvWdnthFJ7fxYX1GMK07BUpI4oskfOqvPteYS6E= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ= github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk= 
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= @@ -92,14 +98,18 @@ github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= +github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkMUBc= +github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0= github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs= -github.com/tailscale/golang-x-crypto v0.0.0-20240108194725-7ce1f622c780 h1:U0J2CUrrTcc2wmr9tSLYEo+USfwNikRRsmxVLD4eZ7E= -github.com/tailscale/golang-x-crypto 
v0.0.0-20240108194725-7ce1f622c780/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= +github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0= +github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M= +github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 h1:rXZGgEa+k2vJM8xT0PoSKfVXwFGPQ3z3CJfmnHJkZZw= +github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio= github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= @@ -110,12 +120,18 @@ github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVL github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU= github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= -github.com/tailscale/wireguard-go v0.0.0-20240429185444-03c5a0ccf754 h1:iazWjqVHE6CbNam7WXRhi33Qad5o7a8LVYgVoILpZdI= -github.com/tailscale/wireguard-go v0.0.0-20240429185444-03c5a0ccf754/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1 h1:ycpNCSYwzZ7x4G4ioPNtKQmIY0G/3o4pVf8wCZq6blY= +github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 h1:RNpJrXfI5u6e+uzyIzvmnXbhmhdRkVf//90sMBH3lso= 
+github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9 h1:81P7rjnikHKTJ75EkjppvbwUfKHDHYk6LJpO5PZy8pA= github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= +github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= +github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tcnksm/go-httpstat v0.2.0 h1:rP7T5e5U2HfmOBmZzGgGZjBQ5/GluWUylujl0tJ04I0= github.com/tcnksm/go-httpstat v0.2.0/go.mod h1:s3JVJFtQxtBEBC9dwcdTTXS9xFnM3SXAZwPG41aurT8= +github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= +github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs= github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI= github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e h1:BA9O3BmlTmpjbvajAwzWx4Wo2TRVdpPXZEeemGQcajw= @@ -130,25 +146,43 @@ go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.25.0 
h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys 
v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 h1:/8/t5pz/mgdRXhYOIeqqYhFAQLE4DDGegc0Y4ZjyFJM= gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3/go.mod h1:NQHVAzMwvZ+Qe3ElSiHmq9RUm1MdNHpUZ52fiEqvn+0= +gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8= +gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU= +k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= +k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= +k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= +k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= -tailscale.com v1.66.4 h1:V0vTQah3xi2/zbsxJeOfl5QbO1WJPeD9TMlfL0daXqc= -tailscale.com v1.66.4/go.mod h1:99BIV4U3UPw36Sva04xK2ZsEpVRUkY9jCdEDSAhaNGM= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= 
+software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/gokrazy/natlabapp/config.json b/gokrazy/natlabapp/config.json new file mode 100644 index 0000000000000..902f14acdb940 --- /dev/null +++ b/gokrazy/natlabapp/config.json @@ -0,0 +1,27 @@ +{ + "Hostname": "natlabapp", + "Update": { + "NoPassword": true + }, + "SerialConsole": "ttyS0,115200", + "GokrazyPackages": [ + "github.com/gokrazy/gokrazy/cmd/dhcp" + ], + "Packages": [ + "github.com/gokrazy/serial-busybox", + "tailscale.com/cmd/tailscale", + "tailscale.com/cmd/tailscaled", + "tailscale.com/cmd/tta" + ], + "PackageConfig": { + "tailscale.com/cmd/tailscale": { + "ExtraFilePaths": { + "/usr": "usr-dir" + } + } + }, + "KernelPackage": "github.com/tailscale/gokrazy-kernel", + "FirmwarePackage": "", + "EEPROMPackage": "", + "InternalCompatibilityFlags": {} +} diff --git a/gokrazy/natlabapp/usr-dir.tar b/gokrazy/natlabapp/usr-dir.tar new file mode 100644 index 0000000000000..6ef6cfbfd3e1a Binary files /dev/null and b/gokrazy/natlabapp/usr-dir.tar differ diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/heartbeat/go.mod b/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/heartbeat/go.mod deleted file mode 100644 index d851081bbc660..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/heartbeat/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 // indirect diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/randomd/go.mod b/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/randomd/go.mod deleted file mode 100644 index d851081bbc660..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/randomd/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 // indirect diff --git 
a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/randomd/go.sum b/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/randomd/go.sum deleted file mode 100644 index 0e63641e90c7d..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/randomd/go.sum +++ /dev/null @@ -1,4 +0,0 @@ -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 h1:gdGRW/wXHPJuZgZD931Lh75mdJfzEEXrL+Dvi97Ck3A= -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.mod b/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.mod index f4172258d610c..33656efeea7d7 100644 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.mod +++ b/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.mod @@ -2,6 +2,14 @@ module gokrazy/build/tsapp go 1.22.2 -require github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 // indirect +require ( + github.com/gokrazy/gokrazy v0.0.0-20240802144848-676865a4e84f // indirect + github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 // indirect + github.com/google/renameio/v2 v2.0.0 // indirect + github.com/kenshaw/evdev v0.1.0 // indirect + github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b // indirect + github.com/spf13/pflag v1.0.5 // indirect + golang.org/x/sys v0.20.0 // indirect +) -replace github.com/gokrazy/gokrazy => github.com/tailscale/gokrazy v0.0.0-20240602215456-7b9b6bbf726a +replace github.com/gokrazy/gokrazy => github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.sum b/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.sum index 1a80c04cfad07..479eb1cef1ca7 100644 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.sum +++ 
b/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.sum @@ -2,6 +2,8 @@ github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 h1:gdGRW/wXHPJuZgZ github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= github.com/gokrazy/internal v0.0.0-20240510165500-68dd68393b7a h1:FKeN678rNpKTpWRdFbAhYL9mWzPu57R5XPXCR3WmXdI= github.com/gokrazy/internal v0.0.0-20240510165500-68dd68393b7a/go.mod h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= +github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 h1:XDklMxV0pE5jWiNaoo5TzvWfqdoiRRScmr4ZtDzE4Uw= +github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5/go.mod h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= github.com/kenshaw/evdev v0.1.0 h1:wmtceEOFfilChgdNT+c/djPJ2JineVsQ0N14kGzFRUo= @@ -12,5 +14,10 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/tailscale/gokrazy v0.0.0-20240602215456-7b9b6bbf726a h1:7dnA8x14JihQmKbPr++Y5CCN/XSyDmOB6cXUxcIj6VQ= github.com/tailscale/gokrazy v0.0.0-20240602215456-7b9b6bbf726a/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= +github.com/tailscale/gokrazy v0.0.0-20240802144848-676865a4e84f h1:ZSAGWpgs+6dK2oIz5OR+HUul3oJbnhFn8YNgcZ3d9SQ= +github.com/tailscale/gokrazy v0.0.0-20240802144848-676865a4e84f/go.mod h1:+/WWMckeuQt+DG6690A6H8IgC+HpBFq2fmwRKcSbxdk= +github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 h1:2B8/FbIRqmVgRUulQ4iu1EojniufComYe5Yj4BtIn1c= +github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678/go.mod h1:+/WWMckeuQt+DG6690A6H8IgC+HpBFq2fmwRKcSbxdk= +golang.org/x/sys v0.0.0-20201005065044-765f4ea38db3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/mkfs/go.mod b/gokrazy/tsapp/builddir/github.com/gokrazy/mkfs/go.mod deleted file mode 100644 index 8aa5a2955b16a..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/mkfs/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require github.com/gokrazy/mkfs v0.0.0-20230114091253-b6755f9e9632 // indirect diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/mkfs/go.sum b/gokrazy/tsapp/builddir/github.com/gokrazy/mkfs/go.sum deleted file mode 100644 index 551d1b5d7b4f1..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/mkfs/go.sum +++ /dev/null @@ -1,4 +0,0 @@ -github.com/gokrazy/internal v0.0.0-20210621162516-1b3b5687a06d h1:qk95CKJfxvU5oi3lbrVkEgID5ak1pOjTyPTdaXs6Q9E= -github.com/gokrazy/internal v0.0.0-20210621162516-1b3b5687a06d/go.mod h1:Gqv1x1DNrObmBvVvblpZbvZizZ0dU5PwiwYHipmtY9Y= -github.com/gokrazy/mkfs v0.0.0-20230114091253-b6755f9e9632 h1:Vt3rJdB4p56yjK4CKhb/awHT6Qj7LoC3CwPoOaiNS6k= -github.com/gokrazy/mkfs v0.0.0-20230114091253-b6755f9e9632/go.mod h1:O2w1ipGvg78u3F61FnLp36Db3MsUbdy8E/ciG64jbGY= diff --git a/gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod b/gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod index 570b3ef4c6e61..ec4d9c64fc93e 100644 --- a/gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod +++ b/gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod @@ -2,4 +2,4 @@ module gokrazy/build/tsapp go 1.22.2 -require github.com/tailscale/gokrazy-kernel v0.0.0-20240530042707-3f95c886bcf2 // indirect +require github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e // indirect diff --git a/gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum b/gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum index 
edb1b5c057b3a..d32d5460bf29c 100644 --- a/gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum +++ b/gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum @@ -1,2 +1,4 @@ github.com/tailscale/gokrazy-kernel v0.0.0-20240530042707-3f95c886bcf2 h1:xzf+cMvBJBcA/Av7OTWBa0Tjrbfcy00TeatJeJt6zrY= github.com/tailscale/gokrazy-kernel v0.0.0-20240530042707-3f95c886bcf2/go.mod h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= +github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e h1:tyUUgeRPGHjCZWycRnhdx8Lx9DRkjl3WsVUxYMrVBOw= +github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e/go.mod h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= diff --git a/gokrazy/tsapp/builddir/tailscale.com/cmd/tailscale/go.mod b/gokrazy/tsapp/builddir/tailscale.com/cmd/tailscale/go.mod deleted file mode 100644 index 433b653d37d4f..0000000000000 --- a/gokrazy/tsapp/builddir/tailscale.com/cmd/tailscale/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require tailscale.com v1.66.4 // indirect diff --git a/gokrazy/tsapp/builddir/tailscale.com/cmd/tailscale/go.sum b/gokrazy/tsapp/builddir/tailscale.com/cmd/tailscale/go.sum deleted file mode 100644 index c2392cb726b2a..0000000000000 --- a/gokrazy/tsapp/builddir/tailscale.com/cmd/tailscale/go.sum +++ /dev/null @@ -1,86 +0,0 @@ -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0= -github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= -github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= -github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da 
h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI= -github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/csrf v1.7.2 h1:oTUjx0vyf2T+wkrx09Trsev1TE+/EbDAeHtSTbtC2eI= -github.com/gorilla/csrf v1.7.2/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= -github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= -github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= -github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= -github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= -github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0WwEDBeqxKwb7WB62QX8bvZ/FJnVXIfk= -github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86/go.mod h1:aFAMtuldEgx/4q7iSGazk22+IcgvtiC+HIimFO9XlS8= -github.com/jsimonetti/rtnetlink v1.4.0 h1:Z1BF0fRgcETPEa0Kt0MRk3yV5+kF1FWTni6KUFKrq2I= -github.com/jsimonetti/rtnetlink v1.4.0/go.mod h1:5W1jDvWdnthFJ7fxYX1GMK07BUpI4oskfOqvPteYS6E= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.20 
h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= -github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= -github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= -github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI= -github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= -github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= -github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= -github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= -github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkMUBc= -github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= -github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0= -github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M= -github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio= -github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= -github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 h1:zrsUcqrG2uQSPhaUPjUQwozcRdDdSxxqhNgNZ3drZFk= -github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= -github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= -github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= -github.com/tcnksm/go-httpstat v0.2.0 
h1:rP7T5e5U2HfmOBmZzGgGZjBQ5/GluWUylujl0tJ04I0= -github.com/tcnksm/go-httpstat v0.2.0/go.mod h1:s3JVJFtQxtBEBC9dwcdTTXS9xFnM3SXAZwPG41aurT8= -github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= -github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= -github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= -github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= -github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= -github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= -github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H9rmp5nkDTAst8= -go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= -go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= -go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= -golang.org/x/sync v0.6.0 
h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= -k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= -nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= -nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= -software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= -tailscale.com v1.66.4 h1:V0vTQah3xi2/zbsxJeOfl5QbO1WJPeD9TMlfL0daXqc= -tailscale.com v1.66.4/go.mod h1:99BIV4U3UPw36Sva04xK2ZsEpVRUkY9jCdEDSAhaNGM= diff --git a/gokrazy/tsapp/builddir/tailscale.com/cmd/tailscaled/go.mod b/gokrazy/tsapp/builddir/tailscale.com/cmd/tailscaled/go.mod deleted file mode 100644 index 433b653d37d4f..0000000000000 --- a/gokrazy/tsapp/builddir/tailscale.com/cmd/tailscaled/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require tailscale.com v1.66.4 // indirect diff --git a/gokrazy/tsapp/builddir/tailscale.com/go.mod b/gokrazy/tsapp/builddir/tailscale.com/go.mod new file mode 100644 index 0000000000000..6231e86de24f5 --- /dev/null +++ b/gokrazy/tsapp/builddir/tailscale.com/go.mod 
@@ -0,0 +1,9 @@ +module gokrazy/build/tsapp + +go 1.22.0 + +toolchain go1.22.2 + +replace tailscale.com => ../../../.. + +require tailscale.com v0.0.0-00010101000000-000000000000 // indirect diff --git a/gokrazy/tsapp/builddir/tailscale.com/go.sum b/gokrazy/tsapp/builddir/tailscale.com/go.sum new file mode 100644 index 0000000000000..b3b73e2d0e764 --- /dev/null +++ b/gokrazy/tsapp/builddir/tailscale.com/go.sum @@ -0,0 +1,188 @@ +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= +github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= +github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= +github.com/aws/aws-sdk-go-v2/config v1.26.5 h1:lodGSevz7d+kkFJodfauThRxK9mdJbyutUxGq1NNhvw= +github.com/aws/aws-sdk-go-v2/config v1.26.5/go.mod h1:DxHrz6diQJOc9EwDslVRh84VjjrE17g+pVZXUeSxaDU= +github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= +github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= 
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= +github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 h1:a8HvP/+ew3tKwSXqL3BCSjiuicr+XTU2eFYeogV9GJE= +github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= +github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= +github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= +github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= 
+github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0= +github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= +github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= +github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= +github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= +github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= +github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= +github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= +github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc= +github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg= +github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= +github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= +github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= +github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod 
h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI= +github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/csrf v1.7.2 h1:oTUjx0vyf2T+wkrx09Trsev1TE+/EbDAeHtSTbtC2eI= +github.com/gorilla/csrf v1.7.2/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= +github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= +github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= +github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= +github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/illarion/gonotify v1.0.1 h1:F1d+0Fgbq/sDWjj/r66ekjDG+IDeecQKUFH4wNwsoio= +github.com/illarion/gonotify v1.0.1/go.mod h1:zt5pmDofZpU1f8aqlK0+95eQhoEAn/d4G4B/FjVW4jE= +github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA= +github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI= +github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= +github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0WwEDBeqxKwb7WB62QX8bvZ/FJnVXIfk= +github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86/go.mod 
h1:aFAMtuldEgx/4q7iSGazk22+IcgvtiC+HIimFO9XlS8= +github.com/jsimonetti/rtnetlink v1.4.0 h1:Z1BF0fRgcETPEa0Kt0MRk3yV5+kF1FWTni6KUFKrq2I= +github.com/jsimonetti/rtnetlink v1.4.0/go.mod h1:5W1jDvWdnthFJ7fxYX1GMK07BUpI4oskfOqvPteYS6E= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= +github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ= +github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk= +github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= +github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= +github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= +github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c= +github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE= +github.com/mdlayher/socket v0.5.0 
h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= +github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI= +github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= +github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= +github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= +github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkMUBc= +github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= +github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= +github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0= +github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs= +github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0= +github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M= +github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 h1:rXZGgEa+k2vJM8xT0PoSKfVXwFGPQ3z3CJfmnHJkZZw= +github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= +github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio= +github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= +github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= 
+github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= +github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 h1:zrsUcqrG2uQSPhaUPjUQwozcRdDdSxxqhNgNZ3drZFk= +github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= +github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w= +github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU= +github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= +github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= +github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1 h1:ycpNCSYwzZ7x4G4ioPNtKQmIY0G/3o4pVf8wCZq6blY= +github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 h1:RNpJrXfI5u6e+uzyIzvmnXbhmhdRkVf//90sMBH3lso= +github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9 h1:81P7rjnikHKTJ75EkjppvbwUfKHDHYk6LJpO5PZy8pA= +github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= +github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= +github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= +github.com/tcnksm/go-httpstat v0.2.0 h1:rP7T5e5U2HfmOBmZzGgGZjBQ5/GluWUylujl0tJ04I0= +github.com/tcnksm/go-httpstat v0.2.0/go.mod h1:s3JVJFtQxtBEBC9dwcdTTXS9xFnM3SXAZwPG41aurT8= +github.com/toqueteos/webbrowser 
v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= +github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= +github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs= +github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI= +github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e h1:BA9O3BmlTmpjbvajAwzWx4Wo2TRVdpPXZEeemGQcajw= +github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= +github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= +github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= +github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H9rmp5nkDTAst8= +go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= +golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod 
h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 h1:/8/t5pz/mgdRXhYOIeqqYhFAQLE4DDGegc0Y4ZjyFJM= +gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3/go.mod h1:NQHVAzMwvZ+Qe3ElSiHmq9RUm1MdNHpUZ52fiEqvn+0= +gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8= +gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod 
h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU= +k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= +k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= +k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= +k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= +nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= +nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= +software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/gokrazy/tsapp/config.json b/gokrazy/tsapp/config.json index 6445eb89e921d..33dd98a962043 100644 --- a/gokrazy/tsapp/config.json +++ b/gokrazy/tsapp/config.json @@ -1,7 +1,14 @@ { "Hostname": "tsapp", - "Update": { "NoPassword": true }, + "Update": { + "NoPassword": true + }, "SerialConsole": "ttyS0,115200", + "GokrazyPackages": [ + "github.com/gokrazy/gokrazy/cmd/dhcp", + "github.com/gokrazy/gokrazy/cmd/randomd", + "github.com/gokrazy/gokrazy/cmd/ntp" + ], "Packages": [ "github.com/gokrazy/serial-busybox", "github.com/gokrazy/breakglass", @@ -10,7 +17,9 @@ ], "PackageConfig": { "github.com/gokrazy/breakglass": { - "CommandLineFlags": [ "-authorized_keys=ec2" ] + "CommandLineFlags": [ + "-authorized_keys=ec2" + ] }, "tailscale.com/cmd/tailscale": { "ExtraFilePaths": { diff --git a/health/args.go b/health/args.go index ced42bba8d321..01a75aa2d79f3 100644 --- a/health/args.go +++ b/health/args.go @@ -32,4 +32,8 @@ const ( // ArgServerName provides a Warnable with the hostname of a server involved in the unhealthy state. 
ArgServerName Arg = "server-name" + + // ArgDNSServers provides a Warnable with a comma-delimited list of the hostnames of the DNS servers involved in the unhealthy state. + // If no nameservers were available to query, this will be an empty string. + ArgDNSServers Arg = "dns-servers" ) diff --git a/health/health.go b/health/health.go index f1fe8eccbf082..10549b523a008 100644 --- a/health/health.go +++ b/health/health.go @@ -6,6 +6,7 @@ package health import ( + "context" "errors" "fmt" "maps" @@ -64,6 +65,11 @@ type Tracker struct { // magicsock receive functions: IPv4, IPv6, and DERP. MagicSockReceiveFuncs [3]ReceiveFuncStats // indexed by ReceiveFunc values + // initOnce guards the initialization of the Tracker. + // Notably, it initializes the MagicSockReceiveFuncs names. + // mu should not be held during init. + initOnce sync.Once + // mu guards everything that follows. mu sync.Mutex @@ -432,6 +438,7 @@ func (t *Tracker) RegisterWatcher(cb func(w *Warnable, r *UnhealthyState)) (unre if t.nil() { return func() {} } + t.initOnce.Do(t.doOnceInit) t.mu.Lock() defer t.mu.Unlock() if t.watchers == nil { @@ -858,6 +865,7 @@ func (t *Tracker) timerSelfCheck() { if t.nil() { return } + t.initOnce.Do(t.doOnceInit) t.mu.Lock() defer t.mu.Unlock() t.checkReceiveFuncsLocked() @@ -987,8 +995,12 @@ func (t *Tracker) updateBuiltinWarnablesLocked() { } if t.lastLoginErr != nil { + var errMsg string + if !errors.Is(t.lastLoginErr, context.Canceled) { + errMsg = t.lastLoginErr.Error() + } t.setUnhealthyLocked(LoginStateWarnable, Args{ - ArgError: t.lastLoginErr.Error(), + ArgError: errMsg, }) return } else { @@ -1163,6 +1175,11 @@ type ReceiveFuncStats struct { missing bool } +// Name returns the name of the receive func ("ReceiveIPv4", "ReceiveIPv6", etc). 
+func (s *ReceiveFuncStats) Name() string { + return s.name +} + func (s *ReceiveFuncStats) Enter() { s.numCalls.Add(1) s.inCall.Store(true) @@ -1180,15 +1197,20 @@ func (t *Tracker) ReceiveFuncStats(which ReceiveFunc) *ReceiveFuncStats { if t == nil { return nil } + t.initOnce.Do(t.doOnceInit) return &t.MagicSockReceiveFuncs[which] } +func (t *Tracker) doOnceInit() { + for i := range t.MagicSockReceiveFuncs { + f := &t.MagicSockReceiveFuncs[i] + f.name = (ReceiveFunc(i)).String() + } +} + func (t *Tracker) checkReceiveFuncsLocked() { for i := range t.MagicSockReceiveFuncs { f := &t.MagicSockReceiveFuncs[i] - if f.name == "" { - f.name = (ReceiveFunc(i)).String() - } if runtime.GOOS == "js" && i < 2 { // Skip IPv4 and IPv6 on js. continue diff --git a/hostinfo/hostinfo.go b/hostinfo/hostinfo.go index 4ba7b4d6102c6..330669aea67b9 100644 --- a/hostinfo/hostinfo.go +++ b/hostinfo/hostinfo.go @@ -27,6 +27,7 @@ import ( "tailscale.com/util/dnsname" "tailscale.com/util/lineread" "tailscale.com/version" + "tailscale.com/version/distro" ) var started = time.Now() @@ -462,3 +463,15 @@ func IsSELinuxEnforcing() bool { out, _ := exec.Command("getenforce").Output() return string(bytes.TrimSpace(out)) == "Enforcing" } + +// IsNATLabGuestVM reports whether the current host is a NAT Lab guest VM. +func IsNATLabGuestVM() bool { + if runtime.GOOS == "linux" && distro.Get() == distro.Gokrazy { + cmdLine, _ := os.ReadFile("/proc/cmdline") + return bytes.Contains(cmdLine, []byte("tailscale-tta=1")) + } + return false +} + +// NAT Lab VMs have a unique MAC address prefix. +// See diff --git a/hostinfo/hostinfo_linux.go b/hostinfo/hostinfo_linux.go index d194fbb0da8b4..53d4187bc0c67 100644 --- a/hostinfo/hostinfo_linux.go +++ b/hostinfo/hostinfo_linux.go @@ -159,7 +159,14 @@ func linuxVersionMeta() (meta versionMeta) { return } +// linuxBuildTagPackageType is set by packagetype_*.go +// build tag guarded files. 
+var linuxBuildTagPackageType string + func packageTypeLinux() string { + if v := linuxBuildTagPackageType; v != "" { + return v + } // Report whether this is in a snap. // See https://snapcraft.io/docs/environment-variables // We just look at two somewhat arbitrarily. diff --git a/hostinfo/packagetype_container.go b/hostinfo/packagetype_container.go new file mode 100644 index 0000000000000..9bd14493cb34f --- /dev/null +++ b/hostinfo/packagetype_container.go @@ -0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && ts_package_container + +package hostinfo + +func init() { + linuxBuildTagPackageType = "container" +} diff --git a/ipn/conf.go b/ipn/conf.go index ed6e3c8714daa..6a67f40040c76 100644 --- a/ipn/conf.go +++ b/ipn/conf.go @@ -42,6 +42,10 @@ type ConfigVAlpha struct { AutoUpdate *AutoUpdatePrefs `json:",omitempty"` ServeConfigTemp *ServeConfig `json:",omitempty"` // TODO(bradfitz,maisem): make separate stable type for this + // StaticEndpoints are additional, user-defined endpoints that this node + // should advertise amongst its wireguard endpoints. + StaticEndpoints []netip.AddrPort `json:",omitempty"` + // TODO(bradfitz,maisem): future something like: // Profile map[string]*Config // keyed by alice@gmail.com, corp.com (TailnetSID) } diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index 11ea05df30742..d87374bbbcd61 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -88,6 +88,17 @@ var acmeDebug = envknob.RegisterBool("TS_DEBUG_ACME") // If a cert is expired, it will be renewed synchronously otherwise it will be // renewed asynchronously. func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertKeyPair, error) { + return b.GetCertPEMWithValidity(ctx, domain, 0) +} + +// GetCertPEMWithValidity gets the TLSCertKeyPair for domain, either from cache +// or via the ACME process. 
ACME process is used for new domain certs, existing +// expired certs or existing certs that should get renewed sooner than +// minValidity. +// +// If a cert is expired, or expires sooner than minValidity, it will be renewed +// synchronously. Otherwise it will be renewed asynchronously. +func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string, minValidity time.Duration) (*TLSCertKeyPair, error) { if !validLookingCertDomain(domain) { return nil, errors.New("invalid domain") } @@ -109,17 +120,28 @@ func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertK if pair, err := getCertPEMCached(cs, domain, now); err == nil { // If we got here, we have a valid unexpired cert. // Check whether we should start an async renewal. - if shouldRenew, err := b.shouldStartDomainRenewal(cs, domain, now, pair); err != nil { + shouldRenew, err := b.shouldStartDomainRenewal(cs, domain, now, pair, minValidity) + if err != nil { logf("error checking for certificate renewal: %v", err) - } else if shouldRenew { + // Renewal check failed, but the current cert is valid and not + // expired, so it's safe to return. + return pair, nil + } + if !shouldRenew { + return pair, nil + } + if minValidity == 0 { logf("starting async renewal") - // Start renewal in the background. - go b.getCertPEM(context.Background(), cs, logf, traceACME, domain, now) + // Start renewal in the background, return current valid cert. + go b.getCertPEM(context.Background(), cs, logf, traceACME, domain, now, minValidity) + return pair, nil } - return pair, nil + // If the caller requested a specific validity duration, fall through + // to synchronous renewal to fulfill that. 
+ logf("starting sync renewal") } - pair, err := b.getCertPEM(ctx, cs, logf, traceACME, domain, now) + pair, err := b.getCertPEM(ctx, cs, logf, traceACME, domain, now, minValidity) if err != nil { logf("getCertPEM: %v", err) return nil, err @@ -129,7 +151,14 @@ func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertK // shouldStartDomainRenewal reports whether the domain's cert should be renewed // based on the current time, the cert's expiry, and the ARI check. -func (b *LocalBackend) shouldStartDomainRenewal(cs certStore, domain string, now time.Time, pair *TLSCertKeyPair) (bool, error) { +func (b *LocalBackend) shouldStartDomainRenewal(cs certStore, domain string, now time.Time, pair *TLSCertKeyPair, minValidity time.Duration) (bool, error) { + if minValidity != 0 { + cert, err := pair.parseCertificate() + if err != nil { + return false, fmt.Errorf("parsing certificate: %w", err) + } + return cert.NotAfter.Sub(now) < minValidity, nil + } renewMu.Lock() defer renewMu.Unlock() if renewAt, ok := renewCertAt[domain]; ok { @@ -157,11 +186,7 @@ func (b *LocalBackend) domainRenewed(domain string) { } func (b *LocalBackend) domainRenewalTimeByExpiry(pair *TLSCertKeyPair) (time.Time, error) { - block, _ := pem.Decode(pair.CertPEM) - if block == nil { - return time.Time{}, fmt.Errorf("parsing certificate PEM") - } - cert, err := x509.ParseCertificate(block.Bytes) + cert, err := pair.parseCertificate() if err != nil { return time.Time{}, fmt.Errorf("parsing certificate: %w", err) } @@ -366,6 +391,17 @@ type TLSCertKeyPair struct { Cached bool // whether result came from cache } +func (kp TLSCertKeyPair) parseCertificate() (*x509.Certificate, error) { + block, _ := pem.Decode(kp.CertPEM) + if block == nil { + return nil, fmt.Errorf("error parsing certificate PEM") + } + if block.Type != "CERTIFICATE" { + return nil, fmt.Errorf("PEM block is %q, not a CERTIFICATE", block.Type) + } + return x509.ParseCertificate(block.Bytes) +} + func keyFile(dir, 
domain string) string { return filepath.Join(dir, domain+".key") } func certFile(dir, domain string) string { return filepath.Join(dir, domain+".crt") } @@ -383,7 +419,7 @@ func getCertPEMCached(cs certStore, domain string, now time.Time) (p *TLSCertKey return cs.Read(domain, now) } -func (b *LocalBackend) getCertPEM(ctx context.Context, cs certStore, logf logger.Logf, traceACME func(any), domain string, now time.Time) (*TLSCertKeyPair, error) { +func (b *LocalBackend) getCertPEM(ctx context.Context, cs certStore, logf logger.Logf, traceACME func(any), domain string, now time.Time, minValidity time.Duration) (*TLSCertKeyPair, error) { acmeMu.Lock() defer acmeMu.Unlock() @@ -393,7 +429,7 @@ func (b *LocalBackend) getCertPEM(ctx context.Context, cs certStore, logf logger if p, err := getCertPEMCached(cs, domain, now); err == nil { // shouldStartDomainRenewal caches its result so it's OK to call this // frequently. - shouldRenew, err := b.shouldStartDomainRenewal(cs, domain, now, p) + shouldRenew, err := b.shouldStartDomainRenewal(cs, domain, now, p, minValidity) if err != nil { logf("error checking for certificate renewal: %v", err) } else if !shouldRenew { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9da23f80d61d2..2421d898d2eb8 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -25,6 +25,7 @@ import ( "os" "os/exec" "path/filepath" + "reflect" "runtime" "slices" "sort" @@ -59,6 +60,7 @@ import ( "tailscale.com/ipn/policy" "tailscale.com/log/sockstatlog" "tailscale.com/logpolicy" + "tailscale.com/net/captivedetection" "tailscale.com/net/dns" "tailscale.com/net/dnscache" "tailscale.com/net/dnsfallback" @@ -343,6 +345,21 @@ type LocalBackend struct { // refreshAutoExitNode indicates if the exit node should be recomputed when the next netcheck report is available. refreshAutoExitNode bool + + // captiveCtx and captiveCancel are used to control captive portal + // detection. 
They are protected by 'mu' and can be changed during the + // lifetime of a LocalBackend. + // + // captiveCtx will always be non-nil, though it might be a canceled + // context. captiveCancel is non-nil if checkCaptivePortalLoop is + // running, and is set to nil after being canceled. + captiveCtx context.Context + captiveCancel context.CancelFunc + // needsCaptiveDetection is a channel that is used to signal either + // that captive portal detection is required (sending true) or that the + // backend is healthy and captive portal detection is not required + // (sending false). + needsCaptiveDetection chan bool } // HealthTracker returns the health tracker for the backend. @@ -391,49 +408,50 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo sds.SetDialer(dialer.SystemDial) } - if sys.InitialConfig != nil { - p := pm.CurrentPrefs().AsStruct() - mp, err := sys.InitialConfig.Parsed.ToPrefs() - if err != nil { - return nil, err - } - p.ApplyEdits(&mp) - if err := pm.SetPrefs(p.View(), ipn.NetworkProfile{}); err != nil { - return nil, err - } - } - envknob.LogCurrent(logf) osshare.SetFileSharingEnabled(false, logf) ctx, cancel := context.WithCancel(context.Background()) clock := tstime.StdClock{} + // Until we transition to a Running state, use a canceled context for + // our captive portal detection. 
+ captiveCtx, captiveCancel := context.WithCancel(ctx) + captiveCancel() + b := &LocalBackend{ - ctx: ctx, - ctxCancel: cancel, - logf: logf, - keyLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now), - statsLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now), - sys: sys, - health: sys.HealthTracker(), - conf: sys.InitialConfig, - e: e, - dialer: dialer, - store: store, - pm: pm, - backendLogID: logID, - state: ipn.NoState, - portpoll: new(portlist.Poller), - em: newExpiryManager(logf), - gotPortPollRes: make(chan struct{}), - loginFlags: loginFlags, - clock: clock, - selfUpdateProgress: make([]ipnstate.UpdateProgress, 0), - lastSelfUpdateState: ipnstate.UpdateFinished, + ctx: ctx, + ctxCancel: cancel, + logf: logf, + keyLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now), + statsLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now), + sys: sys, + health: sys.HealthTracker(), + e: e, + dialer: dialer, + store: store, + pm: pm, + backendLogID: logID, + state: ipn.NoState, + portpoll: new(portlist.Poller), + em: newExpiryManager(logf), + gotPortPollRes: make(chan struct{}), + loginFlags: loginFlags, + clock: clock, + selfUpdateProgress: make([]ipnstate.UpdateProgress, 0), + lastSelfUpdateState: ipnstate.UpdateFinished, + captiveCtx: captiveCtx, + captiveCancel: nil, // so that we start checkCaptivePortalLoop when Running + needsCaptiveDetection: make(chan bool), } mConn.SetNetInfoCallback(b.setNetInfo) + if sys.InitialConfig != nil { + if err := b.setConfigLocked(sys.InitialConfig); err != nil { + return nil, err + } + } + netMon := sys.NetMon.Get() b.sockstatLogger, err = sockstatlog.NewLogger(logpolicy.LogsDir(logf), logf, logID, netMon, sys.HealthTracker()) if err != nil { @@ -616,11 +634,50 @@ func (b *LocalBackend) ReloadConfig() (ok bool, err error) { if err != nil { return false, err } - b.conf = conf - // TODO(bradfitz): apply things + if err := b.setConfigLocked(conf); err != nil { + return false, fmt.Errorf("error setting config: %w", err) 
+ } + return true, nil } +func (b *LocalBackend) setConfigLocked(conf *conffile.Config) error { + + // TODO(irbekrm): notify the relevant components to consume any prefs + // updates. Currently only initial configfile settings are applied + // immediately. + p := b.pm.CurrentPrefs().AsStruct() + mp, err := conf.Parsed.ToPrefs() + if err != nil { + return fmt.Errorf("error parsing config to prefs: %w", err) + } + p.ApplyEdits(&mp) + if err := b.pm.SetPrefs(p.View(), ipn.NetworkProfile{}); err != nil { + return err + } + + defer func() { + b.conf = conf + }() + + if conf.Parsed.StaticEndpoints == nil && (b.conf == nil || b.conf.Parsed.StaticEndpoints == nil) { + return nil + } + + // Ensure that magicsock conn has the up to date static wireguard + // endpoints. Setting the endpoints here triggers an asynchronous update + // of the node's advertised endpoints. + if b.conf == nil && len(conf.Parsed.StaticEndpoints) != 0 || !reflect.DeepEqual(conf.Parsed.StaticEndpoints, b.conf.Parsed.StaticEndpoints) { + ms, ok := b.sys.MagicSock.GetOK() + if !ok { + b.logf("[unexpected] ReloadConfig: MagicSock not set") + } else { + ms.SetStaticEndpoints(views.SliceOf(conf.Parsed.StaticEndpoints)) + } + } + return nil +} + var assumeNetworkUpdateForTest = envknob.RegisterBool("TS_ASSUME_NETWORK_UP_FOR_TEST") // pauseOrResumeControlClientLocked pauses b.cc if there is no network available @@ -636,6 +693,10 @@ func (b *LocalBackend) pauseOrResumeControlClientLocked() { b.cc.SetPaused((b.state == ipn.Stopped && b.netMap != nil) || (!networkUp && !testenv.InTest() && !assumeNetworkUpdateForTest())) } +// captivePortalDetectionInterval is the duration to wait in an unhealthy state with connectivity broken +// before running captive portal detection. +const captivePortalDetectionInterval = 2 * time.Second + // linkChange is our network monitor callback, called whenever the network changes. 
func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { b.mu.Lock() @@ -686,6 +747,47 @@ func (b *LocalBackend) onHealthChange(w *health.Warnable, us *health.UnhealthySt b.send(ipn.Notify{ Health: state, }) + + isConnectivityImpacted := false + for _, w := range state.Warnings { + // Ignore the captive portal warnable itself. + if w.ImpactsConnectivity && w.WarnableCode != captivePortalWarnable.Code { + isConnectivityImpacted = true + break + } + } + + // captiveCtx can be changed, and is protected with 'mu'; grab that + // before we start our select, below. + // + // It is guaranteed to be non-nil. + b.mu.Lock() + ctx := b.captiveCtx + b.mu.Unlock() + + // If the context is canceled, we don't need to do anything. + if ctx.Err() != nil { + return + } + + if isConnectivityImpacted { + b.logf("health: connectivity impacted; triggering captive portal detection") + + // Ensure that we select on captiveCtx so that we can time out + // triggering captive portal detection if the backend is shutdown. + select { + case b.needsCaptiveDetection <- true: + case <-ctx.Done(): + } + } else { + // If connectivity is not impacted, we know for sure we're not behind a captive portal, + // so drop any warning, and signal that we don't need captive portal detection. + b.health.SetHealthy(captivePortalWarnable) + select { + case b.needsCaptiveDetection <- false: + case <-ctx.Done(): + } + } } // Shutdown halts the backend and all its sub-components. The backend @@ -698,6 +800,11 @@ func (b *LocalBackend) Shutdown() { } b.shutdownCalled = true + if b.captiveCancel != nil { + b.logf("canceling captive portal context") + b.captiveCancel() + } + if b.loginFlags&controlclient.LoginEphemeral != 0 { b.mu.Unlock() ctx, cancel := context.WithTimeout(b.ctx, 5*time.Second) @@ -2069,6 +2176,122 @@ func (b *LocalBackend) updateFilterLocked(netMap *netmap.NetworkMap, prefs ipn.P } } +// captivePortalWarnable is a Warnable which is set to an unhealthy state when a captive portal is detected. 
+var captivePortalWarnable = health.Register(&health.Warnable{ + Code: "captive-portal-detected", + Title: "Captive portal detected", + // High severity, because captive portals block all traffic and require user intervention. + Severity: health.SeverityHigh, + Text: health.StaticMessage("This network requires you to log in using your web browser."), + ImpactsConnectivity: true, +}) + +func (b *LocalBackend) checkCaptivePortalLoop(ctx context.Context) { + var tmr *time.Timer + + maybeStartTimer := func() { + // If there's an existing timer, nothing to do; just continue + // waiting for it to expire. Otherwise, create a new timer. + if tmr == nil { + tmr = time.NewTimer(captivePortalDetectionInterval) + } + } + maybeStopTimer := func() { + if tmr == nil { + return + } + if !tmr.Stop() { + <-tmr.C + } + tmr = nil + } + + for { + if ctx.Err() != nil { + maybeStopTimer() + return + } + + // First, see if we have a signal on our "healthy" channel, which + // takes priority over an existing timer. Because a select is + // nondeterministic, we explicitly check this channel before + // entering the main select below, so that we're guaranteed to + // stop the timer before starting captive portal detection. + select { + case needsCaptiveDetection := <-b.needsCaptiveDetection: + if needsCaptiveDetection { + maybeStartTimer() + } else { + maybeStopTimer() + } + default: + } + + var timerChan <-chan time.Time + if tmr != nil { + timerChan = tmr.C + } + select { + case <-ctx.Done(): + // All done; stop the timer and then exit. + maybeStopTimer() + return + case <-timerChan: + // Kick off captive portal check + b.performCaptiveDetection() + // nil out timer to force recreation + tmr = nil + case needsCaptiveDetection := <-b.needsCaptiveDetection: + if needsCaptiveDetection { + maybeStartTimer() + } else { + // Healthy; cancel any existing timer + maybeStopTimer() + } + } + } +} + +// performCaptiveDetection checks if captive portal detection is enabled via controlknob. 
If so, it runs +// the detection and updates the Warnable accordingly. +func (b *LocalBackend) performCaptiveDetection() { + if !b.shouldRunCaptivePortalDetection() { + return + } + + d := captivedetection.NewDetector(b.logf) + var dm *tailcfg.DERPMap + b.mu.Lock() + if b.netMap != nil { + dm = b.netMap.DERPMap + } + preferredDERP := 0 + if b.hostinfo != nil { + if b.hostinfo.NetInfo != nil { + preferredDERP = b.hostinfo.NetInfo.PreferredDERP + } + } + ctx := b.ctx + netMon := b.NetMon() + b.mu.Unlock() + found := d.Detect(ctx, netMon, dm, preferredDERP) + if found { + b.health.SetUnhealthy(captivePortalWarnable, health.Args{}) + } else { + b.health.SetHealthy(captivePortalWarnable) + } +} + +// shouldRunCaptivePortalDetection reports whether captive portal detection +// should be run. It is enabled by default, but can be disabled via a control +// knob. It is also only run when the user explicitly wants the backend to be +// running. +func (b *LocalBackend) shouldRunCaptivePortalDetection() bool { + b.mu.Lock() + defer b.mu.Unlock() + return !b.ControlKnobs().DisableCaptivePortalDetection.Load() && b.pm.prefs.WantRunning() +} + // packetFilterPermitsUnlockedNodes reports any peer in peers with the // UnsignedPeerAPIOnly bool set true has any of its allowed IPs in the packet // filter. @@ -3563,7 +3786,7 @@ func (b *LocalBackend) TCPHandlerForDst(src, dst netip.AddrPort) (handler func(c return nil }, opts } - if handler := b.tcpHandlerForServe(dst.Port(), src); handler != nil { + if handler := b.tcpHandlerForServe(dst.Port(), src, nil); handler != nil { return handler, opts } return nil, nil @@ -4462,9 +4685,27 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock if newState == ipn.Running { b.authURL = "" b.authURLTime = time.Time{} + + // Start a captive portal detection loop if none has been + // started. Create a new context if none is present, since it + // can be shut down if we transition away from Running. 
+ if b.captiveCancel == nil { + b.captiveCtx, b.captiveCancel = context.WithCancel(b.ctx) + go b.checkCaptivePortalLoop(b.captiveCtx) + } } else if oldState == ipn.Running { // Transitioning away from running. b.closePeerAPIListenersLocked() + + // Stop any existing captive portal detection loop. + if b.captiveCancel != nil { + b.captiveCancel() + b.captiveCancel = nil + + // NOTE: don't set captiveCtx to nil here, to ensure + // that we always have a (canceled) context to wait on + // in onHealthChange. + } } b.pauseOrResumeControlClientLocked() @@ -4889,6 +5130,27 @@ func (b *LocalBackend) setNetInfo(ni *tailcfg.NetInfo) { cc := b.cc var refresh bool if b.MagicConn().DERPs() > 0 || testenv.InTest() { + // When b.refreshAutoExitNode is set, we recently observed a link change + // that indicates we have switched networks. After switching networks, + // the previously selected automatic exit node is no longer as likely + // to be a good choice and connectivity will already be broken due to + // the network switch. Therefore, it is a good time to switch to a new + // exit node because the network is already disrupted. + // + // Unfortunately, at the time of the link change, no information is + // known about the new network's latency or location, so the necessary + // details are not available to make a new choice. Instead, it sets + // b.refreshAutoExitNode to signal that a new decision should be made + // when we have an updated netcheck report. ni is that updated report. + // + // However, during testing we observed that often the first ni is + // inconclusive because it was running during the link change or the + // link was otherwise not stable yet. b.MagicConn().updateEndpoints() + // can detect when the netcheck failed and trigger a rebind, but the + // required information is not available here, and moderate additional + // plumbing is required to pass that in. Instead, checking for an active + // DERP link offers an easy approximation. 
We will continue to refine + // this over time. refresh = b.refreshAutoExitNode b.refreshAutoExitNode = false } diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index 943e652e05518..593c5493cde61 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -27,6 +27,7 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" "tailscale.com/tka" + "tailscale.com/tsconst" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" @@ -716,7 +717,7 @@ func (b *LocalBackend) NetworkLockSign(nodeKey key.NodePublic, rotationPublic [] return key.NodePublic{}, tka.NodeKeySignature{}, errNetworkLockNotActive } if !b.tka.authority.KeyTrusted(nlPriv.KeyID()) { - return key.NodePublic{}, tka.NodeKeySignature{}, errors.New("this node is not trusted by network lock") + return key.NodePublic{}, tka.NodeKeySignature{}, errors.New(tsconst.TailnetLockNotTrustedMsg) } p, err := nodeKey.MarshalBinary() diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 0b5d5e89ab2ae..9ad05a1961813 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -56,6 +56,16 @@ var serveHTTPContextKey ctxkey.Key[*serveHTTPContext] type serveHTTPContext struct { SrcAddr netip.AddrPort DestPort uint16 + + // provides funnel-specific context, nil if not funneled + Funnel *funnelFlow +} + +// funnelFlow represents a funneled connection initiated via IngressPeer +// to Host. 
+type funnelFlow struct { + Host string + IngressPeer tailcfg.NodeView } // localListener is the state of host-level net.Listen for a specific (Tailscale IP, port) @@ -91,7 +101,7 @@ func (b *LocalBackend) newServeListener(ctx context.Context, ap netip.AddrPort, handler: func(conn net.Conn) error { srcAddr := conn.RemoteAddr().(*net.TCPAddr).AddrPort() - handler := b.tcpHandlerForServe(ap.Port(), srcAddr) + handler := b.tcpHandlerForServe(ap.Port(), srcAddr, nil) if handler == nil { b.logf("[unexpected] local-serve: no handler for %v to port %v", srcAddr, ap.Port()) conn.Close() @@ -382,7 +392,7 @@ func (b *LocalBackend) HandleIngressTCPConn(ingressPeer tailcfg.NodeView, target return } - _, port, err := net.SplitHostPort(string(target)) + host, port, err := net.SplitHostPort(string(target)) if err != nil { logf("got ingress conn for bad target %q; rejecting", target) sendRST() @@ -407,9 +417,10 @@ func (b *LocalBackend) HandleIngressTCPConn(ingressPeer tailcfg.NodeView, target return } } - // TODO(bradfitz): pass ingressPeer etc in context to tcpHandlerForServe, - // extend serveHTTPContext or similar. - handler := b.tcpHandlerForServe(dport, srcAddr) + handler := b.tcpHandlerForServe(dport, srcAddr, &funnelFlow{ + Host: host, + IngressPeer: ingressPeer, + }) if handler == nil { logf("[unexpected] no matching ingress serve handler for %v to port %v", srcAddr, dport) sendRST() @@ -424,8 +435,9 @@ func (b *LocalBackend) HandleIngressTCPConn(ingressPeer tailcfg.NodeView, target } // tcpHandlerForServe returns a handler for a TCP connection to be served via -// the ipn.ServeConfig. -func (b *LocalBackend) tcpHandlerForServe(dport uint16, srcAddr netip.AddrPort) (handler func(net.Conn) error) { +// the ipn.ServeConfig. The funnelFlow can be nil if this is not a funneled +// connection. 
+func (b *LocalBackend) tcpHandlerForServe(dport uint16, srcAddr netip.AddrPort, f *funnelFlow) (handler func(net.Conn) error) { b.mu.Lock() sc := b.serveConfig b.mu.Unlock() @@ -444,6 +456,7 @@ func (b *LocalBackend) tcpHandlerForServe(dport uint16, srcAddr netip.AddrPort) Handler: http.HandlerFunc(b.serveWebHandler), BaseContext: func(_ net.Listener) context.Context { return serveHTTPContextKey.WithValue(context.Background(), &serveHTTPContext{ + Funnel: f, SrcAddr: srcAddr, DestPort: dport, }) @@ -712,15 +725,20 @@ func (b *LocalBackend) addTailscaleIdentityHeaders(r *httputil.ProxyRequest) { r.Out.Header.Del("Tailscale-User-Login") r.Out.Header.Del("Tailscale-User-Name") r.Out.Header.Del("Tailscale-User-Profile-Pic") + r.Out.Header.Del("Tailscale-Funnel-Request") r.Out.Header.Del("Tailscale-Headers-Info") c, ok := serveHTTPContextKey.ValueOk(r.Out.Context()) if !ok { return } + if c.Funnel != nil { + r.Out.Header.Set("Tailscale-Funnel-Request", "?1") + return + } node, user, ok := b.WhoIs("tcp", c.SrcAddr) if !ok { - return // traffic from outside of Tailnet (funneled) + return // traffic from outside of Tailnet (funneled or local machine) } if node.IsTagged() { // 2023-06-14: Not setting identity headers for tagged nodes. 
diff --git a/ipn/localapi/cert.go b/ipn/localapi/cert.go index 447c3bc3cb866..323406f7ba650 100644 --- a/ipn/localapi/cert.go +++ b/ipn/localapi/cert.go @@ -9,6 +9,7 @@ import ( "fmt" "net/http" "strings" + "time" "tailscale.com/ipn/ipnlocal" ) @@ -23,7 +24,16 @@ func (h *Handler) serveCert(w http.ResponseWriter, r *http.Request) { http.Error(w, "internal handler config wired wrong", 500) return } - pair, err := h.b.GetCertPEM(r.Context(), domain) + var minValidity time.Duration + if minValidityStr := r.URL.Query().Get("min_validity"); minValidityStr != "" { + var err error + minValidity, err = time.ParseDuration(minValidityStr) + if err != nil { + http.Error(w, fmt.Sprintf("invalid validity parameter: %v", err), http.StatusBadRequest) + return + } + } + pair, err := h.b.GetCertPEMWithValidity(r.Context(), domain, minValidity) if err != nil { // TODO(bradfitz): 500 is a little lazy here. The errors returned from // GetCertPEM (and everywhere) should carry info info to get whether diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 565bb7b4e439a..3be469193c441 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -1752,10 +1752,17 @@ func (ww *multiFilePostResponseWriter) Write(p []byte) (int, error) { } func (ww *multiFilePostResponseWriter) Flush(w http.ResponseWriter) error { - maps.Copy(w.Header(), ww.Header()) - w.WriteHeader(ww.statusCode) - _, err := io.Copy(w, ww.body) - return err + if ww.header != nil { + maps.Copy(w.Header(), ww.header) + } + if ww.statusCode > 0 { + w.WriteHeader(ww.statusCode) + } + if ww.body != nil { + _, err := io.Copy(w, ww.body) + return err + } + return nil } func (h *Handler) singleFilePut( diff --git a/k8s-operator/api-docs-config.yaml b/k8s-operator/api-docs-config.yaml new file mode 100644 index 0000000000000..214171ca35c0d --- /dev/null +++ b/k8s-operator/api-docs-config.yaml @@ -0,0 +1,6 @@ +# Copyright (c) Tailscale Inc & AUTHORS +# SPDX-License-Identifier: BSD-3-Clause + +processor: {} 
+render: + kubernetesVersion: 1.30 diff --git a/k8s-operator/api.md b/k8s-operator/api.md index b47641d08415f..1b72df0f2720b 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -1,27 +1,25 @@ # API Reference -Packages: - +## Packages - [tailscale.com/v1alpha1](#tailscalecomv1alpha1) -# tailscale.com/v1alpha1 -Resource Types: +## tailscale.com/v1alpha1 -- [Connector](#connector) +### Resource Types +- [Connector](#connector) +- [ConnectorList](#connectorlist) - [DNSConfig](#dnsconfig) - +- [DNSConfigList](#dnsconfiglist) - [ProxyClass](#proxyclass) +- [ProxyClassList](#proxyclasslist) -## Connector -[↩ Parent](#tailscalecomv1alpha1 ) - - +#### Connector @@ -32,309 +30,106 @@ Connector is a cluster-scoped resource. More info: https://tailscale.com/kb/1236/kubernetes-operator#deploying-exit-nodes-and-subnet-routers-on-kubernetes-using-connector-custom-resource - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescriptionRequired
apiVersionstringtailscale.com/v1alpha1true
kindstringConnectortrue
metadataobjectRefer to the Kubernetes API documentation for the fields of the `metadata` field.true
specobject - ConnectorSpec describes the desired Tailscale component. -More info: -https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
-
- Validations:
  • has(self.subnetRouter) || self.exitNode == true: A Connector needs to be either an exit node or a subnet router, or both.
  • -
    true
    statusobject - ConnectorStatus describes the status of the Connector. This is set -and managed by the Tailscale operator.
    -
    false
    -### Connector.spec -[↩ Parent](#connector) +_Appears in:_ +- [ConnectorList](#connectorlist) +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | | +| `kind` _string_ | `Connector` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
    Servers may infer this from the endpoint the client submits requests to.
    Cannot be updated.
    In CamelCase.
    More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
    Servers should convert recognized schemas to the latest internal value, and
    may reject unrecognized values.
    More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[ConnectorSpec](#connectorspec)_ | ConnectorSpec describes the desired Tailscale component.
    More info:
    https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | | | +| `status` _[ConnectorStatus](#connectorstatus)_ | ConnectorStatus describes the status of the Connector. This is set
    and managed by the Tailscale operator. | | | -ConnectorSpec describes the desired Tailscale component. -More info: -https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    exitNodeboolean - ExitNode defines whether the Connector node should act as a -Tailscale exit node. Defaults to false. -https://tailscale.com/kb/1103/exit-nodes
    -
    false
    hostnamestring - Hostname is the tailnet hostname that should be assigned to the -Connector node. If unset, hostname defaults to -connector. Hostname can contain lower case letters, numbers and -dashes, it must not start or end with a dash and must be between 2 -and 63 characters long.
    -
    false
    proxyClassstring - ProxyClass is the name of the ProxyClass custom resource that -contains configuration options that should be applied to the -resources created for this Connector. If unset, the operator will -create resources with the default configuration.
    -
    false
    subnetRouterobject - SubnetRouter defines subnet routes that the Connector node should -expose to tailnet. If unset, none are exposed. -https://tailscale.com/kb/1019/subnets/
    -
    false
    tags[]string - Tags that the Tailscale node will be tagged with. -Defaults to [tag:k8s]. -To autoapprove the subnet routes or exit node defined by a Connector, -you can configure Tailscale ACLs to give these tags the necessary -permissions. -See https://tailscale.com/kb/1018/acls/#auto-approvers-for-routes-and-exit-nodes. -If you specify custom tags here, you must also make the operator an owner of these tags. -See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator. -Tags cannot be changed once a Connector node has been created. -Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$.
    -
    false
    - - -### Connector.spec.subnetRouter -[↩ Parent](#connectorspec) - - - -SubnetRouter defines subnet routes that the Connector node should -expose to tailnet. If unset, none are exposed. -https://tailscale.com/kb/1019/subnets/ - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    advertiseRoutes[]string - AdvertiseRoutes refer to CIDRs that the subnet router should make -available. Route values must be strings that represent a valid IPv4 -or IPv6 CIDR range. Values can be Tailscale 4via6 subnet routes. -https://tailscale.com/kb/1201/4via6-subnets/
    -
    true
    - - -### Connector.status -[↩ Parent](#connector) - - - -ConnectorStatus describes the status of the Connector. This is set -and managed by the Tailscale operator. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    conditions[]object - List of status conditions to indicate the status of the Connector. -Known condition types are `ConnectorReady`.
    -
    false
    hostnamestring - Hostname is the fully qualified domain name of the Connector node. -If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the -node.
    -
    false
    isExitNodeboolean - IsExitNode is set to true if the Connector acts as an exit node.
    -
    false
    subnetRoutesstring - SubnetRoutes are the routes currently exposed to tailnet via this -Connector instance.
    -
    false
    tailnetIPs[]string - TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) -assigned to the Connector node.
    -
    false
    - - -### Connector.status.conditions[index] -[↩ Parent](#connectorstatus) - - - -Condition contains details for one aspect of the current state of this API Resource. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    lastTransitionTimestring - lastTransitionTime is the last time the condition transitioned from one status to another. -This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
    -
    - Format: date-time
    -
    true
    messagestring - message is a human readable message indicating details about the transition. -This may be an empty string.
    -
    true
    reasonstring - reason contains a programmatic identifier indicating the reason for the condition's last transition. -Producers of specific condition types may define expected values and meanings for this field, -and whether the values are considered a guaranteed API. -The value should be a CamelCase string. -This field may not be empty.
    -
    true
    statusenum - status of the condition, one of True, False, Unknown.
    -
    - Enum: True, False, Unknown
    -
    true
    typestring - type of condition in CamelCase or in foo.example.com/CamelCase.
    -
    true
    observedGenerationinteger - observedGeneration represents the .metadata.generation that the condition was set based upon. -For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date -with respect to the current state of the instance.
    -
    - Format: int64
    - Minimum: 0
    -
    false
    - -## DNSConfig -[↩ Parent](#tailscalecomv1alpha1 ) +#### ConnectorList + + + + + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | | +| `kind` _string_ | `ConnectorList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
    Servers may infer this from the endpoint the client submits requests to.
    Cannot be updated.
    In CamelCase.
    More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
    Servers should convert recognized schemas to the latest internal value, and
    may reject unrecognized values.
    More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[Connector](#connector) array_ | | | | + + +#### ConnectorSpec + + + +ConnectorSpec describes a Tailscale node to be deployed in the cluster. + + + +_Appears in:_ +- [Connector](#connector) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `tags` _[Tags](#tags)_ | Tags that the Tailscale node will be tagged with.
    Defaults to [tag:k8s].
    To autoapprove the subnet routes or exit node defined by a Connector,
    you can configure Tailscale ACLs to give these tags the necessary
    permissions.
    See https://tailscale.com/kb/1018/acls/#auto-approvers-for-routes-and-exit-nodes.
    If you specify custom tags here, you must also make the operator an owner of these tags.
    See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
    Tags cannot be changed once a Connector node has been created.
    Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
    Type: string
    | +| `hostname` _[Hostname](#hostname)_ | Hostname is the tailnet hostname that should be assigned to the
Connector node. If unset, hostname defaults to &lt;connector name&gt;-connector. Hostname can contain lower case letters, numbers and<br />
    dashes, it must not start or end with a dash and must be between 2
    and 63 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$`
    Type: string
    | +| `proxyClass` _string_ | ProxyClass is the name of the ProxyClass custom resource that
    contains configuration options that should be applied to the
    resources created for this Connector. If unset, the operator will
    create resources with the default configuration. | | | +| `subnetRouter` _[SubnetRouter](#subnetrouter)_ | SubnetRouter defines subnet routes that the Connector node should
    expose to tailnet. If unset, none are exposed.
    https://tailscale.com/kb/1019/subnets/ | | | +| `exitNode` _boolean_ | ExitNode defines whether the Connector node should act as a
    Tailscale exit node. Defaults to false.
    https://tailscale.com/kb/1103/exit-nodes | | | + + +#### ConnectorStatus + + + +ConnectorStatus defines the observed state of the Connector. + + + +_Appears in:_ +- [Connector](#connector) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the Connector.
    Known condition types are `ConnectorReady`. | | | +| `subnetRoutes` _string_ | SubnetRoutes are the routes currently exposed to tailnet via this
    Connector instance. | | | +| `isExitNode` _boolean_ | IsExitNode is set to true if the Connector acts as an exit node. | | | +| `tailnetIPs` _string array_ | TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6)
    assigned to the Connector node. | | | +| `hostname` _string_ | Hostname is the fully qualified domain name of the Connector node.
    If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
    node. | | | + + +#### Container + +_Appears in:_ +- [Pod](#pod) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `env` _[Env](#env) array_ | List of environment variables to set in the container.
    https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables
    Note that environment variables provided here will take precedence
    over Tailscale-specific environment variables set by the operator,
    however running proxies with custom values for Tailscale environment
variables (e.g. TS_USERSPACE) is not recommended and might break in<br />
    the future. | | | +| `image` _string_ | Container image name. By default images are pulled from
    docker.io/tailscale/tailscale, but the official images are also
    available at ghcr.io/tailscale/tailscale. Specifying image name here
    will override any proxy image values specified via the Kubernetes
    operator's Helm chart values or PROXY_IMAGE env var in the operator
    Deployment.
    https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | | +| `imagePullPolicy` _[PullPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#pullpolicy-v1-core)_ | Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always.
    https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | Enum: [Always Never IfNotPresent]
    | +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#resourcerequirements-v1-core)_ | Container resource requirements.
    By default Tailscale Kubernetes operator does not apply any resource
requirements. The amount of resources required will depend on the<br />
    amount of resources the operator needs to parse, usage patterns and
    cluster size.
    https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources | | | +| `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#securitycontext-v1-core)_ | Container security context.
    Security context specified here will override the security context by the operator.
    By default the operator:
    - sets 'privileged: true' for the init container
- sets NET_ADMIN capability for tailscale container for proxies that<br />
    are created for Services or Connector.
    https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context | | | + + +#### DNSConfig + + + DNSConfig can be deployed to cluster to make a subset of Tailscale MagicDNS names resolvable by cluster workloads. Use this if: A) you need to refer to tailnet services, exposed to cluster via Tailscale Kubernetes operator egress @@ -362,300 +157,216 @@ tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation to ensure that the proxy created for the Ingress listens on its Pod IP address. NB: Clusters where Pods get assigned IPv6 addresses only are currently not supported. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    apiVersionstringtailscale.com/v1alpha1true
    kindstringDNSConfigtrue
    metadataobjectRefer to the Kubernetes API documentation for the fields of the `metadata` field.true
    specobject - Spec describes the desired DNS configuration. -More info: -https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    -
    true
    statusobject - Status describes the status of the DNSConfig. This is set -and managed by the Tailscale operator.
    -
    false
    -### DNSConfig.spec -[↩ Parent](#dnsconfig) +_Appears in:_ +- [DNSConfigList](#dnsconfiglist) +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | | +| `kind` _string_ | `DNSConfig` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
    Servers may infer this from the endpoint the client submits requests to.
    Cannot be updated.
    In CamelCase.
    More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
    Servers should convert recognized schemas to the latest internal value, and
    may reject unrecognized values.
    More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[DNSConfigSpec](#dnsconfigspec)_ | Spec describes the desired DNS configuration.
    More info:
    https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | | | +| `status` _[DNSConfigStatus](#dnsconfigstatus)_ | Status describes the status of the DNSConfig. This is set
    and managed by the Tailscale operator. | | | -Spec describes the desired DNS configuration. -More info: -https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    nameserverobject - Configuration for a nameserver that can resolve ts.net DNS names -associated with in-cluster proxies for Tailscale egress Services and -Tailscale Ingresses. The operator will always deploy this nameserver -when a DNSConfig is applied.
    -
    true
    - - -### DNSConfig.spec.nameserver -[↩ Parent](#dnsconfigspec) - - - -Configuration for a nameserver that can resolve ts.net DNS names -associated with in-cluster proxies for Tailscale egress Services and -Tailscale Ingresses. The operator will always deploy this nameserver -when a DNSConfig is applied. - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    imageobject - Nameserver image.
    -
    false
    - - -### DNSConfig.spec.nameserver.image -[↩ Parent](#dnsconfigspecnameserver) - - - -Nameserver image. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    repostring - Repo defaults to tailscale/k8s-nameserver.
    -
    false
    tagstring - Tag defaults to operator's own tag.
    -
    false
    - - -### DNSConfig.status -[↩ Parent](#dnsconfig) - - - -Status describes the status of the DNSConfig. This is set -and managed by the Tailscale operator. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    conditions[]object -
    -
    false
    nameserverobject - Nameserver describes the status of nameserver cluster resources.
    -
    false
    - - -### DNSConfig.status.conditions[index] -[↩ Parent](#dnsconfigstatus) - - - -Condition contains details for one aspect of the current state of this API Resource. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    lastTransitionTimestring - lastTransitionTime is the last time the condition transitioned from one status to another. -This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
    -
    - Format: date-time
    -
    true
    messagestring - message is a human readable message indicating details about the transition. -This may be an empty string.
    -
    true
    reasonstring - reason contains a programmatic identifier indicating the reason for the condition's last transition. -Producers of specific condition types may define expected values and meanings for this field, -and whether the values are considered a guaranteed API. -The value should be a CamelCase string. -This field may not be empty.
    -
    true
    statusenum - status of the condition, one of True, False, Unknown.
    -
    - Enum: True, False, Unknown
    -
    true
    typestring - type of condition in CamelCase or in foo.example.com/CamelCase.
    -
    true
    observedGenerationinteger - observedGeneration represents the .metadata.generation that the condition was set based upon. -For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date -with respect to the current state of the instance.
    -
    - Format: int64
    - Minimum: 0
    -
    false
    - - -### DNSConfig.status.nameserver -[↩ Parent](#dnsconfigstatus) - - - -Nameserver describes the status of nameserver cluster resources. - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    ipstring - IP is the ClusterIP of the Service fronting the deployed ts.net nameserver. -Currently you must manually update your cluster DNS config to add -this address as a stub nameserver for ts.net for cluster workloads to be -able to resolve MagicDNS names associated with egress or Ingress -proxies. -The IP address will change if you delete and recreate the DNSConfig.
    -
    false
    - -## ProxyClass -[↩ Parent](#tailscalecomv1alpha1 ) +#### DNSConfigList + + + + + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | | +| `kind` _string_ | `DNSConfigList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
    Servers may infer this from the endpoint the client submits requests to.
    Cannot be updated.
    In CamelCase.
    More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
    Servers should convert recognized schemas to the latest internal value, and
    may reject unrecognized values.
    More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[DNSConfig](#dnsconfig) array_ | | | | + + +#### DNSConfigSpec + + + + + + + +_Appears in:_ +- [DNSConfig](#dnsconfig) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `nameserver` _[Nameserver](#nameserver)_ | Configuration for a nameserver that can resolve ts.net DNS names
    associated with in-cluster proxies for Tailscale egress Services and
    Tailscale Ingresses. The operator will always deploy this nameserver
    when a DNSConfig is applied. | | | + + +#### DNSConfigStatus + + + + + + + +_Appears in:_ +- [DNSConfig](#dnsconfig) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | | | | +| `nameserver` _[NameserverStatus](#nameserverstatus)_ | Nameserver describes the status of nameserver cluster resources. | | | + + +#### Env + + + + + + + +_Appears in:_ +- [Container](#container) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `name` _[Name](#name)_ | Name of the environment variable. Must be a C_IDENTIFIER. | | Pattern: `^[-._a-zA-Z][-._a-zA-Z0-9]*$`
    Type: string
    | +| `value` _string_ | Variable references $(VAR_NAME) are expanded using the previously defined
    environment variables in the container and any service environment
    variables. If a variable cannot be resolved, the reference in the input
    string will be unchanged. Double $$ are reduced to a single $, which
    allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
    produce the string literal "$(VAR_NAME)". Escaped references will never
    be expanded, regardless of whether the variable exists or not. Defaults
    to "". | | | + + +#### Hostname + +_Underlying type:_ _string_ + + + +_Validation:_ +- Pattern: `^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$` +- Type: string + +_Appears in:_ +- [ConnectorSpec](#connectorspec) + + + +#### Image + + + + + + + +_Appears in:_ +- [Nameserver](#nameserver) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `repo` _string_ | Repo defaults to tailscale/k8s-nameserver. | | | +| `tag` _string_ | Tag defaults to operator's own tag. | | | + + +#### Metrics + + + + + + + +_Appears in:_ +- [ProxyClassSpec](#proxyclassspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `enable` _boolean_ | Setting enable to true will make the proxy serve Tailscale metrics
    at :9001/debug/metrics.
    Defaults to false. | | | + + +#### Name + +_Underlying type:_ _string_ + + + +_Validation:_ +- Pattern: `^[-._a-zA-Z][-._a-zA-Z0-9]*$` +- Type: string + +_Appears in:_ +- [Env](#env) + + + +#### Nameserver + +_Appears in:_ +- [DNSConfigSpec](#dnsconfigspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `image` _[Image](#image)_ | Nameserver image. | | | + + +#### NameserverStatus + + + + + + + +_Appears in:_ +- [DNSConfigStatus](#dnsconfigstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `ip` _string_ | IP is the ClusterIP of the Service fronting the deployed ts.net nameserver.
    Currently you must manually update your cluster DNS config to add
    this address as a stub nameserver for ts.net for cluster workloads to be
    able to resolve MagicDNS names associated with egress or Ingress
    proxies.
    The IP address will change if you delete and recreate the DNSConfig. | | | + + +#### Pod + + + + + + + +_Appears in:_ +- [StatefulSet](#statefulset) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `labels` _object (keys:string, values:string)_ | Labels that will be added to the proxy Pod.
    Any labels specified here will be merged with the default labels
    applied to the Pod by the Tailscale Kubernetes operator.
    Label keys and values must be valid Kubernetes label keys and values.
    https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set | | | +| `annotations` _object (keys:string, values:string)_ | Annotations that will be added to the proxy Pod.
    Any annotations specified here will be merged with the default
    annotations applied to the Pod by the Tailscale Kubernetes operator.
    Annotations must be valid Kubernetes annotations.
    https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set | | | +| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#affinity-v1-core)_ | Proxy Pod's affinity rules.
    By default, the Tailscale Kubernetes operator does not apply any affinity rules.
    https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#affinity | | | +| `tailscaleContainer` _[Container](#container)_ | Configuration for the proxy container running tailscale. | | | +| `tailscaleInitContainer` _[Container](#container)_ | Configuration for the proxy init container that enables forwarding. | | | +| `securityContext` _[PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#podsecuritycontext-v1-core)_ | Proxy Pod's security context.
    By default Tailscale Kubernetes operator does not apply any Pod
    security context.
    https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-2 | | | +| `imagePullSecrets` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#localobjectreference-v1-core) array_ | Proxy Pod's image pull Secrets.
    https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec | | | +| `nodeName` _string_ | Proxy Pod's node name.
    https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | +| `nodeSelector` _object (keys:string, values:string)_ | Proxy Pod's node selector.
    By default Tailscale Kubernetes operator does not apply any node
    selector.
    https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | +| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#toleration-v1-core) array_ | Proxy Pod's tolerations.
    By default Tailscale Kubernetes operator does not apply any
    tolerations.
    https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | + + +#### ProxyClass + + + ProxyClass describes a set of configuration parameters that can be applied to proxy resources created by the Tailscale Kubernetes operator. To apply a given ProxyClass to resources created for a tailscale Ingress or @@ -666,3749 +377,185 @@ ProxyClass is a cluster scoped resource. More info: https://tailscale.com/kb/1236/kubernetes-operator#cluster-resource-customization-using-proxyclass-custom-resource. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    apiVersionstringtailscale.com/v1alpha1true
    kindstringProxyClasstrue
    metadataobjectRefer to the Kubernetes API documentation for the fields of the `metadata` field.true
    specobject - Specification of the desired state of the ProxyClass resource. -https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    -
    true
    statusobject - Status of the ProxyClass. This is set and managed automatically. -https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
    -
    false
    - - -### ProxyClass.spec -[↩ Parent](#proxyclass) - - - -Specification of the desired state of the ProxyClass resource. -https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    metricsobject - Configuration for proxy metrics. Metrics are currently not supported -for egress proxies and for Ingress proxies that have been configured -with tailscale.com/experimental-forward-cluster-traffic-via-ingress -annotation. Note that the metrics are currently considered unstable -and will likely change in breaking ways in the future - we only -recommend that you use those for debugging purposes.
    -
    false
    statefulSetobject - Configuration parameters for the proxy's StatefulSet. Tailscale -Kubernetes operator deploys a StatefulSet for each of the user -configured proxies (Tailscale Ingress, Tailscale Service, Connector).
    -
    false
    tailscaleobject - TailscaleConfig contains options to configure the tailscale-specific -parameters of proxies.
    -
    false
    - - -### ProxyClass.spec.metrics -[↩ Parent](#proxyclassspec) - - - -Configuration for proxy metrics. Metrics are currently not supported -for egress proxies and for Ingress proxies that have been configured -with tailscale.com/experimental-forward-cluster-traffic-via-ingress -annotation. Note that the metrics are currently considered unstable -and will likely change in breaking ways in the future - we only -recommend that you use those for debugging purposes. - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    enableboolean - Setting enable to true will make the proxy serve Tailscale metrics -at :9001/debug/metrics. -Defaults to false.
    -
    true
    - - -### ProxyClass.spec.statefulSet -[↩ Parent](#proxyclassspec) - - - -Configuration parameters for the proxy's StatefulSet. Tailscale -Kubernetes operator deploys a StatefulSet for each of the user -configured proxies (Tailscale Ingress, Tailscale Service, Connector). - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    annotationsmap[string]string - Annotations that will be added to the StatefulSet created for the proxy. -Any Annotations specified here will be merged with the default annotations -applied to the StatefulSet by the Tailscale Kubernetes operator as -well as any other annotations that might have been applied by other -actors. -Annotations must be valid Kubernetes annotations. -https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set
    -
    false
    labelsmap[string]string - Labels that will be added to the StatefulSet created for the proxy. -Any labels specified here will be merged with the default labels -applied to the StatefulSet by the Tailscale Kubernetes operator as -well as any other labels that might have been applied by other -actors. -Label keys and values must be valid Kubernetes label keys and values. -https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set
    -
    false
    podobject - Configuration for the proxy Pod.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod -[↩ Parent](#proxyclassspecstatefulset) - - - -Configuration for the proxy Pod. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    affinityobject - Proxy Pod's affinity rules. -By default, the Tailscale Kubernetes operator does not apply any affinity rules. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#affinity
    -
    false
    annotationsmap[string]string - Annotations that will be added to the proxy Pod. -Any annotations specified here will be merged with the default -annotations applied to the Pod by the Tailscale Kubernetes operator. -Annotations must be valid Kubernetes annotations. -https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set
    -
    false
    imagePullSecrets[]object - Proxy Pod's image pull Secrets. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec
    -
    false
    labelsmap[string]string - Labels that will be added to the proxy Pod. -Any labels specified here will be merged with the default labels -applied to the Pod by the Tailscale Kubernetes operator. -Label keys and values must be valid Kubernetes label keys and values. -https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set
    -
    false
    nodeNamestring - Proxy Pod's node name. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling
    -
    false
    nodeSelectormap[string]string - Proxy Pod's node selector. -By default Tailscale Kubernetes operator does not apply any node -selector. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling
    -
    false
    securityContextobject - Proxy Pod's security context. -By default Tailscale Kubernetes operator does not apply any Pod -security context. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-2
    -
    false
    tailscaleContainerobject - Configuration for the proxy container running tailscale.
    -
    false
    tailscaleInitContainerobject - Configuration for the proxy init container that enables forwarding.
    -
    false
    tolerations[]object - Proxy Pod's tolerations. -By default Tailscale Kubernetes operator does not apply any -tolerations. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity -[↩ Parent](#proxyclassspecstatefulsetpod) - - - -Proxy Pod's affinity rules. -By default, the Tailscale Kubernetes operator does not apply any affinity rules. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#affinity - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    nodeAffinityobject - Describes node affinity scheduling rules for the pod.
    -
    false
    podAffinityobject - Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
    -
    false
    podAntiAffinityobject - Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.nodeAffinity -[↩ Parent](#proxyclassspecstatefulsetpodaffinity) - - - -Describes node affinity scheduling rules for the pod. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    preferredDuringSchedulingIgnoredDuringExecution[]object - The scheduler will prefer to schedule pods to nodes that satisfy -the affinity expressions specified by this field, but it may choose -a node that violates one or more of the expressions. The node that is -most preferred is the one with the greatest sum of weights, i.e. -for each node that meets all of the scheduling requirements (resource -request, requiredDuringScheduling affinity expressions, etc.), -compute a sum by iterating through the elements of this field and adding -"weight" to the sum if the node matches the corresponding matchExpressions; the -node(s) with the highest sum are the most preferred.
    -
    false
    requiredDuringSchedulingIgnoredDuringExecutionobject - If the affinity requirements specified by this field are not met at -scheduling time, the pod will not be scheduled onto the node. -If the affinity requirements specified by this field cease to be met -at some point during pod execution (e.g. due to an update), the system -may or may not try to eventually evict the pod from its node.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] -[↩ Parent](#proxyclassspecstatefulsetpodaffinitynodeaffinity) - - - -An empty preferred scheduling term matches all objects with implicit weight 0 -(i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    preferenceobject - A node selector term, associated with the corresponding weight.
    -
    true
    weightinteger - Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
    -
    - Format: int32
    -
    true
    - - -### ProxyClass.spec.statefulSet.pod.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference -[↩ Parent](#proxyclassspecstatefulsetpodaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionindex) - - - -A node selector term, associated with the corresponding weight. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    matchExpressions[]object - A list of node selector requirements by node's labels.
    -
    false
    matchFields[]object - A list of node selector requirements by node's fields.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchExpressions[index] -[↩ Parent](#proxyclassspecstatefulsetpodaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionindexpreference) - - - -A node selector requirement is a selector that contains values, a key, and an operator -that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    keystring - The label key that the selector applies to.
    -
    true
    operatorstring - Represents a key's relationship to a set of values. -Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
    -
    true
    values[]string - An array of string values. If the operator is In or NotIn, -the values array must be non-empty. If the operator is Exists or DoesNotExist, -the values array must be empty. If the operator is Gt or Lt, the values -array must have a single element, which will be interpreted as an integer. -This array is replaced during a strategic merge patch.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].preference.matchFields[index] -[↩ Parent](#proxyclassspecstatefulsetpodaffinitynodeaffinitypreferredduringschedulingignoredduringexecutionindexpreference) - - - -A node selector requirement is a selector that contains values, a key, and an operator -that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    keystring - The label key that the selector applies to.
    -
    true
    operatorstring - Represents a key's relationship to a set of values. -Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
    -
    true
    values[]string - An array of string values. If the operator is In or NotIn, -the values array must be non-empty. If the operator is Exists or DoesNotExist, -the values array must be empty. If the operator is Gt or Lt, the values -array must have a single element, which will be interpreted as an integer. -This array is replaced during a strategic merge patch.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution -[↩ Parent](#proxyclassspecstatefulsetpodaffinitynodeaffinity) - - - -If the affinity requirements specified by this field are not met at -scheduling time, the pod will not be scheduled onto the node. -If the affinity requirements specified by this field cease to be met -at some point during pod execution (e.g. due to an update), the system -may or may not try to eventually evict the pod from its node. - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    nodeSelectorTerms[]object - Required. A list of node selector terms. The terms are ORed.
    -
    true
    - - -### ProxyClass.spec.statefulSet.pod.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index] -[↩ Parent](#proxyclassspecstatefulsetpodaffinitynodeaffinityrequiredduringschedulingignoredduringexecution) - - - -A null or empty node selector term matches no objects. The requirements of -them are ANDed. -The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    matchExpressions[]object - A list of node selector requirements by node's labels.
    -
    false
    matchFields[]object - A list of node selector requirements by node's fields.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchExpressions[index] -[↩ Parent](#proxyclassspecstatefulsetpodaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsindex) - - - -A node selector requirement is a selector that contains values, a key, and an operator -that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    keystring - The label key that the selector applies to.
    -
    true
    operatorstring - Represents a key's relationship to a set of values. -Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
    -
    true
    values[]string - An array of string values. If the operator is In or NotIn, -the values array must be non-empty. If the operator is Exists or DoesNotExist, -the values array must be empty. If the operator is Gt or Lt, the values -array must have a single element, which will be interpreted as an integer. -This array is replaced during a strategic merge patch.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[index].matchFields[index] -[↩ Parent](#proxyclassspecstatefulsetpodaffinitynodeaffinityrequiredduringschedulingignoredduringexecutionnodeselectortermsindex) - - - -A node selector requirement is a selector that contains values, a key, and an operator -that relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    keystring - The label key that the selector applies to.
    -
    true
    operatorstring - Represents a key's relationship to a set of values. -Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
    -
    true
    values[]string - An array of string values. If the operator is In or NotIn, -the values array must be non-empty. If the operator is Exists or DoesNotExist, -the values array must be empty. If the operator is Gt or Lt, the values -array must have a single element, which will be interpreted as an integer. -This array is replaced during a strategic merge patch.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAffinity -[↩ Parent](#proxyclassspecstatefulsetpodaffinity) - - - -Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    preferredDuringSchedulingIgnoredDuringExecution[]object - The scheduler will prefer to schedule pods to nodes that satisfy -the affinity expressions specified by this field, but it may choose -a node that violates one or more of the expressions. The node that is -most preferred is the one with the greatest sum of weights, i.e. -for each node that meets all of the scheduling requirements (resource -request, requiredDuringScheduling affinity expressions, etc.), -compute a sum by iterating through the elements of this field and adding -"weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the -node(s) with the highest sum are the most preferred.
    -
    false
    requiredDuringSchedulingIgnoredDuringExecution[]object - If the affinity requirements specified by this field are not met at -scheduling time, the pod will not be scheduled onto the node. -If the affinity requirements specified by this field cease to be met -at some point during pod execution (e.g. due to a pod label update), the -system may or may not try to eventually evict the pod from its node. -When there are multiple elements, the lists of nodes corresponding to each -podAffinityTerm are intersected, i.e. all terms must be satisfied.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinity) - - - -The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    podAffinityTermobject - Required. A pod affinity term, associated with the corresponding weight.
    -
    true
    weightinteger - weight associated with matching the corresponding podAffinityTerm, -in the range 1-100.
    -
    - Format: int32
    -
    true
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinitypreferredduringschedulingignoredduringexecutionindex) - - - -Required. A pod affinity term, associated with the corresponding weight. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    topologyKeystring - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching -the labelSelector in the specified namespaces, where co-located is defined as running on a node -whose value of the label with key topologyKey matches that of any node on which any of the -selected pods is running. -Empty topologyKey is not allowed.
    -
    true
    labelSelectorobject - A label query over a set of resources, in this case pods. -If it's null, this PodAffinityTerm matches with no Pods.
    -
    false
    matchLabelKeys[]string - MatchLabelKeys is a set of pod label keys to select which pods will -be taken into consideration. The keys are used to lookup values from the -incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` -to select the group of existing pods which pods will be taken into consideration -for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming -pod labels will be ignored. The default value is empty. -The same key is forbidden to exist in both matchLabelKeys and labelSelector. -Also, matchLabelKeys cannot be set when labelSelector isn't set. -This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
    -
    false
    mismatchLabelKeys[]string - MismatchLabelKeys is a set of pod label keys to select which pods will -be taken into consideration. The keys are used to lookup values from the -incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` -to select the group of existing pods which pods will be taken into consideration -for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming -pod labels will be ignored. The default value is empty. -The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. -Also, mismatchLabelKeys cannot be set when labelSelector isn't set. -This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
    -
    false
    namespaceSelectorobject - A label query over the set of namespaces that the term applies to. -The term is applied to the union of the namespaces selected by this field -and the ones listed in the namespaces field. -null selector and null or empty namespaces list means "this pod's namespace". -An empty selector ({}) matches all namespaces.
    -
    false
    namespaces[]string - namespaces specifies a static list of namespace names that the term applies to. -The term is applied to the union of the namespaces listed in this field -and the ones selected by namespaceSelector. -null or empty namespaces list and null namespaceSelector means "this pod's namespace".
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinitypreferredduringschedulingignoredduringexecutionindexpodaffinityterm) - - - -A label query over a set of resources, in this case pods. -If it's null, this PodAffinityTerm matches with no Pods. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    matchExpressions[]object - matchExpressions is a list of label selector requirements. The requirements are ANDed.
    -
    false
    matchLabelsmap[string]string - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels -map is equivalent to an element of matchExpressions, whose key field is "key", the -operator is "In", and the values array contains only "value". The requirements are ANDed.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinitypreferredduringschedulingignoredduringexecutionindexpodaffinitytermlabelselector) - - - -A label selector requirement is a selector that contains values, a key, and an operator that -relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    keystring - key is the label key that the selector applies to.
    -
    true
    operatorstring - operator represents a key's relationship to a set of values. -Valid operators are In, NotIn, Exists and DoesNotExist.
    -
    true
    values[]string - values is an array of string values. If the operator is In or NotIn, -the values array must be non-empty. If the operator is Exists or DoesNotExist, -the values array must be empty. This array is replaced during a strategic -merge patch.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinitypreferredduringschedulingignoredduringexecutionindexpodaffinityterm) - - - -A label query over the set of namespaces that the term applies to. -The term is applied to the union of the namespaces selected by this field -and the ones listed in the namespaces field. -null selector and null or empty namespaces list means "this pod's namespace". -An empty selector ({}) matches all namespaces. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    matchExpressions[]object - matchExpressions is a list of label selector requirements. The requirements are ANDed.
    -
    false
    matchLabelsmap[string]string - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels -map is equivalent to an element of matchExpressions, whose key field is "key", the -operator is "In", and the values array contains only "value". The requirements are ANDed.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinitypreferredduringschedulingignoredduringexecutionindexpodaffinitytermnamespaceselector) - - - -A label selector requirement is a selector that contains values, a key, and an operator that -relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    keystring - key is the label key that the selector applies to.
    -
    true
    operatorstring - operator represents a key's relationship to a set of values. -Valid operators are In, NotIn, Exists and DoesNotExist.
    -
    true
    values[]string - values is an array of string values. If the operator is In or NotIn, -the values array must be non-empty. If the operator is Exists or DoesNotExist, -the values array must be empty. This array is replaced during a strategic -merge patch.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinity) - - - -Defines a set of pods (namely those matching the labelSelector -relative to the given namespace(s)) that this pod should be -co-located (affinity) or not co-located (anti-affinity) with, -where co-located is defined as running on a node whose value of -the label with key matches that of any node on which -a pod of the set of pods is running - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    topologyKeystring - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching -the labelSelector in the specified namespaces, where co-located is defined as running on a node -whose value of the label with key topologyKey matches that of any node on which any of the -selected pods is running. -Empty topologyKey is not allowed.
    -
    true
    labelSelectorobject - A label query over a set of resources, in this case pods. -If it's null, this PodAffinityTerm matches with no Pods.
    -
    false
    matchLabelKeys[]string - MatchLabelKeys is a set of pod label keys to select which pods will -be taken into consideration. The keys are used to lookup values from the -incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` -to select the group of existing pods which pods will be taken into consideration -for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming -pod labels will be ignored. The default value is empty. -The same key is forbidden to exist in both matchLabelKeys and labelSelector. -Also, matchLabelKeys cannot be set when labelSelector isn't set. -This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
    -
    false
    mismatchLabelKeys[]string - MismatchLabelKeys is a set of pod label keys to select which pods will -be taken into consideration. The keys are used to lookup values from the -incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` -to select the group of existing pods which pods will be taken into consideration -for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming -pod labels will be ignored. The default value is empty. -The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. -Also, mismatchLabelKeys cannot be set when labelSelector isn't set. -This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
    -
    false
    namespaceSelectorobject - A label query over the set of namespaces that the term applies to. -The term is applied to the union of the namespaces selected by this field -and the ones listed in the namespaces field. -null selector and null or empty namespaces list means "this pod's namespace". -An empty selector ({}) matches all namespaces.
    -
    false
    namespaces[]string - namespaces specifies a static list of namespace names that the term applies to. -The term is applied to the union of the namespaces listed in this field -and the ones selected by namespaceSelector. -null or empty namespaces list and null namespaceSelector means "this pod's namespace".
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinityrequiredduringschedulingignoredduringexecutionindex) - - - -A label query over a set of resources, in this case pods. -If it's null, this PodAffinityTerm matches with no Pods. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    matchExpressions[]object - matchExpressions is a list of label selector requirements. The requirements are ANDed.
    -
    false
    matchLabelsmap[string]string - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels -map is equivalent to an element of matchExpressions, whose key field is "key", the -operator is "In", and the values array contains only "value". The requirements are ANDed.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinityrequiredduringschedulingignoredduringexecutionindexlabelselector) - - - -A label selector requirement is a selector that contains values, a key, and an operator that -relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    keystring - key is the label key that the selector applies to.
    -
    true
    operatorstring - operator represents a key's relationship to a set of values. -Valid operators are In, NotIn, Exists and DoesNotExist.
    -
    true
    values[]string - values is an array of string values. If the operator is In or NotIn, -the values array must be non-empty. If the operator is Exists or DoesNotExist, -the values array must be empty. This array is replaced during a strategic -merge patch.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinityrequiredduringschedulingignoredduringexecutionindex) - - - -A label query over the set of namespaces that the term applies to. -The term is applied to the union of the namespaces selected by this field -and the ones listed in the namespaces field. -null selector and null or empty namespaces list means "this pod's namespace". -An empty selector ({}) matches all namespaces. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    matchExpressions[]object - matchExpressions is a list of label selector requirements. The requirements are ANDed.
    -
    false
    matchLabelsmap[string]string - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels -map is equivalent to an element of matchExpressions, whose key field is "key", the -operator is "In", and the values array contains only "value". The requirements are ANDed.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodaffinityrequiredduringschedulingignoredduringexecutionindexnamespaceselector) - - - -A label selector requirement is a selector that contains values, a key, and an operator that -relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    keystring - key is the label key that the selector applies to.
    -
    true
    operatorstring - operator represents a key's relationship to a set of values. -Valid operators are In, NotIn, Exists and DoesNotExist.
    -
    true
    values[]string - values is an array of string values. If the operator is In or NotIn, -the values array must be non-empty. If the operator is Exists or DoesNotExist, -the values array must be empty. This array is replaced during a strategic -merge patch.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity -[↩ Parent](#proxyclassspecstatefulsetpodaffinity) - - - -Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    preferredDuringSchedulingIgnoredDuringExecution[]object - The scheduler will prefer to schedule pods to nodes that satisfy -the anti-affinity expressions specified by this field, but it may choose -a node that violates one or more of the expressions. The node that is -most preferred is the one with the greatest sum of weights, i.e. -for each node that meets all of the scheduling requirements (resource -request, requiredDuringScheduling anti-affinity expressions, etc.), -compute a sum by iterating through the elements of this field and adding -"weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the -node(s) with the highest sum are the most preferred.
    -
    false
    requiredDuringSchedulingIgnoredDuringExecution[]object - If the anti-affinity requirements specified by this field are not met at -scheduling time, the pod will not be scheduled onto the node. -If the anti-affinity requirements specified by this field cease to be met -at some point during pod execution (e.g. due to a pod label update), the -system may or may not try to eventually evict the pod from its node. -When there are multiple elements, the lists of nodes corresponding to each -podAffinityTerm are intersected, i.e. all terms must be satisfied.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index] -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinity) - - - -The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    podAffinityTermobject - Required. A pod affinity term, associated with the corresponding weight.
    -
    true
    weightinteger - weight associated with matching the corresponding podAffinityTerm, -in the range 1-100.
    -
    - Format: int32
    -
    true
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionindex) - - - -Required. A pod affinity term, associated with the corresponding weight. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    topologyKeystring - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching -the labelSelector in the specified namespaces, where co-located is defined as running on a node -whose value of the label with key topologyKey matches that of any node on which any of the -selected pods is running. -Empty topologyKey is not allowed.
    -
    true
    labelSelectorobject - A label query over a set of resources, in this case pods. -If it's null, this PodAffinityTerm matches with no Pods.
    -
    false
    matchLabelKeys[]string - MatchLabelKeys is a set of pod label keys to select which pods will -be taken into consideration. The keys are used to lookup values from the -incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` -to select the group of existing pods which pods will be taken into consideration -for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming -pod labels will be ignored. The default value is empty. -The same key is forbidden to exist in both matchLabelKeys and labelSelector. -Also, matchLabelKeys cannot be set when labelSelector isn't set. -This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
    -
    false
    mismatchLabelKeys[]string - MismatchLabelKeys is a set of pod label keys to select which pods will -be taken into consideration. The keys are used to lookup values from the -incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` -to select the group of existing pods which pods will be taken into consideration -for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming -pod labels will be ignored. The default value is empty. -The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. -Also, mismatchLabelKeys cannot be set when labelSelector isn't set. -This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
    -
    false
    namespaceSelectorobject - A label query over the set of namespaces that the term applies to. -The term is applied to the union of the namespaces selected by this field -and the ones listed in the namespaces field. -null selector and null or empty namespaces list means "this pod's namespace". -An empty selector ({}) matches all namespaces.
    -
    false
    namespaces[]string - namespaces specifies a static list of namespace names that the term applies to. -The term is applied to the union of the namespaces listed in this field -and the ones selected by namespaceSelector. -null or empty namespaces list and null namespaceSelector means "this pod's namespace".
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionindexpodaffinityterm) - - - -A label query over a set of resources, in this case pods. -If it's null, this PodAffinityTerm matches with no Pods. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    matchExpressions[]object - matchExpressions is a list of label selector requirements. The requirements are ANDed.
    -
    false
    matchLabelsmap[string]string - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels -map is equivalent to an element of matchExpressions, whose key field is "key", the -operator is "In", and the values array contains only "value". The requirements are ANDed.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.labelSelector.matchExpressions[index] -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionindexpodaffinitytermlabelselector) - - - -A label selector requirement is a selector that contains values, a key, and an operator that -relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    keystring - key is the label key that the selector applies to.
    -
    true
    operatorstring - operator represents a key's relationship to a set of values. -Valid operators are In, NotIn, Exists and DoesNotExist.
    -
    true
    values[]string - values is an array of string values. If the operator is In or NotIn, -the values array must be non-empty. If the operator is Exists or DoesNotExist, -the values array must be empty. This array is replaced during a strategic -merge patch.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionindexpodaffinityterm) - - - -A label query over the set of namespaces that the term applies to. -The term is applied to the union of the namespaces selected by this field -and the ones listed in the namespaces field. -null selector and null or empty namespaces list means "this pod's namespace". -An empty selector ({}) matches all namespaces. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    matchExpressions[]object - matchExpressions is a list of label selector requirements. The requirements are ANDed.
    -
    false
    matchLabelsmap[string]string - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels -map is equivalent to an element of matchExpressions, whose key field is "key", the -operator is "In", and the values array contains only "value". The requirements are ANDed.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[index].podAffinityTerm.namespaceSelector.matchExpressions[index] -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinitypreferredduringschedulingignoredduringexecutionindexpodaffinitytermnamespaceselector) - - - -A label selector requirement is a selector that contains values, a key, and an operator that -relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    keystring - key is the label key that the selector applies to.
    -
    true
    operatorstring - operator represents a key's relationship to a set of values. -Valid operators are In, NotIn, Exists and DoesNotExist.
    -
    true
    values[]string - values is an array of string values. If the operator is In or NotIn, -the values array must be non-empty. If the operator is Exists or DoesNotExist, -the values array must be empty. This array is replaced during a strategic -merge patch.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index] -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinity) - - - -Defines a set of pods (namely those matching the labelSelector -relative to the given namespace(s)) that this pod should be -co-located (affinity) or not co-located (anti-affinity) with, -where co-located is defined as running on a node whose value of -the label with key matches that of any node on which -a pod of the set of pods is running - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    topologyKeystring - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching -the labelSelector in the specified namespaces, where co-located is defined as running on a node -whose value of the label with key topologyKey matches that of any node on which any of the -selected pods is running. -Empty topologyKey is not allowed.
    -
    true
    labelSelectorobject - A label query over a set of resources, in this case pods. -If it's null, this PodAffinityTerm matches with no Pods.
    -
    false
    matchLabelKeys[]string - MatchLabelKeys is a set of pod label keys to select which pods will -be taken into consideration. The keys are used to lookup values from the -incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` -to select the group of existing pods which pods will be taken into consideration -for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming -pod labels will be ignored. The default value is empty. -The same key is forbidden to exist in both matchLabelKeys and labelSelector. -Also, matchLabelKeys cannot be set when labelSelector isn't set. -This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
    -
    false
    mismatchLabelKeys[]string - MismatchLabelKeys is a set of pod label keys to select which pods will -be taken into consideration. The keys are used to lookup values from the -incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` -to select the group of existing pods which pods will be taken into consideration -for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming -pod labels will be ignored. The default value is empty. -The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. -Also, mismatchLabelKeys cannot be set when labelSelector isn't set. -This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
    -
    false
    namespaceSelectorobject - A label query over the set of namespaces that the term applies to. -The term is applied to the union of the namespaces selected by this field -and the ones listed in the namespaces field. -null selector and null or empty namespaces list means "this pod's namespace". -An empty selector ({}) matches all namespaces.
    -
    false
    namespaces[]string - namespaces specifies a static list of namespace names that the term applies to. -The term is applied to the union of the namespaces listed in this field -and the ones selected by namespaceSelector. -null or empty namespaces list and null namespaceSelector means "this pod's namespace".
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionindex) - - - -A label query over a set of resources, in this case pods. -If it's null, this PodAffinityTerm matches with no Pods. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    matchExpressions[]object - matchExpressions is a list of label selector requirements. The requirements are ANDed.
    -
    false
    matchLabelsmap[string]string - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels -map is equivalent to an element of matchExpressions, whose key field is "key", the -operator is "In", and the values array contains only "value". The requirements are ANDed.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].labelSelector.matchExpressions[index] -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionindexlabelselector) - - - -A label selector requirement is a selector that contains values, a key, and an operator that -relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    keystring - key is the label key that the selector applies to.
    -
    true
    operatorstring - operator represents a key's relationship to a set of values. -Valid operators are In, NotIn, Exists and DoesNotExist.
    -
    true
    values[]string - values is an array of string values. If the operator is In or NotIn, -the values array must be non-empty. If the operator is Exists or DoesNotExist, -the values array must be empty. This array is replaced during a strategic -merge patch.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionindex) - - - -A label query over the set of namespaces that the term applies to. -The term is applied to the union of the namespaces selected by this field -and the ones listed in the namespaces field. -null selector and null or empty namespaces list means "this pod's namespace". -An empty selector ({}) matches all namespaces. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    matchExpressions[]object - matchExpressions is a list of label selector requirements. The requirements are ANDed.
    -
    false
    matchLabelsmap[string]string - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels -map is equivalent to an element of matchExpressions, whose key field is "key", the -operator is "In", and the values array contains only "value". The requirements are ANDed.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution[index].namespaceSelector.matchExpressions[index] -[↩ Parent](#proxyclassspecstatefulsetpodaffinitypodantiaffinityrequiredduringschedulingignoredduringexecutionindexnamespaceselector) - - - -A label selector requirement is a selector that contains values, a key, and an operator that -relates the key and values. - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    keystring - key is the label key that the selector applies to.
    -
    true
    operatorstring - operator represents a key's relationship to a set of values. -Valid operators are In, NotIn, Exists and DoesNotExist.
    -
    true
    values[]string - values is an array of string values. If the operator is In or NotIn, -the values array must be non-empty. If the operator is Exists or DoesNotExist, -the values array must be empty. This array is replaced during a strategic -merge patch.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.imagePullSecrets[index] -[↩ Parent](#proxyclassspecstatefulsetpod) - - - -LocalObjectReference contains enough information to let you locate the -referenced object inside the same namespace. - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    namestring - Name of the referent. -This field is effectively required, but due to backwards compatibility is -allowed to be empty. Instances of this type with an empty value here are -almost certainly wrong. -More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
    -
    - Default:
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.securityContext -[↩ Parent](#proxyclassspecstatefulsetpod) - - - -Proxy Pod's security context. -By default Tailscale Kubernetes operator does not apply any Pod -security context. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-2 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    appArmorProfileobject - appArmorProfile is the AppArmor options to use by the containers in this pod. -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    fsGroupinteger - A special supplemental group that applies to all containers in a pod. -Some volume types allow the Kubelet to change the ownership of that volume -to be owned by the pod: - -1. The owning GID will be the FSGroup -2. The setgid bit is set (new files created in the volume will be owned by FSGroup) -3. The permission bits are OR'd with rw-rw---- - -If unset, the Kubelet will not modify the ownership and permissions of any volume. -Note that this field cannot be set when spec.os.name is windows.
    -
    - Format: int64
    -
    false
    fsGroupChangePolicystring - fsGroupChangePolicy defines behavior of changing ownership and permission of the volume -before being exposed inside Pod. This field will only apply to -volume types which support fsGroup based ownership(and permissions). -It will have no effect on ephemeral volume types such as: secret, configmaps -and emptydir. -Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    runAsGroupinteger - The GID to run the entrypoint of the container process. -Uses runtime default if unset. -May also be set in SecurityContext. If set in both SecurityContext and -PodSecurityContext, the value specified in SecurityContext takes precedence -for that container. -Note that this field cannot be set when spec.os.name is windows.
    -
    - Format: int64
    -
    false
    runAsNonRootboolean - Indicates that the container must run as a non-root user. -If true, the Kubelet will validate the image at runtime to ensure that it -does not run as UID 0 (root) and fail to start the container if it does. -If unset or false, no such validation will be performed. -May also be set in SecurityContext. If set in both SecurityContext and -PodSecurityContext, the value specified in SecurityContext takes precedence.
    -
    false
    runAsUserinteger - The UID to run the entrypoint of the container process. -Defaults to user specified in image metadata if unspecified. -May also be set in SecurityContext. If set in both SecurityContext and -PodSecurityContext, the value specified in SecurityContext takes precedence -for that container. -Note that this field cannot be set when spec.os.name is windows.
    -
    - Format: int64
    -
    false
    seLinuxOptionsobject - The SELinux context to be applied to all containers. -If unspecified, the container runtime will allocate a random SELinux context for each -container. May also be set in SecurityContext. If set in -both SecurityContext and PodSecurityContext, the value specified in SecurityContext -takes precedence for that container. -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    seccompProfileobject - The seccomp options to use by the containers in this pod. -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    supplementalGroups[]integer - A list of groups applied to the first process run in each container, in addition -to the container's primary GID, the fsGroup (if specified), and group memberships -defined in the container image for the uid of the container process. If unspecified, -no additional groups are added to any container. Note that group memberships -defined in the container image for the uid of the container process are still effective, -even if they are not included in this list. -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    sysctls[]object - Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported -sysctls (by the container runtime) might fail to launch. -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    windowsOptionsobject - The Windows specific settings applied to all containers. -If unspecified, the options within a container's SecurityContext will be used. -If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. -Note that this field cannot be set when spec.os.name is linux.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.securityContext.appArmorProfile -[↩ Parent](#proxyclassspecstatefulsetpodsecuritycontext) - - - -appArmorProfile is the AppArmor options to use by the containers in this pod. -Note that this field cannot be set when spec.os.name is windows. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    typestring - type indicates which kind of AppArmor profile will be applied. -Valid options are: - Localhost - a profile pre-loaded on the node. - RuntimeDefault - the container runtime's default profile. - Unconfined - no AppArmor enforcement.
    -
    true
    localhostProfilestring - localhostProfile indicates a profile loaded on the node that should be used. -The profile must be preconfigured on the node to work. -Must match the loaded name of the profile. -Must be set if and only if type is "Localhost".
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.securityContext.seLinuxOptions -[↩ Parent](#proxyclassspecstatefulsetpodsecuritycontext) - - - -The SELinux context to be applied to all containers. -If unspecified, the container runtime will allocate a random SELinux context for each -container. May also be set in SecurityContext. If set in -both SecurityContext and PodSecurityContext, the value specified in SecurityContext -takes precedence for that container. -Note that this field cannot be set when spec.os.name is windows. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    levelstring - Level is SELinux level label that applies to the container.
    -
    false
    rolestring - Role is a SELinux role label that applies to the container.
    -
    false
    typestring - Type is a SELinux type label that applies to the container.
    -
    false
    userstring - User is a SELinux user label that applies to the container.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.securityContext.seccompProfile -[↩ Parent](#proxyclassspecstatefulsetpodsecuritycontext) - - - -The seccomp options to use by the containers in this pod. -Note that this field cannot be set when spec.os.name is windows. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    typestring - type indicates which kind of seccomp profile will be applied. -Valid options are: - -Localhost - a profile defined in a file on the node should be used. -RuntimeDefault - the container runtime default profile should be used. -Unconfined - no profile should be applied.
    -
    true
    localhostProfilestring - localhostProfile indicates a profile defined in a file on the node should be used. -The profile must be preconfigured on the node to work. -Must be a descending path, relative to the kubelet's configured seccomp profile location. -Must be set if type is "Localhost". Must NOT be set for any other type.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.securityContext.sysctls[index] -[↩ Parent](#proxyclassspecstatefulsetpodsecuritycontext) - - - -Sysctl defines a kernel parameter to be set - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    namestring - Name of a property to set
    -
    true
    valuestring - Value of a property to set
    -
    true
    - - -### ProxyClass.spec.statefulSet.pod.securityContext.windowsOptions -[↩ Parent](#proxyclassspecstatefulsetpodsecuritycontext) - - - -The Windows specific settings applied to all containers. -If unspecified, the options within a container's SecurityContext will be used. -If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. -Note that this field cannot be set when spec.os.name is linux. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    gmsaCredentialSpecstring - GMSACredentialSpec is where the GMSA admission webhook -(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the -GMSA credential spec named by the GMSACredentialSpecName field.
    -
    false
    gmsaCredentialSpecNamestring - GMSACredentialSpecName is the name of the GMSA credential spec to use.
    -
    false
    hostProcessboolean - HostProcess determines if a container should be run as a 'Host Process' container. -All of a Pod's containers must have the same effective HostProcess value -(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). -In addition, if HostProcess is true then HostNetwork must also be set to true.
    -
    false
    runAsUserNamestring - The UserName in Windows to run the entrypoint of the container process. -Defaults to the user specified in image metadata if unspecified. -May also be set in PodSecurityContext. If set in both SecurityContext and -PodSecurityContext, the value specified in SecurityContext takes precedence.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.tailscaleContainer -[↩ Parent](#proxyclassspecstatefulsetpod) - - - -Configuration for the proxy container running tailscale. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
env[]object - List of environment variables to set in the container. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables -Note that environment variables provided here will take precedence -over Tailscale-specific environment variables set by the operator, -however running proxies with custom values for Tailscale environment -variables (i.e. TS_USERSPACE) is not recommended and might break in -the future.
    -
    false
    imagestring - Container image name. By default images are pulled from -docker.io/tailscale/tailscale, but the official images are also -available at ghcr.io/tailscale/tailscale. Specifying image name here -will override any proxy image values specified via the Kubernetes -operator's Helm chart values or PROXY_IMAGE env var in the operator -Deployment. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image
    -
    false
    imagePullPolicyenum - Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image
    -
    - Enum: Always, Never, IfNotPresent
    -
    false
resourcesobject - Container resource requirements. -By default Tailscale Kubernetes operator does not apply any resource -requirements. The amount of resources required will depend on the -amount of resources the operator needs to parse, usage patterns and -cluster size. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources
    -
    false
securityContextobject - Container security context. -Security context specified here will override the security context by the operator. -By default the operator: -- sets 'privileged: true' for the init container -- sets NET_ADMIN capability for tailscale container for proxies that -are created for Services or Connector. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.tailscaleContainer.env[index] -[↩ Parent](#proxyclassspecstatefulsetpodtailscalecontainer) - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    namestring - Name of the environment variable. Must be a C_IDENTIFIER.
    -
    true
    valuestring - Variable references $(VAR_NAME) are expanded using the previously defined - environment variables in the container and any service environment -variables. If a variable cannot be resolved, the reference in the input -string will be unchanged. Double $$ are reduced to a single $, which -allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will -produce the string literal "$(VAR_NAME)". Escaped references will never -be expanded, regardless of whether the variable exists or not. Defaults -to "".
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.tailscaleContainer.resources -[↩ Parent](#proxyclassspecstatefulsetpodtailscalecontainer) - - - -Container resource requirements. -By default Tailscale Kubernetes operator does not apply any resource -requirements. The amount of resources required wil depend on the -amount of resources the operator needs to parse, usage patterns and -cluster size. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    claims[]object - Claims lists the names of resources, defined in spec.resourceClaims, -that are used by this container. - -This is an alpha field and requires enabling the -DynamicResourceAllocation feature gate. - -This field is immutable. It can only be set for containers.
    -
    false
    limitsmap[string]int or string - Limits describes the maximum amount of compute resources allowed. -More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    -
    false
    requestsmap[string]int or string - Requests describes the minimum amount of compute resources required. -If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, -otherwise to an implementation-defined value. Requests cannot exceed Limits. -More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.tailscaleContainer.resources.claims[index] -[↩ Parent](#proxyclassspecstatefulsetpodtailscalecontainerresources) - - - -ResourceClaim references one entry in PodSpec.ResourceClaims. - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    namestring - Name must match the name of one entry in pod.spec.resourceClaims of -the Pod where this field is used. It makes that resource available -inside a container.
    -
    true
    - - -### ProxyClass.spec.statefulSet.pod.tailscaleContainer.securityContext -[↩ Parent](#proxyclassspecstatefulsetpodtailscalecontainer) - - - -Container security context. -Security context specified here will override the security context by the operator. -By default the operator: -- sets 'privileged: true' for the init container -- set NET_ADMIN capability for tailscale container for proxies that -are created for Services or Connector. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    allowPrivilegeEscalationboolean - AllowPrivilegeEscalation controls whether a process can gain more -privileges than its parent process. This bool directly controls if -the no_new_privs flag will be set on the container process. -AllowPrivilegeEscalation is true always when the container is: -1) run as Privileged -2) has CAP_SYS_ADMIN -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    appArmorProfileobject - appArmorProfile is the AppArmor options to use by this container. If set, this profile -overrides the pod's appArmorProfile. -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    capabilitiesobject - The capabilities to add/drop when running containers. -Defaults to the default set of capabilities granted by the container runtime. -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    privilegedboolean - Run container in privileged mode. -Processes in privileged containers are essentially equivalent to root on the host. -Defaults to false. -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    procMountstring - procMount denotes the type of proc mount to use for the containers. -The default is DefaultProcMount which uses the container runtime defaults for -readonly paths and masked paths. -This requires the ProcMountType feature flag to be enabled. -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    readOnlyRootFilesystemboolean - Whether this container has a read-only root filesystem. -Default is false. -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    runAsGroupinteger - The GID to run the entrypoint of the container process. -Uses runtime default if unset. -May also be set in PodSecurityContext. If set in both SecurityContext and -PodSecurityContext, the value specified in SecurityContext takes precedence. -Note that this field cannot be set when spec.os.name is windows.
    -
    - Format: int64
    -
    false
    runAsNonRootboolean - Indicates that the container must run as a non-root user. -If true, the Kubelet will validate the image at runtime to ensure that it -does not run as UID 0 (root) and fail to start the container if it does. -If unset or false, no such validation will be performed. -May also be set in PodSecurityContext. If set in both SecurityContext and -PodSecurityContext, the value specified in SecurityContext takes precedence.
    -
    false
    runAsUserinteger - The UID to run the entrypoint of the container process. -Defaults to user specified in image metadata if unspecified. -May also be set in PodSecurityContext. If set in both SecurityContext and -PodSecurityContext, the value specified in SecurityContext takes precedence. -Note that this field cannot be set when spec.os.name is windows.
    -
    - Format: int64
    -
    false
    seLinuxOptionsobject - The SELinux context to be applied to the container. -If unspecified, the container runtime will allocate a random SELinux context for each -container. May also be set in PodSecurityContext. If set in both SecurityContext and -PodSecurityContext, the value specified in SecurityContext takes precedence. -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    seccompProfileobject - The seccomp options to use by this container. If seccomp options are -provided at both the pod & container level, the container options -override the pod options. -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    windowsOptionsobject - The Windows specific settings applied to all containers. -If unspecified, the options from the PodSecurityContext will be used. -If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. -Note that this field cannot be set when spec.os.name is linux.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.tailscaleContainer.securityContext.appArmorProfile -[↩ Parent](#proxyclassspecstatefulsetpodtailscalecontainersecuritycontext) - - - -appArmorProfile is the AppArmor options to use by this container. If set, this profile -overrides the pod's appArmorProfile. -Note that this field cannot be set when spec.os.name is windows. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    typestring - type indicates which kind of AppArmor profile will be applied. -Valid options are: - Localhost - a profile pre-loaded on the node. - RuntimeDefault - the container runtime's default profile. - Unconfined - no AppArmor enforcement.
    -
    true
    localhostProfilestring - localhostProfile indicates a profile loaded on the node that should be used. -The profile must be preconfigured on the node to work. -Must match the loaded name of the profile. -Must be set if and only if type is "Localhost".
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.tailscaleContainer.securityContext.capabilities -[↩ Parent](#proxyclassspecstatefulsetpodtailscalecontainersecuritycontext) - - - -The capabilities to add/drop when running containers. -Defaults to the default set of capabilities granted by the container runtime. -Note that this field cannot be set when spec.os.name is windows. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    add[]string - Added capabilities
    -
    false
    drop[]string - Removed capabilities
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.tailscaleContainer.securityContext.seLinuxOptions -[↩ Parent](#proxyclassspecstatefulsetpodtailscalecontainersecuritycontext) - - - -The SELinux context to be applied to the container. -If unspecified, the container runtime will allocate a random SELinux context for each -container. May also be set in PodSecurityContext. If set in both SecurityContext and -PodSecurityContext, the value specified in SecurityContext takes precedence. -Note that this field cannot be set when spec.os.name is windows. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    levelstring - Level is SELinux level label that applies to the container.
    -
    false
    rolestring - Role is a SELinux role label that applies to the container.
    -
    false
    typestring - Type is a SELinux type label that applies to the container.
    -
    false
    userstring - User is a SELinux user label that applies to the container.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.tailscaleContainer.securityContext.seccompProfile -[↩ Parent](#proxyclassspecstatefulsetpodtailscalecontainersecuritycontext) - - - -The seccomp options to use by this container. If seccomp options are -provided at both the pod & container level, the container options -override the pod options. -Note that this field cannot be set when spec.os.name is windows. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    typestring - type indicates which kind of seccomp profile will be applied. -Valid options are: - -Localhost - a profile defined in a file on the node should be used. -RuntimeDefault - the container runtime default profile should be used. -Unconfined - no profile should be applied.
    -
    true
    localhostProfilestring - localhostProfile indicates a profile defined in a file on the node should be used. -The profile must be preconfigured on the node to work. -Must be a descending path, relative to the kubelet's configured seccomp profile location. -Must be set if type is "Localhost". Must NOT be set for any other type.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.tailscaleContainer.securityContext.windowsOptions -[↩ Parent](#proxyclassspecstatefulsetpodtailscalecontainersecuritycontext) - - - -The Windows specific settings applied to all containers. -If unspecified, the options from the PodSecurityContext will be used. -If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. -Note that this field cannot be set when spec.os.name is linux. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    gmsaCredentialSpecstring - GMSACredentialSpec is where the GMSA admission webhook -(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the -GMSA credential spec named by the GMSACredentialSpecName field.
    -
    false
    gmsaCredentialSpecNamestring - GMSACredentialSpecName is the name of the GMSA credential spec to use.
    -
    false
    hostProcessboolean - HostProcess determines if a container should be run as a 'Host Process' container. -All of a Pod's containers must have the same effective HostProcess value -(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). -In addition, if HostProcess is true then HostNetwork must also be set to true.
    -
    false
    runAsUserNamestring - The UserName in Windows to run the entrypoint of the container process. -Defaults to the user specified in image metadata if unspecified. -May also be set in PodSecurityContext. If set in both SecurityContext and -PodSecurityContext, the value specified in SecurityContext takes precedence.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.tailscaleInitContainer -[↩ Parent](#proxyclassspecstatefulsetpod) - - - -Configuration for the proxy init container that enables forwarding. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    env[]object - List of environment variables to set in the container. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables -Note that environment variables provided here will take precedence -over Tailscale-specific environment variables set by the operator, -however running proxies with custom values for Tailscale environment -variables (i.e TS_USERSPACE) is not recommended and might break in -the future.
    -
    false
    imagestring - Container image name. By default images are pulled from -docker.io/tailscale/tailscale, but the official images are also -available at ghcr.io/tailscale/tailscale. Specifying image name here -will override any proxy image values specified via the Kubernetes -operator's Helm chart values or PROXY_IMAGE env var in the operator -Deployment. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image
    -
    false
    imagePullPolicyenum - Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image
    -
    - Enum: Always, Never, IfNotPresent
    -
    false
    resourcesobject - Container resource requirements. -By default Tailscale Kubernetes operator does not apply any resource -requirements. The amount of resources required will depend on the -amount of resources the operator needs to parse, usage patterns and -cluster size. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources
    -
    false
    securityContextobject - Container security context. -Security context specified here will override the security context by the operator. -By default the operator: -- sets 'privileged: true' for the init container -- set NET_ADMIN capability for tailscale container for proxies that -are created for Services or Connector. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.tailscaleInitContainer.env[index] -[↩ Parent](#proxyclassspecstatefulsetpodtailscaleinitcontainer) - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    namestring - Name of the environment variable. Must be a C_IDENTIFIER.
    -
    true
    valuestring - Variable references $(VAR_NAME) are expanded using the previously defined - environment variables in the container and any service environment -variables. If a variable cannot be resolved, the reference in the input -string will be unchanged. Double $$ are reduced to a single $, which -allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will -produce the string literal "$(VAR_NAME)". Escaped references will never -be expanded, regardless of whether the variable exists or not. Defaults -to "".
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.tailscaleInitContainer.resources -[↩ Parent](#proxyclassspecstatefulsetpodtailscaleinitcontainer) - - - -Container resource requirements. -By default Tailscale Kubernetes operator does not apply any resource -requirements. The amount of resources required wil depend on the -amount of resources the operator needs to parse, usage patterns and -cluster size. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    claims[]object - Claims lists the names of resources, defined in spec.resourceClaims, -that are used by this container. - -This is an alpha field and requires enabling the -DynamicResourceAllocation feature gate. - -This field is immutable. It can only be set for containers.
    -
    false
    limitsmap[string]int or string - Limits describes the maximum amount of compute resources allowed. -More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    -
    false
    requestsmap[string]int or string - Requests describes the minimum amount of compute resources required. -If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, -otherwise to an implementation-defined value. Requests cannot exceed Limits. -More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.tailscaleInitContainer.resources.claims[index] -[↩ Parent](#proxyclassspecstatefulsetpodtailscaleinitcontainerresources) - - - -ResourceClaim references one entry in PodSpec.ResourceClaims. - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    namestring - Name must match the name of one entry in pod.spec.resourceClaims of -the Pod where this field is used. It makes that resource available -inside a container.
    -
    true
    - - -### ProxyClass.spec.statefulSet.pod.tailscaleInitContainer.securityContext -[↩ Parent](#proxyclassspecstatefulsetpodtailscaleinitcontainer) - - - -Container security context. -Security context specified here will override the security context by the operator. -By default the operator: -- sets 'privileged: true' for the init container -- set NET_ADMIN capability for tailscale container for proxies that -are created for Services or Connector. -https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    allowPrivilegeEscalationboolean - AllowPrivilegeEscalation controls whether a process can gain more -privileges than its parent process. This bool directly controls if -the no_new_privs flag will be set on the container process. -AllowPrivilegeEscalation is true always when the container is: -1) run as Privileged -2) has CAP_SYS_ADMIN -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    appArmorProfileobject - appArmorProfile is the AppArmor options to use by this container. If set, this profile -overrides the pod's appArmorProfile. -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    capabilitiesobject - The capabilities to add/drop when running containers. -Defaults to the default set of capabilities granted by the container runtime. -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    privilegedboolean - Run container in privileged mode. -Processes in privileged containers are essentially equivalent to root on the host. -Defaults to false. -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    procMountstring - procMount denotes the type of proc mount to use for the containers. -The default is DefaultProcMount which uses the container runtime defaults for -readonly paths and masked paths. -This requires the ProcMountType feature flag to be enabled. -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    readOnlyRootFilesystemboolean - Whether this container has a read-only root filesystem. -Default is false. -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    runAsGroupinteger - The GID to run the entrypoint of the container process. -Uses runtime default if unset. -May also be set in PodSecurityContext. If set in both SecurityContext and -PodSecurityContext, the value specified in SecurityContext takes precedence. -Note that this field cannot be set when spec.os.name is windows.
    -
    - Format: int64
    -
    false
    runAsNonRootboolean - Indicates that the container must run as a non-root user. -If true, the Kubelet will validate the image at runtime to ensure that it -does not run as UID 0 (root) and fail to start the container if it does. -If unset or false, no such validation will be performed. -May also be set in PodSecurityContext. If set in both SecurityContext and -PodSecurityContext, the value specified in SecurityContext takes precedence.
    -
    false
    runAsUserinteger - The UID to run the entrypoint of the container process. -Defaults to user specified in image metadata if unspecified. -May also be set in PodSecurityContext. If set in both SecurityContext and -PodSecurityContext, the value specified in SecurityContext takes precedence. -Note that this field cannot be set when spec.os.name is windows.
    -
    - Format: int64
    -
    false
    seLinuxOptionsobject - The SELinux context to be applied to the container. -If unspecified, the container runtime will allocate a random SELinux context for each -container. May also be set in PodSecurityContext. If set in both SecurityContext and -PodSecurityContext, the value specified in SecurityContext takes precedence. -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    seccompProfileobject - The seccomp options to use by this container. If seccomp options are -provided at both the pod & container level, the container options -override the pod options. -Note that this field cannot be set when spec.os.name is windows.
    -
    false
    windowsOptionsobject - The Windows specific settings applied to all containers. -If unspecified, the options from the PodSecurityContext will be used. -If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. -Note that this field cannot be set when spec.os.name is linux.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.tailscaleInitContainer.securityContext.appArmorProfile -[↩ Parent](#proxyclassspecstatefulsetpodtailscaleinitcontainersecuritycontext) - - - -appArmorProfile is the AppArmor options to use by this container. If set, this profile -overrides the pod's appArmorProfile. -Note that this field cannot be set when spec.os.name is windows. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    typestring - type indicates which kind of AppArmor profile will be applied. -Valid options are: - Localhost - a profile pre-loaded on the node. - RuntimeDefault - the container runtime's default profile. - Unconfined - no AppArmor enforcement.
    -
    true
    localhostProfilestring - localhostProfile indicates a profile loaded on the node that should be used. -The profile must be preconfigured on the node to work. -Must match the loaded name of the profile. -Must be set if and only if type is "Localhost".
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.tailscaleInitContainer.securityContext.capabilities -[↩ Parent](#proxyclassspecstatefulsetpodtailscaleinitcontainersecuritycontext) - - - -The capabilities to add/drop when running containers. -Defaults to the default set of capabilities granted by the container runtime. -Note that this field cannot be set when spec.os.name is windows. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    add[]string - Added capabilities
    -
    false
    drop[]string - Removed capabilities
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.tailscaleInitContainer.securityContext.seLinuxOptions -[↩ Parent](#proxyclassspecstatefulsetpodtailscaleinitcontainersecuritycontext) - - - -The SELinux context to be applied to the container. -If unspecified, the container runtime will allocate a random SELinux context for each -container. May also be set in PodSecurityContext. If set in both SecurityContext and -PodSecurityContext, the value specified in SecurityContext takes precedence. -Note that this field cannot be set when spec.os.name is windows. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    levelstring - Level is SELinux level label that applies to the container.
    -
    false
    rolestring - Role is a SELinux role label that applies to the container.
    -
    false
    typestring - Type is a SELinux type label that applies to the container.
    -
    false
    userstring - User is a SELinux user label that applies to the container.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.tailscaleInitContainer.securityContext.seccompProfile -[↩ Parent](#proxyclassspecstatefulsetpodtailscaleinitcontainersecuritycontext) - - - -The seccomp options to use by this container. If seccomp options are -provided at both the pod & container level, the container options -override the pod options. -Note that this field cannot be set when spec.os.name is windows. - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    typestring - type indicates which kind of seccomp profile will be applied. -Valid options are: - -Localhost - a profile defined in a file on the node should be used. -RuntimeDefault - the container runtime default profile should be used. -Unconfined - no profile should be applied.
    -
    true
    localhostProfilestring - localhostProfile indicates a profile defined in a file on the node should be used. -The profile must be preconfigured on the node to work. -Must be a descending path, relative to the kubelet's configured seccomp profile location. -Must be set if type is "Localhost". Must NOT be set for any other type.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.tailscaleInitContainer.securityContext.windowsOptions -[↩ Parent](#proxyclassspecstatefulsetpodtailscaleinitcontainersecuritycontext) - - - -The Windows specific settings applied to all containers. -If unspecified, the options from the PodSecurityContext will be used. -If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. -Note that this field cannot be set when spec.os.name is linux. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    gmsaCredentialSpecstring - GMSACredentialSpec is where the GMSA admission webhook -(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the -GMSA credential spec named by the GMSACredentialSpecName field.
    -
    false
    gmsaCredentialSpecNamestring - GMSACredentialSpecName is the name of the GMSA credential spec to use.
    -
    false
    hostProcessboolean - HostProcess determines if a container should be run as a 'Host Process' container. -All of a Pod's containers must have the same effective HostProcess value -(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). -In addition, if HostProcess is true then HostNetwork must also be set to true.
    -
    false
    runAsUserNamestring - The UserName in Windows to run the entrypoint of the container process. -Defaults to the user specified in image metadata if unspecified. -May also be set in PodSecurityContext. If set in both SecurityContext and -PodSecurityContext, the value specified in SecurityContext takes precedence.
    -
    false
    - - -### ProxyClass.spec.statefulSet.pod.tolerations[index] -[↩ Parent](#proxyclassspecstatefulsetpod) - - - -The pod this Toleration is attached to tolerates any taint that matches -the triple using the matching operator . - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    effectstring - Effect indicates the taint effect to match. Empty means match all taint effects. -When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
    -
    false
    keystring - Key is the taint key that the toleration applies to. Empty means match all taint keys. -If the key is empty, operator must be Exists; this combination means to match all values and all keys.
    -
    false
    operatorstring - Operator represents a key's relationship to the value. -Valid operators are Exists and Equal. Defaults to Equal. -Exists is equivalent to wildcard for value, so that a pod can -tolerate all taints of a particular category.
    -
    false
    tolerationSecondsinteger - TolerationSeconds represents the period of time the toleration (which must be -of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, -it is not set, which means tolerate the taint forever (do not evict). Zero and -negative values will be treated as 0 (evict immediately) by the system.
    -
    - Format: int64
    -
    false
    valuestring - Value is the taint value the toleration matches to. -If the operator is Exists, the value should be empty, otherwise just a regular string.
    -
    false
    - - -### ProxyClass.spec.tailscale -[↩ Parent](#proxyclassspec) - - - -TailscaleConfig contains options to configure the tailscale-specific -parameters of proxies. - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    acceptRoutesboolean - AcceptRoutes can be set to true to make the proxy instance accept -routes advertised by other nodes on the tailnet, such as subnet -routes. -This is the equivalent of passing --accept-routes flag to a tailscale Linux client. -https://tailscale.com/kb/1019/subnets#use-your-subnet-routes-from-other-machines -Defaults to false.
    -
    false
    - - -### ProxyClass.status -[↩ Parent](#proxyclass) - - - -Status of the ProxyClass. This is set and managed automatically. -https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    conditions[]object - List of status conditions to indicate the status of the ProxyClass. -Known condition types are `ProxyClassReady`.
    -
    false
    - - -### ProxyClass.status.conditions[index] -[↩ Parent](#proxyclassstatus) - - - -Condition contains details for one aspect of the current state of this API Resource. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionRequired
    lastTransitionTimestring - lastTransitionTime is the last time the condition transitioned from one status to another. -This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
    -
    - Format: date-time
    -
    true
    messagestring - message is a human readable message indicating details about the transition. -This may be an empty string.
    -
    true
    reasonstring - reason contains a programmatic identifier indicating the reason for the condition's last transition. -Producers of specific condition types may define expected values and meanings for this field, -and whether the values are considered a guaranteed API. -The value should be a CamelCase string. -This field may not be empty.
    -
    true
    statusenum - status of the condition, one of True, False, Unknown.
    -
    - Enum: True, False, Unknown
    -
    true
    typestring - type of condition in CamelCase or in foo.example.com/CamelCase.
    -
    true
    observedGenerationinteger - observedGeneration represents the .metadata.generation that the condition was set based upon. -For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date -with respect to the current state of the instance.
    -
    - Format: int64
    - Minimum: 0
    -
    false
    + + +_Appears in:_ +- [ProxyClassList](#proxyclasslist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | | +| `kind` _string_ | `ProxyClass` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
    Servers may infer this from the endpoint the client submits requests to.
    Cannot be updated.
    In CamelCase.
    More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
    Servers should convert recognized schemas to the latest internal value, and
    may reject unrecognized values.
    More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[ProxyClassSpec](#proxyclassspec)_ | Specification of the desired state of the ProxyClass resource.
    https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | | | +| `status` _[ProxyClassStatus](#proxyclassstatus)_ | Status of the ProxyClass. This is set and managed automatically.
    https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | | | + + +#### ProxyClassList + + + + + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | | +| `kind` _string_ | `ProxyClassList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
    Servers may infer this from the endpoint the client submits requests to.
    Cannot be updated.
    In CamelCase.
    More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
    Servers should convert recognized schemas to the latest internal value, and
    may reject unrecognized values.
    More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[ProxyClass](#proxyclass) array_ | | | | + + +#### ProxyClassSpec + + + + + + + +_Appears in:_ +- [ProxyClass](#proxyclass) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `statefulSet` _[StatefulSet](#statefulset)_ | Configuration parameters for the proxy's StatefulSet. Tailscale
    Kubernetes operator deploys a StatefulSet for each of the user
    configured proxies (Tailscale Ingress, Tailscale Service, Connector). | | | +| `metrics` _[Metrics](#metrics)_ | Configuration for proxy metrics. Metrics are currently not supported
    for egress proxies and for Ingress proxies that have been configured
    with tailscale.com/experimental-forward-cluster-traffic-via-ingress
    annotation. Note that the metrics are currently considered unstable
    and will likely change in breaking ways in the future - we only
    recommend that you use those for debugging purposes. | | | +| `tailscale` _[TailscaleConfig](#tailscaleconfig)_ | TailscaleConfig contains options to configure the tailscale-specific
    parameters of proxies. | | | + + +#### ProxyClassStatus + + + + + + + +_Appears in:_ +- [ProxyClass](#proxyclass) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the ProxyClass.
    Known condition types are `ProxyClassReady`. | | | + + +#### Route + +_Underlying type:_ _string_ + + + +_Validation:_ +- Format: cidr +- Type: string + +_Appears in:_ +- [Routes](#routes) + + + +#### Routes + +_Underlying type:_ _[Route](#route)_ + + + +_Validation:_ +- Format: cidr +- MinItems: 1 +- Type: string + +_Appears in:_ +- [SubnetRouter](#subnetrouter) + + + +#### StatefulSet + + + + + + + +_Appears in:_ +- [ProxyClassSpec](#proxyclassspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `labels` _object (keys:string, values:string)_ | Labels that will be added to the StatefulSet created for the proxy.
    Any labels specified here will be merged with the default labels
    applied to the StatefulSet by the Tailscale Kubernetes operator as
    well as any other labels that might have been applied by other
    actors.
    Label keys and values must be valid Kubernetes label keys and values.
    https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set | | | +| `annotations` _object (keys:string, values:string)_ | Annotations that will be added to the StatefulSet created for the proxy.
    Any Annotations specified here will be merged with the default annotations
    applied to the StatefulSet by the Tailscale Kubernetes operator as
    well as any other annotations that might have been applied by other
    actors.
    Annotations must be valid Kubernetes annotations.
    https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set | | | +| `pod` _[Pod](#pod)_ | Configuration for the proxy Pod. | | | + + +#### SubnetRouter + + + +SubnetRouter defines subnet routes that should be exposed to tailnet via a +Connector node. + + + +_Appears in:_ +- [ConnectorSpec](#connectorspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `advertiseRoutes` _[Routes](#routes)_ | AdvertiseRoutes refer to CIDRs that the subnet router should make
    available. Route values must be strings that represent a valid IPv4
    or IPv6 CIDR range. Values can be Tailscale 4via6 subnet routes.
    https://tailscale.com/kb/1201/4via6-subnets/ | | Format: cidr
    MinItems: 1
    Type: string
    | + + +#### Tag + +_Underlying type:_ _string_ + + + +_Validation:_ +- Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$` +- Type: string + +_Appears in:_ +- [Tags](#tags) + + + +#### Tags + +_Underlying type:_ _[Tag](#tag)_ + + + +_Validation:_ +- Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$` +- Type: string + +_Appears in:_ +- [ConnectorSpec](#connectorspec) + + + +#### TailscaleConfig + + + + + + + +_Appears in:_ +- [ProxyClassSpec](#proxyclassspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `acceptRoutes` _boolean_ | AcceptRoutes can be set to true to make the proxy instance accept
    routes advertised by other nodes on the tailnet, such as subnet
    routes.
    This is the equivalent of passing the --accept-routes flag to a tailscale Linux client.
    https://tailscale.com/kb/1019/subnets#use-your-subnet-routes-from-other-machines
    Defaults to false. | | | + + diff --git a/k8s-operator/sessionrecording/fakes/fakes.go b/k8s-operator/sessionrecording/fakes/fakes.go new file mode 100644 index 0000000000000..9eb1047e4242f --- /dev/null +++ b/k8s-operator/sessionrecording/fakes/fakes.go @@ -0,0 +1,138 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package fakes contains mocks used for testing 'kubectl exec' session +// recording functionality. +package fakes + +import ( + "bytes" + "encoding/json" + "net" + "sync" + "testing" + "time" + + "math/rand" + + "tailscale.com/sessionrecording" + "tailscale.com/tstime" +) + +func New(conn net.Conn, wb bytes.Buffer, rb bytes.Buffer, closed bool) net.Conn { + return &TestConn{ + Conn: conn, + writeBuf: wb, + readBuf: rb, + closed: closed, + } +} + +type TestConn struct { + net.Conn + // writeBuf contains whatever was send to the conn via Write. + writeBuf bytes.Buffer + // readBuf contains whatever was sent to the conn via Read. + readBuf bytes.Buffer + sync.RWMutex // protects the following + closed bool +} + +var _ net.Conn = &TestConn{} + +func (tc *TestConn) Read(b []byte) (int, error) { + return tc.readBuf.Read(b) +} + +func (tc *TestConn) Write(b []byte) (int, error) { + return tc.writeBuf.Write(b) +} + +func (tc *TestConn) Close() error { + tc.Lock() + defer tc.Unlock() + tc.closed = true + return nil +} + +func (tc *TestConn) IsClosed() bool { + tc.Lock() + defer tc.Unlock() + return tc.closed +} + +func (tc *TestConn) WriteBufBytes() []byte { + return tc.writeBuf.Bytes() +} + +func (tc *TestConn) ResetReadBuf() { + tc.readBuf.Reset() +} + +func (tc *TestConn) WriteReadBufBytes(b []byte) error { + _, err := tc.readBuf.Write(b) + return err +} + +type TestSessionRecorder struct { + // buf holds data that was sent to the session recorder. 
+ buf bytes.Buffer +} + +func (t *TestSessionRecorder) Write(b []byte) (int, error) { + return t.buf.Write(b) +} + +func (t *TestSessionRecorder) Close() error { + t.buf.Reset() + return nil +} + +func (t *TestSessionRecorder) Bytes() []byte { + return t.buf.Bytes() +} + +func CastLine(t *testing.T, p []byte, clock tstime.Clock) []byte { + t.Helper() + j, err := json.Marshal([]any{ + clock.Now().Sub(clock.Now()).Seconds(), + "o", + string(p), + }) + if err != nil { + t.Fatalf("error marshalling cast line: %v", err) + } + return append(j, '\n') +} + +func AsciinemaResizeMsg(t *testing.T, width, height int) []byte { + t.Helper() + ch := sessionrecording.CastHeader{ + Width: width, + Height: height, + } + bs, err := json.Marshal(ch) + if err != nil { + t.Fatalf("error marshalling CastHeader: %v", err) + } + return append(bs, '\n') +} + +func RandomBytes(t *testing.T) [][]byte { + t.Helper() + r := rand.New(rand.NewSource(time.Now().UnixNano())) + n := r.Intn(4096) + b := make([]byte, n) + t.Logf("RandomBytes: generating byte slice of length %d", n) + _, err := r.Read(b) + if err != nil { + t.Fatalf("error generating random byte slice: %v", err) + } + if len(b) < 2 { + return [][]byte{b} + } + split := r.Intn(len(b) - 1) + return [][]byte{b[:split], b[split:]} +} diff --git a/cmd/k8s-operator/spdy-hijacker.go b/k8s-operator/sessionrecording/hijacker.go similarity index 66% rename from cmd/k8s-operator/spdy-hijacker.go rename to k8s-operator/sessionrecording/hijacker.go index f74771e420914..2e7ec75980bac 100644 --- a/cmd/k8s-operator/spdy-hijacker.go +++ b/k8s-operator/sessionrecording/hijacker.go @@ -3,7 +3,9 @@ //go:build !plan9 -package main +// Package sessionrecording contains functionality for recording Kubernetes API +// server proxy 'kubectl exec' sessions. 
+package sessionrecording import ( "bufio" @@ -19,17 +21,68 @@ import ( "github.com/pkg/errors" "go.uber.org/zap" "tailscale.com/client/tailscale/apitype" + "tailscale.com/k8s-operator/sessionrecording/spdy" + "tailscale.com/k8s-operator/sessionrecording/tsrecorder" + "tailscale.com/k8s-operator/sessionrecording/ws" + "tailscale.com/sessionrecording" "tailscale.com/tailcfg" "tailscale.com/tsnet" "tailscale.com/tstime" + "tailscale.com/util/clientmetric" "tailscale.com/util/multierr" ) -// spdyHijacker implements [net/http.Hijacker] interface. +const ( + SPDYProtocol Protocol = "SPDY" + WSProtocol Protocol = "WebSocket" +) + +// Protocol is the streaming protocol of the hijacked session. Supported +// protocols are SPDY and WebSocket. +type Protocol string + +var ( + // CounterSessionRecordingsAttempted counts the number of session recording attempts. + CounterSessionRecordingsAttempted = clientmetric.NewCounter("k8s_auth_proxy_session_recordings_attempted") + + // counterSessionRecordingsUploaded counts the number of successfully uploaded session recordings. + counterSessionRecordingsUploaded = clientmetric.NewCounter("k8s_auth_proxy_session_recordings_uploaded") +) + +func New(opts HijackerOpts) *Hijacker { + return &Hijacker{ + ts: opts.TS, + req: opts.Req, + who: opts.Who, + ResponseWriter: opts.W, + pod: opts.Pod, + ns: opts.Namespace, + addrs: opts.Addrs, + failOpen: opts.FailOpen, + proto: opts.Proto, + log: opts.Log, + connectToRecorder: sessionrecording.ConnectToRecorder, + } +} + +type HijackerOpts struct { + TS *tsnet.Server + Req *http.Request + W http.ResponseWriter + Who *apitype.WhoIsResponse + Addrs []netip.AddrPort + Log *zap.SugaredLogger + Pod string + Namespace string + FailOpen bool + Proto Protocol +} + +// Hijacker implements [net/http.Hijacker] interface. // It must be configured with an http request for a 'kubectl exec' session that // needs to be recorded. 
It knows how to hijack the connection and configure for // the session contents to be sent to a tsrecorder instance. -type spdyHijacker struct { +type Hijacker struct { http.ResponseWriter ts *tsnet.Server req *http.Request @@ -40,6 +93,7 @@ type spdyHijacker struct { addrs []netip.AddrPort // tsrecorder addresses failOpen bool // whether to fail open if recording fails connectToRecorder RecorderDialFn + proto Protocol // streaming protocol } // RecorderDialFn dials the specified netip.AddrPorts that should be tsrecorder @@ -51,7 +105,7 @@ type RecorderDialFn func(context.Context, []netip.AddrPort, func(context.Context // Hijack hijacks a 'kubectl exec' session and configures for the session // contents to be sent to a recorder. -func (h *spdyHijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) { +func (h *Hijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) { h.log.Infof("recorder addrs: %v, failOpen: %v", h.addrs, h.failOpen) reqConn, brw, err := h.ResponseWriter.(http.Hijacker).Hijack() if err != nil { @@ -69,15 +123,19 @@ func (h *spdyHijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) { // spdyHijacker.addrs. Returns conn from provided opts, wrapped in recording // logic. If connecting to the recorder fails or an error is received during the // session and spdyHijacker.failOpen is false, connection will be closed. -func (h *spdyHijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, error) { +func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, error) { const ( // https://docs.asciinema.org/manual/asciicast/v2/ asciicastv2 = 2 ) - var wc io.WriteCloser + var ( + wc io.WriteCloser + err error + errChan <-chan error + ) h.log.Infof("kubectl exec session will be recorded, recorders: %v, fail open policy: %t", h.addrs, h.failOpen) // TODO (irbekrm): send client a message that session will be recorded. 
- rw, _, errChan, err := h.connectToRecorder(ctx, h.addrs, h.ts.Dial) + wc, _, errChan, err = h.connectToRecorder(ctx, h.addrs, h.ts.Dial) if err != nil { msg := fmt.Sprintf("error connecting to session recorders: %v", err) if h.failOpen { @@ -94,27 +152,16 @@ func (h *spdyHijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.C // TODO (irbekrm): log which recorder h.log.Info("successfully connected to a session recorder") - wc = rw cl := tstime.DefaultClock{} - lc := &spdyRemoteConnRecorder{ - log: h.log, - Conn: conn, - rec: &recorder{ - start: cl.Now(), - clock: cl, - failOpen: h.failOpen, - conn: wc, - }, - } - + rec := tsrecorder.New(wc, cl, cl.Now(), h.failOpen) qp := h.req.URL.Query() - ch := CastHeader{ + ch := sessionrecording.CastHeader{ Version: asciicastv2, - Timestamp: lc.rec.start.Unix(), + Timestamp: cl.Now().Unix(), Command: strings.Join(qp["command"], " "), SrcNode: strings.TrimSuffix(h.who.Node.Name, "."), SrcNodeID: h.who.Node.StableID, - Kubernetes: &Kubernetes{ + Kubernetes: &sessionrecording.Kubernetes{ PodName: h.pod, Namespace: h.ns, Container: strings.Join(qp["container"], " "), @@ -126,7 +173,17 @@ func (h *spdyHijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.C } else { ch.SrcNodeTags = h.who.Node.Tags } - lc.ch = ch + + var lc net.Conn + switch h.proto { + case SPDYProtocol: + lc = spdy.New(conn, rec, ch, h.log) + case WSProtocol: + lc = ws.New(conn, rec, ch, h.log) + default: + return nil, fmt.Errorf("unknown protocol: %s", h.proto) + } + go func() { var err error select { @@ -147,7 +204,6 @@ func (h *spdyHijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.C } msg += "; failure mode set to 'fail closed'; closing connection" h.log.Error(msg) - lc.failed = true // TODO (irbekrm): write a message to the client if err := lc.Close(); err != nil { h.log.Infof("error closing recorder connections: %v", err) @@ -157,52 +213,6 @@ func (h *spdyHijacker) setUpRecording(ctx context.Context, conn net.Conn) 
(net.C return lc, nil } -// CastHeader is the asciicast header to be sent to the recorder at the start of -// the recording of a session. -// https://docs.asciinema.org/manual/asciicast/v2/#header -type CastHeader struct { - // Version is the asciinema file format version. - Version int `json:"version"` - - // Width is the terminal width in characters. - Width int `json:"width"` - - // Height is the terminal height in characters. - Height int `json:"height"` - - // Timestamp is the unix timestamp of when the recording started. - Timestamp int64 `json:"timestamp"` - - // Tailscale-specific fields: SrcNode is the full MagicDNS name of the - // tailnet node originating the connection, without the trailing dot. - SrcNode string `json:"srcNode"` - - // SrcNodeID is the node ID of the tailnet node originating the connection. - SrcNodeID tailcfg.StableNodeID `json:"srcNodeID"` - - // SrcNodeTags is the list of tags on the node originating the connection (if any). - SrcNodeTags []string `json:"srcNodeTags,omitempty"` - - // SrcNodeUserID is the user ID of the node originating the connection (if not tagged). - SrcNodeUserID tailcfg.UserID `json:"srcNodeUserID,omitempty"` // if not tagged - - // SrcNodeUser is the LoginName of the node originating the connection (if not tagged). - SrcNodeUser string `json:"srcNodeUser,omitempty"` - - Command string - - // Kubernetes-specific fields: - Kubernetes *Kubernetes `json:"kubernetes,omitempty"` -} - -// Kubernetes contains 'kubectl exec' session specific information for -// tsrecorder. 
-type Kubernetes struct { - PodName string - Namespace string - Container string -} - func closeConnWithWarning(conn net.Conn, msg string) error { b := io.NopCloser(bytes.NewBuffer([]byte(msg))) resp := http.Response{Status: http.StatusText(http.StatusForbidden), StatusCode: http.StatusForbidden, Body: b} diff --git a/cmd/k8s-operator/spdy-hijacker_test.go b/k8s-operator/sessionrecording/hijacker_test.go similarity index 72% rename from cmd/k8s-operator/spdy-hijacker_test.go rename to k8s-operator/sessionrecording/hijacker_test.go index 7ac79d7f0ca7d..5c19d3a1d870e 100644 --- a/cmd/k8s-operator/spdy-hijacker_test.go +++ b/k8s-operator/sessionrecording/hijacker_test.go @@ -3,7 +3,7 @@ //go:build !plan9 -package main +package sessionrecording import ( "context" @@ -19,12 +19,13 @@ import ( "go.uber.org/zap" "tailscale.com/client/tailscale/apitype" + "tailscale.com/k8s-operator/sessionrecording/fakes" "tailscale.com/tailcfg" "tailscale.com/tsnet" "tailscale.com/tstest" ) -func Test_SPDYHijacker(t *testing.T) { +func Test_Hijacker(t *testing.T) { zl, err := zap.NewDevelopment() if err != nil { t.Fatal(err) @@ -36,37 +37,47 @@ func Test_SPDYHijacker(t *testing.T) { failRecorderConnPostConnect bool // send error down the error channel wantsConnClosed bool wantsSetupErr bool + proto Protocol }{ { - name: "setup succeeds, conn stays open", + name: "setup_succeeds_conn_stays_open", + proto: SPDYProtocol, }, { - name: "setup fails, policy is to fail open, conn stays open", + name: "setup_succeeds_conn_stays_open_ws", + proto: WSProtocol, + }, + { + name: "setup_fails_policy_is_to_fail_open_conn_stays_open", failOpen: true, failRecorderConnect: true, + proto: SPDYProtocol, }, { - name: "setup fails, policy is to fail closed, conn is closed", + name: "setup_fails_policy_is_to_fail_closed_conn_is_closed", failRecorderConnect: true, wantsSetupErr: true, wantsConnClosed: true, + proto: SPDYProtocol, }, { - name: "connection fails post-initial connect, policy is to fail open, conn 
stays open", + name: "connection_fails_post-initial_connect_policy_is_to_fail_open_conn_stays_open", failRecorderConnPostConnect: true, failOpen: true, + proto: SPDYProtocol, }, { - name: "connection fails post-initial connect, policy is to fail closed, conn is closed", + name: "connection_fails_post-initial_connect,_policy_is_to_fail_closed_conn_is_closed", failRecorderConnPostConnect: true, wantsConnClosed: true, + proto: SPDYProtocol, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tc := &testConn{} + tc := &fakes.TestConn{} ch := make(chan error) - h := &spdyHijacker{ + h := &Hijacker{ connectToRecorder: func(context.Context, []netip.AddrPort, func(context.Context, string, string) (net.Conn, error)) (wc io.WriteCloser, rec []*tailcfg.SSHRecordingAttempt, _ <-chan error, err error) { if tt.failRecorderConnect { err = errors.New("test") @@ -78,6 +89,7 @@ func Test_SPDYHijacker(t *testing.T) { log: zl.Sugar(), ts: &tsnet.Server{}, req: &http.Request{URL: &url.URL{}}, + proto: tt.proto, } ctx := context.Background() _, err := h.setUpRecording(ctx, tc) @@ -98,8 +110,8 @@ func Test_SPDYHijacker(t *testing.T) { // (test that connection remains open over some period // of time). 
if err := tstest.WaitFor(timeout, func() (err error) { - if tt.wantsConnClosed != tc.isClosed() { - return fmt.Errorf("got connection state: %t, wants connection state: %t", tc.isClosed(), tt.wantsConnClosed) + if tt.wantsConnClosed != tc.IsClosed() { + return fmt.Errorf("got connection state: %t, wants connection state: %t", tc.IsClosed(), tt.wantsConnClosed) } return nil }); err != nil { diff --git a/cmd/k8s-operator/spdy-remote-conn-recorder.go b/k8s-operator/sessionrecording/spdy/conn.go similarity index 70% rename from cmd/k8s-operator/spdy-remote-conn-recorder.go rename to k8s-operator/sessionrecording/spdy/conn.go index 563b2a2410b3e..19a01641e4155 100644 --- a/cmd/k8s-operator/spdy-remote-conn-recorder.go +++ b/k8s-operator/sessionrecording/spdy/conn.go @@ -3,7 +3,9 @@ //go:build !plan9 -package main +// Package spdy contains functionality for parsing SPDY streaming sessions. This +// is used for 'kubectl exec' session recording. +package spdy import ( "bytes" @@ -17,16 +19,35 @@ import ( "go.uber.org/zap" corev1 "k8s.io/api/core/v1" + "tailscale.com/k8s-operator/sessionrecording/tsrecorder" + "tailscale.com/sessionrecording" ) -// spdyRemoteConnRecorder is a wrapper around net.Conn. It reads the bytestream -// for a 'kubectl exec' session, sends session recording data to the configured -// recorder and forwards the raw bytes to the original destination. -type spdyRemoteConnRecorder struct { +// New wraps the provided network connection and returns a connection whose reads and writes will get triggered as data is received on the hijacked connection. +// The connection must be a hijacked connection for a 'kubectl exec' session using SPDY. +// The hijacked connection is used to transmit SPDY streams between Kubernetes client ('kubectl') and the destination container. +// Data read from the underlying network connection is data sent via one of the SPDY streams from the client to the container. 
+// Data written to the underlying connection is data sent from the container to the client. +// We parse the data and send everything for the STDOUT/STDERR streams to the configured tsrecorder as an asciinema recording with the provided header. +// https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/4006-transition-spdy-to-websockets#background-remotecommand-subprotocol +func New(nc net.Conn, rec *tsrecorder.Client, ch sessionrecording.CastHeader, log *zap.SugaredLogger) net.Conn { + return &conn{ + Conn: nc, + rec: rec, + ch: ch, + log: log, + } +} + +// conn is a wrapper around net.Conn. It reads the bytestream for a 'kubectl +// exec' session streamed using SPDY protocol, sends session recording data to +// the configured recorder and forwards the raw bytes to the original +// destination. +type conn struct { net.Conn // rec knows how to send data written to it to a tsrecorder instance. - rec *recorder - ch CastHeader + rec *tsrecorder.Client + ch sessionrecording.CastHeader stdoutStreamID atomic.Uint32 stderrStreamID atomic.Uint32 @@ -34,7 +55,6 @@ type spdyRemoteConnRecorder struct { wmu sync.Mutex // sequences writes closed bool - failed bool rmu sync.Mutex // sequences reads writeCastHeaderOnce sync.Once @@ -53,7 +73,7 @@ type spdyRemoteConnRecorder struct { // If the frame is a data frame for resize stream, sends resize message to the // recorder. If the frame is a SYN_STREAM control frame that starts stdout, // stderr or resize stream, store the stream ID. -func (c *spdyRemoteConnRecorder) Read(b []byte) (int, error) { +func (c *conn) Read(b []byte) (int, error) { c.rmu.Lock() defer c.rmu.Unlock() n, err := c.Conn.Read(b) @@ -103,7 +123,7 @@ func (c *spdyRemoteConnRecorder) Read(b []byte) (int, error) { // Write forwards the raw data of the latest parsed SPDY frame to the original // destination. If the frame is an SPDY data frame, it also sends the payload to // the connected session recorder. 
-func (c *spdyRemoteConnRecorder) Write(b []byte) (int, error) { +func (c *conn) Write(b []byte) (int, error) { c.wmu.Lock() defer c.wmu.Unlock() c.writeBuf.Write(b) @@ -133,7 +153,7 @@ func (c *spdyRemoteConnRecorder) Write(b []byte) (int, error) { return } j = append(j, '\n') - err = c.rec.writeCastLine(j) + err = c.rec.WriteCastLine(j) if err != nil { c.log.Errorf("received error from recorder: %v", err) } @@ -151,15 +171,12 @@ func (c *spdyRemoteConnRecorder) Write(b []byte) (int, error) { return len(b), err } -func (c *spdyRemoteConnRecorder) Close() error { +func (c *conn) Close() error { c.wmu.Lock() defer c.wmu.Unlock() if c.closed { return nil } - if !c.failed && c.writeBuf.Len() > 0 { - c.Conn.Write(c.writeBuf.Bytes()) - } c.writeBuf.Reset() c.closed = true err := c.Conn.Close() @@ -167,13 +184,13 @@ func (c *spdyRemoteConnRecorder) Close() error { return err } -// parseSynStream parses SYN_STREAM SPDY control frame and updates -// spdyRemoteConnRecorder to store the newly created stream's ID if it is one of +// storeStreamID parses SYN_STREAM SPDY control frame and updates +// conn to store the newly created stream's ID if it is one of // the stream types we care about. Storing stream_id:stream_type mapping allows // us to parse received data frames (that have stream IDs) differently depening // on which stream they belong to (i.e send data frame payload for stdout stream // to session recorder). 
-func (c *spdyRemoteConnRecorder) storeStreamID(sf spdyFrame, header http.Header) { +func (c *conn) storeStreamID(sf spdyFrame, header http.Header) { const ( streamTypeHeaderKey = "Streamtype" ) diff --git a/cmd/k8s-operator/spdy-remote-conn-recorder_test.go b/k8s-operator/sessionrecording/spdy/conn_test.go similarity index 78% rename from cmd/k8s-operator/spdy-remote-conn-recorder_test.go rename to k8s-operator/sessionrecording/spdy/conn_test.go index 95f5a8bfcef5e..629536b2e00b1 100644 --- a/cmd/k8s-operator/spdy-remote-conn-recorder_test.go +++ b/k8s-operator/sessionrecording/spdy/conn_test.go @@ -3,19 +3,19 @@ //go:build !plan9 -package main +package spdy import ( - "bytes" "encoding/json" - "net" + "fmt" "reflect" - "sync" "testing" "go.uber.org/zap" + "tailscale.com/k8s-operator/sessionrecording/fakes" + "tailscale.com/k8s-operator/sessionrecording/tsrecorder" + "tailscale.com/sessionrecording" "tailscale.com/tstest" - "tailscale.com/tstime" ) // Test_Writes tests that 1 or more Write calls to spdyRemoteConnRecorder @@ -56,13 +56,13 @@ func Test_Writes(t *testing.T) { name: "single_write_stdout_data_frame_with_payload", inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}}, wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}, - wantRecorded: castLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl), + wantRecorded: fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl), }, { name: "single_write_stderr_data_frame_with_payload", inputs: [][]byte{{0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}}, wantForwarded: []byte{0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}, - wantRecorded: castLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl), + wantRecorded: fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl), }, { name: "single_data_frame_unknow_stream_with_payload", @@ -73,13 +73,13 @@ func Test_Writes(t *testing.T) { name: "control_frame_and_data_frame_split_across_two_writes", 
inputs: [][]byte{{0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, {0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}}, wantForwarded: []byte{0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}, - wantRecorded: castLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl), + wantRecorded: fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl), }, { name: "single_first_write_stdout_data_frame_with_payload", inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}}, wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}, - wantRecorded: append(asciinemaResizeMsg(t, 10, 20), castLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl)...), + wantRecorded: append(fakes.AsciinemaResizeMsg(t, 10, 20), fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl)...), width: 10, height: 20, firstWrite: true, @@ -87,19 +87,15 @@ func Test_Writes(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tc := &testConn{} - sr := &testSessionRecorder{} - rec := &recorder{ - conn: sr, - clock: cl, - start: cl.Now(), - } + tc := &fakes.TestConn{} + sr := &fakes.TestSessionRecorder{} + rec := tsrecorder.New(sr, cl, cl.Now(), true) - c := &spdyRemoteConnRecorder{ + c := &conn{ Conn: tc, log: zl.Sugar(), rec: rec, - ch: CastHeader{ + ch: sessionrecording.CastHeader{ Width: tt.width, Height: tt.height, }, @@ -118,13 +114,13 @@ func Test_Writes(t *testing.T) { } // Assert that the expected bytes have been forwarded to the original destination. - gotForwarded := tc.writeBuf.Bytes() + gotForwarded := tc.WriteBufBytes() if !reflect.DeepEqual(gotForwarded, tt.wantForwarded) { t.Errorf("expected bytes not forwarded, wants\n%v\ngot\n%v", tt.wantForwarded, gotForwarded) } // Assert that the expected bytes have been forwarded to the session recorder. 
- gotRecorded := sr.buf.Bytes() + gotRecorded := sr.Bytes() if !reflect.DeepEqual(gotRecorded, tt.wantRecorded) { t.Errorf("expected bytes not recorded, wants\n%v\ngot\n%v", tt.wantRecorded, gotRecorded) } @@ -197,14 +193,10 @@ func Test_Reads(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tc := &testConn{} - sr := &testSessionRecorder{} - rec := &recorder{ - conn: sr, - clock: cl, - start: cl.Now(), - } - c := &spdyRemoteConnRecorder{ + tc := &fakes.TestConn{} + sr := &fakes.TestSessionRecorder{} + rec := tsrecorder.New(sr, cl, cl.Now(), true) + c := &conn{ Conn: tc, log: zl.Sugar(), rec: rec, @@ -213,9 +205,8 @@ func Test_Reads(t *testing.T) { for i, input := range tt.inputs { c.zlibReqReader = reader - tc.readBuf.Reset() - _, err := tc.readBuf.Write(input) - if err != nil { + tc.ResetReadBuf() + if err := tc.WriteReadBufBytes(input); err != nil { t.Fatalf("writing bytes to test conn: %v", err) } _, err = c.Read(make([]byte, len(input))) @@ -244,83 +235,62 @@ func Test_Reads(t *testing.T) { } } -func castLine(t *testing.T, p []byte, clock tstime.Clock) []byte { - t.Helper() - j, err := json.Marshal([]any{ - clock.Now().Sub(clock.Now()).Seconds(), - "o", - string(p), - }) +// Test_conn_ReadRand tests reading arbitrarily generated byte slices from conn to +// test that we don't panic when parsing input from a broken or malicious +// client. 
+func Test_conn_ReadRand(t *testing.T) { + zl, err := zap.NewDevelopment() if err != nil { - t.Fatalf("error marshalling cast line: %v", err) + t.Fatalf("error creating a test logger: %v", err) + } + for i := range 1000 { + tc := &fakes.TestConn{} + tc.ResetReadBuf() + c := &conn{ + Conn: tc, + log: zl.Sugar(), + } + bb := fakes.RandomBytes(t) + for j, input := range bb { + if err := tc.WriteReadBufBytes(input); err != nil { + t.Fatalf("[%d] writing bytes to test conn: %v", i, err) + } + f := func() { + c.Read(make([]byte, len(input))) + } + testPanic(t, f, fmt.Sprintf("[%d %d] Read panic parsing input of length %d", i, j, len(input))) + } } - return append(j, '\n') } -func resizeMsgBytes(t *testing.T, width, height int) []byte { - t.Helper() - bs, err := json.Marshal(spdyResizeMsg{Width: width, Height: height}) +// Test_conn_WriteRand calls conn.Write with an arbitrary input to validate that +// it does not panic. +func Test_conn_WriteRand(t *testing.T) { + zl, err := zap.NewDevelopment() if err != nil { - t.Fatalf("error marshalling resizeMsg: %v", err) + t.Fatalf("error creating a test logger: %v", err) + } + for i := range 100 { + tc := &fakes.TestConn{} + c := &conn{ + Conn: tc, + log: zl.Sugar(), + } + bb := fakes.RandomBytes(t) + for j, input := range bb { + f := func() { + c.Write(input) + } + testPanic(t, f, fmt.Sprintf("[%d %d] Write: panic parsing input of length %d", i, j, len(input))) + } } - return bs } -func asciinemaResizeMsg(t *testing.T, width, height int) []byte { +func resizeMsgBytes(t *testing.T, width, height int) []byte { t.Helper() - ch := CastHeader{ - Width: width, - Height: height, - } - bs, err := json.Marshal(ch) + bs, err := json.Marshal(spdyResizeMsg{Width: width, Height: height}) if err != nil { - t.Fatalf("error marshalling CastHeader: %v", err) + t.Fatalf("error marshalling resizeMsg: %v", err) } - return append(bs, '\n') -} - -type testConn struct { - net.Conn - // writeBuf contains whatever was send to the conn via Write. 
- writeBuf bytes.Buffer - // readBuf contains whatever was sent to the conn via Read. - readBuf bytes.Buffer - sync.RWMutex // protects the following - closed bool -} - -var _ net.Conn = &testConn{} - -func (tc *testConn) Read(b []byte) (int, error) { - return tc.readBuf.Read(b) -} - -func (tc *testConn) Write(b []byte) (int, error) { - return tc.writeBuf.Write(b) -} - -func (tc *testConn) Close() error { - tc.Lock() - defer tc.Unlock() - tc.closed = true - return nil -} -func (tc *testConn) isClosed() bool { - tc.Lock() - defer tc.Unlock() - return tc.closed -} - -type testSessionRecorder struct { - // buf holds data that was sent to the session recorder. - buf bytes.Buffer -} - -func (t *testSessionRecorder) Write(b []byte) (int, error) { - return t.buf.Write(b) -} - -func (t *testSessionRecorder) Close() error { - t.buf.Reset() - return nil + return bs } diff --git a/cmd/k8s-operator/spdy-frame.go b/k8s-operator/sessionrecording/spdy/frame.go similarity index 99% rename from cmd/k8s-operator/spdy-frame.go rename to k8s-operator/sessionrecording/spdy/frame.go index 0ddefdfa1e9ce..54b29d33a9622 100644 --- a/cmd/k8s-operator/spdy-frame.go +++ b/k8s-operator/sessionrecording/spdy/frame.go @@ -3,7 +3,7 @@ //go:build !plan9 -package main +package spdy import ( "bytes" diff --git a/cmd/k8s-operator/spdy-frame_test.go b/k8s-operator/sessionrecording/spdy/frame_test.go similarity index 90% rename from cmd/k8s-operator/spdy-frame_test.go rename to k8s-operator/sessionrecording/spdy/frame_test.go index 416ddfc8bc59d..4896cdcbf78a5 100644 --- a/cmd/k8s-operator/spdy-frame_test.go +++ b/k8s-operator/sessionrecording/spdy/frame_test.go @@ -3,17 +3,21 @@ //go:build !plan9 -package main +package spdy import ( "bytes" "compress/zlib" "encoding/binary" + "fmt" "io" "net/http" "reflect" "strings" "testing" + "time" + + "math/rand" "github.com/google/go-cmp/cmp" "go.uber.org/zap" @@ -200,6 +204,29 @@ func Test_spdyFrame_parseHeaders(t *testing.T) { } } +// Test_spdyFrame_ParseRand 
calls spdyFrame.Parse with randomly generated bytes +// to test that it doesn't panic. +func Test_spdyFrame_ParseRand(t *testing.T) { + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + r := rand.New(rand.NewSource(time.Now().UnixNano())) + for i := range 100 { + n := r.Intn(4096) + b := make([]byte, n) + _, err := r.Read(b) + if err != nil { + t.Fatalf("error generating random byte slice: %v", err) + } + sf := &spdyFrame{} + f := func() { + sf.Parse(b, zl.Sugar()) + } + testPanic(t, f, fmt.Sprintf("[%d] Parse panicked running with byte slice of length %d: %v", i, n, r)) + } +} + // payload takes a control frame type and a map with 0 or more header keys and // values and returns a SPDY control frame payload with the header as SPDY zlib // compressed header name/value block. The payload is padded with arbitrary @@ -291,3 +318,13 @@ func header(hs map[string]string) http.Header { } return h } + +func testPanic(t *testing.T, f func(), msg string) { + t.Helper() + defer func() { + if r := recover(); r != nil { + t.Fatal(msg, r) + } + }() + f() +} diff --git a/cmd/k8s-operator/zlib-reader.go b/k8s-operator/sessionrecording/spdy/zlib-reader.go similarity index 99% rename from cmd/k8s-operator/zlib-reader.go rename to k8s-operator/sessionrecording/spdy/zlib-reader.go index b29772be3a7e0..1eb654be35632 100644 --- a/cmd/k8s-operator/zlib-reader.go +++ b/k8s-operator/sessionrecording/spdy/zlib-reader.go @@ -3,7 +3,7 @@ //go:build !plan9 -package main +package spdy import ( "bytes" diff --git a/cmd/k8s-operator/recorder.go b/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go similarity index 70% rename from cmd/k8s-operator/recorder.go rename to k8s-operator/sessionrecording/tsrecorder/tsrecorder.go index ae17f382040db..30142e4bdd1a5 100644 --- a/cmd/k8s-operator/recorder.go +++ b/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go @@ -3,7 +3,8 @@ //go:build !plan9 -package main +// Package tsrecorder contains functionality for connecting to a 
tsrecorder instance. +package tsrecorder import ( "encoding/json" @@ -16,9 +17,18 @@ import ( "tailscale.com/tstime" ) +func New(conn io.WriteCloser, clock tstime.Clock, start time.Time, failOpen bool) *Client { + return &Client{ + start: start, + clock: clock, + conn: conn, + failOpen: failOpen, + } +} + // recorder knows how to send the provided bytes to the configured tsrecorder // instance in asciinema format. -type recorder struct { +type Client struct { start time.Time clock tstime.Clock @@ -36,7 +46,7 @@ type recorder struct { // Write appends timestamp to the provided bytes and sends them to the // configured tsrecorder. -func (rec *recorder) Write(p []byte) (err error) { +func (rec *Client) Write(p []byte) (err error) { if len(p) == 0 { return nil } @@ -52,7 +62,7 @@ func (rec *recorder) Write(p []byte) (err error) { return fmt.Errorf("error marhalling payload: %w", err) } j = append(j, '\n') - if err := rec.writeCastLine(j); err != nil { + if err := rec.WriteCastLine(j); err != nil { if !rec.failOpen { return fmt.Errorf("error writing payload to recorder: %w", err) } @@ -61,7 +71,7 @@ func (rec *recorder) Write(p []byte) (err error) { return nil } -func (rec *recorder) Close() error { +func (rec *Client) Close() error { rec.mu.Lock() defer rec.mu.Unlock() if rec.conn == nil { @@ -74,15 +84,20 @@ func (rec *recorder) Close() error { // writeCastLine sends bytes to the tsrecorder. The bytes should be in // asciinema format. 
-func (rec *recorder) writeCastLine(j []byte) error { - rec.mu.Lock() - defer rec.mu.Unlock() - if rec.conn == nil { +func (c *Client) WriteCastLine(j []byte) error { + c.mu.Lock() + defer c.mu.Unlock() + if c.conn == nil { return errors.New("recorder closed") } - _, err := rec.conn.Write(j) + _, err := c.conn.Write(j) if err != nil { return fmt.Errorf("recorder write error: %w", err) } return nil } + +type ResizeMsg struct { + Width int `json:"width"` + Height int `json:"height"` +} diff --git a/k8s-operator/sessionrecording/ws/conn.go b/k8s-operator/sessionrecording/ws/conn.go new file mode 100644 index 0000000000000..82fd094d15364 --- /dev/null +++ b/k8s-operator/sessionrecording/ws/conn.go @@ -0,0 +1,301 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// package ws has functionality to parse 'kubectl exec' sessions streamed using +// WebSocket protocol. +package ws + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "sync" + + "go.uber.org/zap" + "k8s.io/apimachinery/pkg/util/remotecommand" + "tailscale.com/k8s-operator/sessionrecording/tsrecorder" + "tailscale.com/sessionrecording" + "tailscale.com/util/multierr" +) + +// New wraps the provided network connection and returns a connection whose reads and writes will get triggered as data is received on the hijacked connection. +// The connection must be a hijacked connection for a 'kubectl exec' session using WebSocket protocol and a *.channel.k8s.io subprotocol. +// The hijacked connection is used to transmit *.channel.k8s.io streams between Kubernetes client ('kubectl') and the destination proxy controlled by Kubernetes. +// Data read from the underlying network connection is data sent via one of the streams from the client to the container. +// Data written to the underlying connection is data sent from the container to the client. 
+// We parse the data and send everything for the STDOUT/STDERR streams to the configured tsrecorder as an asciinema recording with the provided header. +// https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/4006-transition-spdy-to-websockets#proposal-new-remotecommand-sub-protocol-version---v5channelk8sio +func New(c net.Conn, rec *tsrecorder.Client, ch sessionrecording.CastHeader, log *zap.SugaredLogger) net.Conn { + return &conn{ + Conn: c, + rec: rec, + ch: ch, + log: log, + } +} + +// conn is a wrapper around net.Conn. It reads the bytestream +// for a 'kubectl exec' session, sends session recording data to the configured +// recorder and forwards the raw bytes to the original destination. +// A new conn is created per session. +// conn only knows how to read a 'kubectl exec' session that is streamed using WebSocket protocol. +// https://www.rfc-editor.org/rfc/rfc6455 +type conn struct { + net.Conn + // rec knows how to send data to a tsrecorder instance. + rec *tsrecorder.Client + // ch is the asciinema CastHeader for a session. + ch sessionrecording.CastHeader + log *zap.SugaredLogger + + rmu sync.Mutex // sequences reads + // currentReadMsg contains parsed contents of a websocket binary data message that + // is currently being read from the underlying net.Conn. + currentReadMsg *message + // readBuf contains bytes for a currently parsed binary data message + // read from the underlying conn. If the message is masked, it is + // unmasked in place, so having this buffer allows us to avoid modifying + // the original byte array. + readBuf bytes.Buffer + + wmu sync.Mutex // sequences writes + writeCastHeaderOnce sync.Once + closed bool // connection is closed + // writeBuf contains bytes for a currently parsed binary data message + // being written to the underlying conn. If the message is masked, it is + // unmasked in place, so having this buffer allows us to avoid modifying + // the original byte array. 
+ writeBuf bytes.Buffer + // currentWriteMsg contains parsed contents of a websocket binary data message that + // is currently being written to the underlying net.Conn. + currentWriteMsg *message +} + +// Read reads bytes from the original connection and parses them as websocket +// message fragments. +// Bytes read from the original connection are the bytes sent from the Kubernetes client (kubectl) to the destination container via kubelet. + +// If the message is for the resize stream, sets the width +// and height of the CastHeader for this connection. +// The fragment can be incomplete. +func (c *conn) Read(b []byte) (int, error) { + c.rmu.Lock() + defer c.rmu.Unlock() + n, err := c.Conn.Read(b) + if err != nil { + // It seems that we sometimes get a wrapped io.EOF, but the + // caller checks for io.EOF with ==. + if errors.Is(err, io.EOF) { + err = io.EOF + } + return 0, err + } + if n == 0 { + c.log.Debug("[unexpected] Read called for 0 length bytes") + return 0, nil + } + + typ := messageType(opcode(b)) + if (typ == noOpcode && c.readMsgIsIncomplete()) || c.readBufHasIncompleteFragment() { // subsequent fragment + if typ, err = c.curReadMsgType(); err != nil { + return 0, err + } + } + + // A control message can not be fragmented and we are not interested in + // these messages. Just return. + if isControlMessage(typ) { + return n, nil + } + + // The only data message type that Kubernetes supports is binary message. + // If we received another message type, return and let the API server close the connection. + // https://github.com/kubernetes/client-go/blob/release-1.30/tools/remotecommand/websocket.go#L281 + if typ != binaryMessage { + c.log.Infof("[unexpected] received a data message with a type that is not binary message type %v", typ) + return n, nil + } + + readMsg := &message{typ: typ} // start a new message... + // ... or pick up an already started one if the previous fragment was not final. 
+ if c.readMsgIsIncomplete() || c.readBufHasIncompleteFragment() { + readMsg = c.currentReadMsg + } + + if _, err := c.readBuf.Write(b[:n]); err != nil { + return 0, fmt.Errorf("[unexpected] error writing message contents to read buffer: %w", err) + } + + ok, err := readMsg.Parse(c.readBuf.Bytes(), c.log) + if err != nil { + return 0, fmt.Errorf("error parsing message: %v", err) + } + if !ok { // incomplete fragment + return n, nil + } + c.readBuf.Next(len(readMsg.raw)) + + if readMsg.isFinalized { + // Stream IDs for websocket streams are static. + // https://github.com/kubernetes/client-go/blob/v0.30.0-rc.1/tools/remotecommand/websocket.go#L218 + if readMsg.streamID.Load() == remotecommand.StreamResize { + var err error + var msg tsrecorder.ResizeMsg + if err = json.Unmarshal(readMsg.payload, &msg); err != nil { + return 0, fmt.Errorf("error umarshalling resize message: %w", err) + } + c.ch.Width = msg.Width + c.ch.Height = msg.Height + } + } + c.currentReadMsg = readMsg + return n, nil +} + +// Write parses the written bytes as WebSocket message fragment. If the message +// is for stdout or stderr streams, it is written to the configured tsrecorder. +// A message fragment can be incomplete. +func (c *conn) Write(b []byte) (int, error) { + c.wmu.Lock() + defer c.wmu.Unlock() + if len(b) == 0 { + c.log.Debug("[unexpected] Write called with 0 bytes") + return 0, nil + } + + typ := messageType(opcode(b)) + // If we are in process of parsing a message fragment, the received + // bytes are not structured as a message fragment and can not be used to + // determine a message fragment. + if c.writeBufHasIncompleteFragment() { // buffer contains previous incomplete fragment + var err error + if typ, err = c.curWriteMsgType(); err != nil { + return 0, err + } + } + + if isControlMessage(typ) { + return c.Conn.Write(b) + } + + writeMsg := &message{typ: typ} // start a new message... + // ... or continue the existing one if it has not been finalized. 
+ if c.writeMsgIsIncomplete() || c.writeBufHasIncompleteFragment() { + writeMsg = c.currentWriteMsg + } + + if _, err := c.writeBuf.Write(b); err != nil { + c.log.Errorf("write: error writing to write buf: %v", err) + return 0, fmt.Errorf("[unexpected] error writing to internal write buffer: %w", err) + } + + ok, err := writeMsg.Parse(c.writeBuf.Bytes(), c.log) + if err != nil { + c.log.Errorf("write: parsing a message errored: %v", err) + return 0, fmt.Errorf("write: error parsing message: %v", err) + } + c.currentWriteMsg = writeMsg + if !ok { // incomplete fragment + return len(b), nil + } + c.writeBuf.Next(len(writeMsg.raw)) // advance frame + + if len(writeMsg.payload) != 0 && writeMsg.isFinalized { + if writeMsg.streamID.Load() == remotecommand.StreamStdOut || writeMsg.streamID.Load() == remotecommand.StreamStdErr { + var err error + c.writeCastHeaderOnce.Do(func() { + var j []byte + j, err = json.Marshal(c.ch) + if err != nil { + c.log.Errorf("error marhsalling conn: %v", err) + return + } + j = append(j, '\n') + err = c.rec.WriteCastLine(j) + if err != nil { + c.log.Errorf("received error from recorder: %v", err) + } + }) + if err != nil { + return 0, fmt.Errorf("error writing CastHeader: %w", err) + } + if err := c.rec.Write(writeMsg.payload); err != nil { + return 0, fmt.Errorf("error writing message to recorder: %v", err) + } + } + } + _, err = c.Conn.Write(c.currentWriteMsg.raw) + if err != nil { + c.log.Errorf("write: error writing to conn: %v", err) + } + return len(b), nil +} + +func (c *conn) Close() error { + c.wmu.Lock() + defer c.wmu.Unlock() + if c.closed { + return nil + } + c.closed = true + connCloseErr := c.Conn.Close() + recCloseErr := c.rec.Close() + return multierr.New(connCloseErr, recCloseErr) +} + +// writeBufHasIncompleteFragment returns true if the latest data message +// fragment written to the connection was incomplete and the following write +// must be the remaining payload bytes of that fragment. 
+func (c *conn) writeBufHasIncompleteFragment() bool { + return c.writeBuf.Len() != 0 +} + +// readBufHasIncompleteFragment returns true if the latest data message +// fragment read from the connection was incomplete and the following read +// must be the remaining payload bytes of that fragment. +func (c *conn) readBufHasIncompleteFragment() bool { + return c.readBuf.Len() != 0 +} + +// writeMsgIsIncomplete returns true if the latest WebSocket message written to +// the connection was fragmented and the next data message fragment written to +// the connection must be a fragment of that message. +// https://www.rfc-editor.org/rfc/rfc6455#section-5.4 +func (c *conn) writeMsgIsIncomplete() bool { + return c.currentWriteMsg != nil && !c.currentWriteMsg.isFinalized +} + +// readMsgIsIncomplete returns true if the latest WebSocket message read from +// the connection was fragmented and the next data message fragment read from +// the connection must be a fragment of that message. +// https://www.rfc-editor.org/rfc/rfc6455#section-5.4 +func (c *conn) readMsgIsIncomplete() bool { + return c.currentReadMsg != nil && !c.currentReadMsg.isFinalized +} +func (c *conn) curReadMsgType() (messageType, error) { + if c.currentReadMsg != nil { + return c.currentReadMsg.typ, nil + } + return 0, errors.New("[unexpected] attempted to determine type for nil message") +} + +func (c *conn) curWriteMsgType() (messageType, error) { + if c.currentWriteMsg != nil { + return c.currentWriteMsg.typ, nil + } + return 0, errors.New("[unexpected] attempted to determine type for nil message") +} + +// opcode reads the websocket message opcode that denotes the message type. +// opcode is contained in bits [4-7] of the message. 
+// https://www.rfc-editor.org/rfc/rfc6455#section-5.2 +func opcode(b []byte) int { + // 0xf = 00001111; b & 00001111 zeroes out bits [0 - 3] of b + var mask byte = 0xf + return int(b[0] & mask) +} diff --git a/k8s-operator/sessionrecording/ws/conn_test.go b/k8s-operator/sessionrecording/ws/conn_test.go new file mode 100644 index 0000000000000..2fcbeb7cabdc1 --- /dev/null +++ b/k8s-operator/sessionrecording/ws/conn_test.go @@ -0,0 +1,257 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package ws + +import ( + "fmt" + "reflect" + "testing" + + "go.uber.org/zap" + "k8s.io/apimachinery/pkg/util/remotecommand" + "tailscale.com/k8s-operator/sessionrecording/fakes" + "tailscale.com/k8s-operator/sessionrecording/tsrecorder" + "tailscale.com/sessionrecording" + "tailscale.com/tstest" +) + +func Test_conn_Read(t *testing.T) { + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + // Resize stream ID + {"width": 10, "height": 20} + testResizeMsg := []byte{byte(remotecommand.StreamResize), 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3a, 0x32, 0x30, 0x7d} + lenResizeMsgPayload := byte(len(testResizeMsg)) + + tests := []struct { + name string + inputs [][]byte + wantWidth int + wantHeight int + }{ + { + name: "single_read_control_message", + inputs: [][]byte{{0x88, 0x0}}, + }, + { + name: "single_read_resize_message", + inputs: [][]byte{append([]byte{0x82, lenResizeMsgPayload}, testResizeMsg...)}, + wantWidth: 10, + wantHeight: 20, + }, + { + name: "two_reads_resize_message", + inputs: [][]byte{{0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3a, 0x32, 0x30, 0x7d}}, + wantWidth: 10, + wantHeight: 20, + }, + { + name: "three_reads_resize_message_with_split_fragment", + inputs: [][]byte{{0x2, 0x9, 0x4, 0x7b, 0x22, 
0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74}, {0x22, 0x3a, 0x32, 0x30, 0x7d}}, + wantWidth: 10, + wantHeight: 20, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tc := &fakes.TestConn{} + tc.ResetReadBuf() + c := &conn{ + Conn: tc, + log: zl.Sugar(), + } + for i, input := range tt.inputs { + if err := tc.WriteReadBufBytes(input); err != nil { + t.Fatalf("writing bytes to test conn: %v", err) + } + _, err := c.Read(make([]byte, len(input))) + if err != nil { + t.Errorf("[%d] conn.Read() errored %v", i, err) + return + } + } + if tt.wantHeight != 0 || tt.wantWidth != 0 { + if tt.wantWidth != c.ch.Width { + t.Errorf("wants width: %v, got %v", tt.wantWidth, c.ch.Width) + } + if tt.wantHeight != c.ch.Height { + t.Errorf("want height: %v, got %v", tt.wantHeight, c.ch.Height) + } + } + }) + } +} + +func Test_conn_Write(t *testing.T) { + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + cl := tstest.NewClock(tstest.ClockOpts{}) + tests := []struct { + name string + inputs [][]byte + wantForwarded []byte + wantRecorded []byte + firstWrite bool + width int + height int + }{ + { + name: "single_write_control_frame", + inputs: [][]byte{{0x88, 0x0}}, + wantForwarded: []byte{0x88, 0x0}, + }, + { + name: "single_write_stdout_data_message", + inputs: [][]byte{{0x82, 0x3, 0x1, 0x7, 0x8}}, + wantForwarded: []byte{0x82, 0x3, 0x1, 0x7, 0x8}, + wantRecorded: fakes.CastLine(t, []byte{0x7, 0x8}, cl), + }, + { + name: "single_write_stderr_data_message", + inputs: [][]byte{{0x82, 0x3, 0x2, 0x7, 0x8}}, + wantForwarded: []byte{0x82, 0x3, 0x2, 0x7, 0x8}, + wantRecorded: fakes.CastLine(t, []byte{0x7, 0x8}, cl), + }, + { + name: "single_write_stdin_data_message", + inputs: [][]byte{{0x82, 0x3, 0x0, 0x7, 0x8}}, + wantForwarded: []byte{0x82, 0x3, 0x0, 0x7, 0x8}, + }, + { + name: "single_write_stdout_data_message_with_cast_header", + inputs: [][]byte{{0x82, 0x3, 0x1, 0x7, 
0x8}}, + wantForwarded: []byte{0x82, 0x3, 0x1, 0x7, 0x8}, + wantRecorded: append(fakes.AsciinemaResizeMsg(t, 10, 20), fakes.CastLine(t, []byte{0x7, 0x8}, cl)...), + width: 10, + height: 20, + firstWrite: true, + }, + { + name: "two_writes_stdout_data_message", + inputs: [][]byte{{0x2, 0x3, 0x1, 0x7, 0x8}, {0x80, 0x6, 0x1, 0x1, 0x2, 0x3, 0x4, 0x5}}, + wantForwarded: []byte{0x2, 0x3, 0x1, 0x7, 0x8, 0x80, 0x6, 0x1, 0x1, 0x2, 0x3, 0x4, 0x5}, + wantRecorded: fakes.CastLine(t, []byte{0x7, 0x8, 0x1, 0x2, 0x3, 0x4, 0x5}, cl), + }, + { + name: "three_writes_stdout_data_message_with_split_fragment", + inputs: [][]byte{{0x2, 0x3, 0x1, 0x7, 0x8}, {0x80, 0x6, 0x1, 0x1, 0x2, 0x3}, {0x4, 0x5}}, + wantForwarded: []byte{0x2, 0x3, 0x1, 0x7, 0x8, 0x80, 0x6, 0x1, 0x1, 0x2, 0x3, 0x4, 0x5}, + wantRecorded: fakes.CastLine(t, []byte{0x7, 0x8, 0x1, 0x2, 0x3, 0x4, 0x5}, cl), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tc := &fakes.TestConn{} + sr := &fakes.TestSessionRecorder{} + rec := tsrecorder.New(sr, cl, cl.Now(), true) + c := &conn{ + Conn: tc, + log: zl.Sugar(), + ch: sessionrecording.CastHeader{ + Width: tt.width, + Height: tt.height, + }, + rec: rec, + } + if !tt.firstWrite { + // This test case does not intend to test that cast header gets written once. + c.writeCastHeaderOnce.Do(func() {}) + } + for i, input := range tt.inputs { + _, err := c.Write(input) + if err != nil { + t.Fatalf("[%d] conn.Write() errored: %v", i, err) + } + } + // Assert that the expected bytes have been forwarded to the original destination. + gotForwarded := tc.WriteBufBytes() + if !reflect.DeepEqual(gotForwarded, tt.wantForwarded) { + t.Errorf("expected bytes not forwarded, wants\n%x\ngot\n%x", tt.wantForwarded, gotForwarded) + } + + // Assert that the expected bytes have been forwarded to the session recorder. 
+ gotRecorded := sr.Bytes() + if !reflect.DeepEqual(gotRecorded, tt.wantRecorded) { + t.Errorf("expected bytes not recorded, wants\n%b\ngot\n%b", tt.wantRecorded, gotRecorded) + } + }) + } +} + +// Test_conn_ReadRand tests reading arbitrarily generated byte slices from conn to +// test that we don't panic when parsing input from a broken or malicious +// client. +func Test_conn_ReadRand(t *testing.T) { + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatalf("error creating a test logger: %v", err) + } + for i := range 100 { + tc := &fakes.TestConn{} + tc.ResetReadBuf() + c := &conn{ + Conn: tc, + log: zl.Sugar(), + } + bb := fakes.RandomBytes(t) + for j, input := range bb { + if err := tc.WriteReadBufBytes(input); err != nil { + t.Fatalf("[%d] writing bytes to test conn: %v", i, err) + } + f := func() { + c.Read(make([]byte, len(input))) + } + testPanic(t, f, fmt.Sprintf("[%d %d] Read panic parsing input of length %d first bytes: %v, current read message: %+#v", i, j, len(input), firstBytes(input), c.currentReadMsg)) + } + } +} + +// Test_conn_WriteRand calls conn.Write with an arbitrary input to validate that it does not +// panic. 
+func Test_conn_WriteRand(t *testing.T) { + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatalf("error creating a test logger: %v", err) + } + cl := tstest.NewClock(tstest.ClockOpts{}) + sr := &fakes.TestSessionRecorder{} + rec := tsrecorder.New(sr, cl, cl.Now(), true) + for i := range 100 { + tc := &fakes.TestConn{} + c := &conn{ + Conn: tc, + log: zl.Sugar(), + rec: rec, + } + bb := fakes.RandomBytes(t) + for j, input := range bb { + f := func() { + c.Write(input) + } + testPanic(t, f, fmt.Sprintf("[%d %d] Write: panic parsing input of length %d first bytes %b current write message %+#v", i, j, len(input), firstBytes(input), c.currentWriteMsg)) + } + } +} + +func testPanic(t *testing.T, f func(), msg string) { + t.Helper() + defer func() { + if r := recover(); r != nil { + t.Fatal(msg, r) + } + }() + f() +} + +func firstBytes(b []byte) []byte { + if len(b) < 10 { + return b + } + return b[:10] +} diff --git a/k8s-operator/sessionrecording/ws/message.go b/k8s-operator/sessionrecording/ws/message.go new file mode 100644 index 0000000000000..713febec76ae8 --- /dev/null +++ b/k8s-operator/sessionrecording/ws/message.go @@ -0,0 +1,267 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package ws + +import ( + "encoding/binary" + "fmt" + "sync/atomic" + + "github.com/pkg/errors" + "go.uber.org/zap" + + "golang.org/x/net/websocket" +) + +const ( + noOpcode messageType = 0 // continuation frame for fragmented messages + binaryMessage messageType = 2 +) + +// messageType is the type of a websocket data or control message as defined by opcode. +// https://www.rfc-editor.org/rfc/rfc6455#section-5.2 +// Known types of control messages are close, ping and pong. 
+// https://www.rfc-editor.org/rfc/rfc6455#section-5.5 +// The only data message type supported by Kubernetes is binary message +// https://github.com/kubernetes/client-go/blob/v0.30.0-rc.1/tools/remotecommand/websocket.go#L281 +type messageType int + +// message is a parsed Websocket Message. +type message struct { + // payload is the contents of the so far parsed Websocket + // data Message payload, potentially from multiple fragments written by + // multiple invocations of Parse. As per RFC 6455 we can assume that the + // fragments will always arrive in order and data messages will not be + // interleaved. + payload []byte + + // isFinalized is set to true if msgPayload contains full contents of + // the message (the final fragment has been received). + isFinalized bool + + // streamID is the stream to which the message belongs, i.e. stdin, stdout + // etc. It is one of the stream IDs defined in + // https://github.com/kubernetes/apimachinery/blob/73d12d09c5be8703587b5127416eb83dc3b7e182/pkg/util/httpstream/wsstream/doc.go#L23-L36 + streamID atomic.Uint32 + + // typ is the type of a WebsocketMessage as defined by its opcode + // https://www.rfc-editor.org/rfc/rfc6455#section-5.2 + typ messageType + raw []byte +} + +// Parse accepts a websocket message fragment as a byte slice and parses its contents. +// It returns true if the fragment is complete, false if the fragment is incomplete. +// If the fragment is incomplete, Parse will be called again with the same fragment + more bytes when those are received. +// If the fragment is complete, it will be parsed into msg. +// A complete fragment can be: +// - a fragment that consists of a whole message +// - an initial fragment for a message for which we expect more fragments +// - a subsequent fragment for a message that we are currently parsing and whose so-far parsed contents are stored in msg. +// Parse must not be called with bytes that don't contain fragment header (so, no less than 2 bytes). 
+// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-------+-+-------------+-------------------------------+ +// |F|R|R|R| opcode|M| Payload len | Extended payload length | +// |I|S|S|S| (4) |A| (7) | (16/64) | +// |N|V|V|V| |S| | (if payload len==126/127) | +// | |1|2|3| |K| | | +// +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - + +// | Extended payload length continued, if payload len == 127 | +// + - - - - - - - - - - - - - - - +-------------------------------+ +// | |Masking-key, if MASK set to 1 | +// +-------------------------------+-------------------------------+ +// | Masking-key (continued) | Payload Data | +// +-------------------------------- - - - - - - - - - - - - - - - + +// : Payload Data continued ... : +// + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +// | Payload Data continued ... | +// +---------------------------------------------------------------+ +// https://www.rfc-editor.org/rfc/rfc6455#section-5.2 +// +// Fragmentation rules: +// An unfragmented message consists of a single frame with the FIN +// bit set (Section 5.2) and an opcode other than 0. +// A fragmented message consists of a single frame with the FIN bit +// clear and an opcode other than 0, followed by zero or more frames +// with the FIN bit clear and the opcode set to 0, and terminated by +// a single frame with the FIN bit set and an opcode of 0. 
+// https://www.rfc-editor.org/rfc/rfc6455#section-5.4 +func (msg *message) Parse(b []byte, log *zap.SugaredLogger) (bool, error) { + if len(b) < 2 { + return false, fmt.Errorf("[unexpected] Parse should not be called with less than 2 bytes, got %d bytes", len(b)) + } + if msg.typ != binaryMessage { + return false, fmt.Errorf("[unexpected] internal error: attempted to parse a message with type %d", msg.typ) + } + isInitialFragment := len(msg.raw) == 0 + + msg.isFinalized = isFinalFragment(b) + + maskSet := isMasked(b) + + payloadLength, payloadOffset, maskOffset, err := fragmentDimensions(b, maskSet) + if err != nil { + return false, fmt.Errorf("error determining payload length: %w", err) + } + log.Debugf("parse: parsing a message fragment with payload length: %d payload offset: %d maskOffset: %d mask set: %t, is finalized: %t, is initial fragment: %t", payloadLength, payloadOffset, maskOffset, maskSet, msg.isFinalized, isInitialFragment) + + if len(b) < int(payloadOffset+payloadLength) { // incomplete fragment + return false, nil + } + // TODO (irbekrm): perhaps only do this extra allocation if we know we + // will need to unmask? + msg.raw = make([]byte, int(payloadOffset)+int(payloadLength)) + copy(msg.raw, b[:payloadOffset+payloadLength]) + + // Extract the payload. + msgPayload := b[payloadOffset : payloadOffset+payloadLength] + + // Unmask the payload if needed. + // TODO (irbekrm): instead of unmasking all of the payload each time, + // determine if the payload is for a resize message early and skip + // unmasking the remaining bytes if not. + if maskSet { + m := b[maskOffset:payloadOffset] + var mask [4]byte + copy(mask[:], m) + maskBytes(mask, msgPayload) + } + + // Determine what stream the message is for. Stream ID of a Kubernetes + // streaming session is a 32bit integer, stored in the first byte of the + // message payload. 
+ // https://github.com/kubernetes/apimachinery/commit/73d12d09c5be8703587b5127416eb83dc3b7e182#diff-291f96e8632d04d2d20f5fb00f6b323492670570d65434e8eac90c7a442d13bdR23-R36 + if len(msgPayload) == 0 { + return false, errors.New("[unexpected] received a message fragment with no stream ID") + } + + streamID := uint32(msgPayload[0]) + if !isInitialFragment && msg.streamID.Load() != streamID { + return false, fmt.Errorf("[unexpected] received message fragments with mismatched streamIDs %d and %d", msg.streamID.Load(), streamID) + } + msg.streamID.Store(streamID) + + // This is normal, Kubernetes seem to send a couple data messages with + // no payloads at the start. + if len(msgPayload) < 2 { + return true, nil + } + msgPayload = msgPayload[1:] // remove the stream ID byte + msg.payload = append(msg.payload, msgPayload...) + return true, nil +} + +// maskBytes applies mask to bytes in place. +// https://www.rfc-editor.org/rfc/rfc6455#section-5.3 +func maskBytes(key [4]byte, b []byte) { + for i := range b { + b[i] = b[i] ^ key[i%4] + } +} + +// isControlMessage returns true if the message type is one of the known control +// frame message types. +// https://www.rfc-editor.org/rfc/rfc6455#section-5.5 +func isControlMessage(t messageType) bool { + const ( + closeMessage messageType = 8 + pingMessage messageType = 9 + pongMessage messageType = 10 + ) + return t == closeMessage || t == pingMessage || t == pongMessage +} + +// isFinalFragment can be called with websocket message fragment and returns true if +// the fragment is the final fragment of a websocket message. +func isFinalFragment(b []byte) bool { + return extractFirstBit(b[0]) != 0 +} + +// isMasked can be called with a websocket message fragment and returns true if +// the payload of the message is masked. It uses the mask bit to determine if +// the payload is masked. 
+// https://www.rfc-editor.org/rfc/rfc6455#section-5.3 +func isMasked(b []byte) bool { + return extractFirstBit(b[1]) != 0 +} + +// extractFirstBit extracts first bit of a byte by zeroing out all the other +// bits. +func extractFirstBit(b byte) byte { + return b & 0x80 +} + +// zeroFirstBit returns the provided byte with the first bit set to 0. +func zeroFirstBit(b byte) byte { + return b & 0x7f +} + +// fragmentDimensions returns payload length as well as payload offset and mask offset. +func fragmentDimensions(b []byte, maskSet bool) (payloadLength, payloadOffset, maskOffset uint64, _ error) { + + // payload length can be stored either in bits [9-15] or in bytes 2, 3 + // or in bytes 2, 3, 4, 5, 6, 7. + // https://www.rfc-editor.org/rfc/rfc6455#section-5.2 + // 0 1 2 3 + // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + // +-+-+-+-+-------+-+-------------+-------------------------------+ + // |F|R|R|R| opcode|M| Payload len | Extended payload length | + // |I|S|S|S| (4) |A| (7) | (16/64) | + // |N|V|V|V| |S| | (if payload len==126/127) | + // | |1|2|3| |K| | | + // +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - + + // | Extended payload length continued, if payload len == 127 | + // + - - - - - - - - - - - - - - - +-------------------------------+ + // | |Masking-key, if MASK set to 1 | + // +-------------------------------+-------------------------------+ + payloadLengthIndicator := zeroFirstBit(b[1]) + switch { + case payloadLengthIndicator < 126: + maskOffset = 2 + payloadLength = uint64(payloadLengthIndicator) + case payloadLengthIndicator == 126: + maskOffset = 4 + if len(b) < int(maskOffset) { + return 0, 0, 0, fmt.Errorf("invalid message fragment- length indicator suggests that length is stored in bytes 2:4, but message length is only %d", len(b)) + } + payloadLength = uint64(binary.BigEndian.Uint16(b[2:4])) + case payloadLengthIndicator == 127: + maskOffset = 10 + if len(b) < int(maskOffset) { + return 0, 0, 0, 
fmt.Errorf("invalid message fragment- length indicator suggests that length is stored in bytes 2:10, but message length is only %d", len(b)) + } + payloadLength = binary.BigEndian.Uint64(b[2:10]) + default: + return 0, 0, 0, fmt.Errorf("unexpected payload length indicator value: %v", payloadLengthIndicator) + } + + // Ensure that a rogue or broken client doesn't cause us attempt to + // allocate a huge array by setting a high payload size. + // websocket.DefaultMaxPayloadBytes is the maximum payload size accepted + // by server side of this connection, so we can safely reject messages + // with larger payload size. + if payloadLength > websocket.DefaultMaxPayloadBytes { + return 0, 0, 0, fmt.Errorf("[unexpected]: too large payload size: %v", payloadLength) + } + + // Masking key can take up 0 or 4 bytes- we need to take that into + // account when determining payload offset. + // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + // .... + // + - - - - - - - - - - - - - - - +-------------------------------+ + // | |Masking-key, if MASK set to 1 | + // +-------------------------------+-------------------------------+ + // | Masking-key (continued) | Payload Data | + // + - - - - - - - - - - - - - - - +-------------------------------+ + // ... 
+ if maskSet { + payloadOffset = maskOffset + 4 + } else { + payloadOffset = maskOffset + } + return +} diff --git a/k8s-operator/sessionrecording/ws/message_test.go b/k8s-operator/sessionrecording/ws/message_test.go new file mode 100644 index 0000000000000..f634f86dc55c2 --- /dev/null +++ b/k8s-operator/sessionrecording/ws/message_test.go @@ -0,0 +1,215 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package ws + +import ( + "encoding/binary" + "fmt" + "reflect" + "testing" + "time" + + "math/rand" + + "go.uber.org/zap" + "golang.org/x/net/websocket" +) + +func Test_msg_Parse(t *testing.T) { + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatalf("error creating a test logger: %v", err) + } + testMask := [4]byte{1, 2, 3, 4} + bs126, bs126Len := bytesSlice2ByteLen(t) + bs127, bs127Len := byteSlice8ByteLen(t) + tests := []struct { + name string + b []byte + initialPayload []byte + wantPayload []byte + wantIsFinalized bool + wantStreamID uint32 + wantErr bool + }{ + { + name: "single_fragment_stdout_stream_no_payload_no_mask", + b: []byte{0x82, 0x1, 0x1}, + wantPayload: nil, + wantIsFinalized: true, + wantStreamID: 1, + }, + { + name: "single_fragment_stderr_steam_no_payload_has_mask", + b: append([]byte{0x82, 0x81, 0x1, 0x2, 0x3, 0x4}, maskedBytes(testMask, []byte{0x2})...), + wantPayload: nil, + wantIsFinalized: true, + wantStreamID: 2, + }, + { + name: "single_fragment_stdout_stream_no_mask_has_payload", + b: []byte{0x82, 0x3, 0x1, 0x7, 0x8}, + wantPayload: []byte{0x7, 0x8}, + wantIsFinalized: true, + wantStreamID: 1, + }, + { + name: "single_fragment_stdout_stream_has_mask_has_payload", + b: append([]byte{0x82, 0x83, 0x1, 0x2, 0x3, 0x4}, maskedBytes(testMask, []byte{0x1, 0x7, 0x8})...), + wantPayload: []byte{0x7, 0x8}, + wantIsFinalized: true, + wantStreamID: 1, + }, + { + name: "initial_fragment_stdout_stream_no_mask_has_payload", + b: []byte{0x2, 0x3, 0x1, 0x7, 0x8}, + wantPayload: 
[]byte{0x7, 0x8}, + wantStreamID: 1, + }, + { + name: "initial_fragment_stdout_stream_has_mask_has_payload", + b: append([]byte{0x2, 0x83, 0x1, 0x2, 0x3, 0x4}, maskedBytes(testMask, []byte{0x1, 0x7, 0x8})...), + wantPayload: []byte{0x7, 0x8}, + wantStreamID: 1, + }, + { + name: "subsequent_fragment_stdout_stream_no_mask_has_payload", + b: []byte{0x0, 0x3, 0x1, 0x7, 0x8}, + initialPayload: []byte{0x1, 0x2, 0x3}, + wantPayload: []byte{0x1, 0x2, 0x3, 0x7, 0x8}, + wantStreamID: 1, + }, + { + name: "subsequent_fragment_stdout_stream_has_mask_has_payload", + b: append([]byte{0x0, 0x83, 0x1, 0x2, 0x3, 0x4}, maskedBytes(testMask, []byte{0x1, 0x7, 0x8})...), + initialPayload: []byte{0x1, 0x2, 0x3}, + wantPayload: []byte{0x1, 0x2, 0x3, 0x7, 0x8}, + wantStreamID: 1, + }, + { + name: "final_fragment_stdout_stream_no_mask_has_payload", + b: []byte{0x80, 0x3, 0x1, 0x7, 0x8}, + initialPayload: []byte{0x1, 0x2, 0x3}, + wantIsFinalized: true, + wantPayload: []byte{0x1, 0x2, 0x3, 0x7, 0x8}, + wantStreamID: 1, + }, + { + name: "final_fragment_stdout_stream_has_mask_has_payload", + b: append([]byte{0x80, 0x83, 0x1, 0x2, 0x3, 0x4}, maskedBytes(testMask, []byte{0x1, 0x7, 0x8})...), + initialPayload: []byte{0x1, 0x2, 0x3}, + wantIsFinalized: true, + wantPayload: []byte{0x1, 0x2, 0x3, 0x7, 0x8}, + wantStreamID: 1, + }, + { + name: "single_large_fragment_no_mask_length_hint_126", + b: append(append([]byte{0x80, 0x7e}, bs126Len...), append([]byte{0x1}, bs126...)...), + wantIsFinalized: true, + wantPayload: bs126, + wantStreamID: 1, + }, + { + name: "single_large_fragment_no_mask_length_hint_127", + b: append(append([]byte{0x80, 0x7f}, bs127Len...), append([]byte{0x1}, bs127...)...), + wantIsFinalized: true, + wantPayload: bs127, + wantStreamID: 1, + }, + { + name: "zero_length_bytes", + b: []byte{}, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + msg := &message{ + typ: binaryMessage, + payload: tt.initialPayload, + } + if _, err := 
msg.Parse(tt.b, zl.Sugar()); (err != nil) != tt.wantErr { + t.Errorf("msg.Parse() = %v, wantsErr: %t", err, tt.wantErr) + } + if msg.isFinalized != tt.wantIsFinalized { + t.Errorf("wants message to be finalized: %t, got: %t", tt.wantIsFinalized, msg.isFinalized) + } + if msg.streamID.Load() != tt.wantStreamID { + t.Errorf("wants stream ID: %d, got: %d", tt.wantStreamID, msg.streamID.Load()) + } + if !reflect.DeepEqual(msg.payload, tt.wantPayload) { + t.Errorf("unexpected message payload after Parse, wants %b got %b", tt.wantPayload, msg.payload) + } + }) + } +} + +// Test_msg_Parse_Rand calls Parse with a randomly generated input to verify +// that it doesn't panic. +func Test_msg_Parse_Rand(t *testing.T) { + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatalf("error creating a test logger: %v", err) + } + r := rand.New(rand.NewSource(time.Now().UnixNano())) + for i := range 100 { + n := r.Intn(4096) + b := make([]byte, n) + _, err := r.Read(b) + if err != nil { + t.Fatalf("error generating random byte slice: %v", err) + } + msg := message{typ: binaryMessage} + f := func() { + msg.Parse(b, zl.Sugar()) + } + testPanic(t, f, fmt.Sprintf("[%d] Parse panicked running with byte slice of length %d: %v", i, n, r)) + } +} + +// byteSlice2ByteLen generates a number that represents websocket message fragment length and is stored in an 8 byte slice. +// Returns the byte slice with the length as well as a slice of arbitrary bytes of the given length. +// This is used to generate test input representing websocket message with payload length hint 126. 
+// https://www.rfc-editor.org/rfc/rfc6455#section-5.2 +func bytesSlice2ByteLen(t *testing.T) ([]byte, []byte) { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + var n uint16 + n = uint16(rand.Intn(65535 - 1)) // space for an additional 1 byte stream ID + b := make([]byte, n) + _, err := r.Read(b) + if err != nil { + t.Fatalf("error generating random byte slice: %v ", err) + } + bb := make([]byte, 2) + binary.BigEndian.PutUint16(bb, n+1) // + stream ID + return b, bb +} + +// byteSlice8ByteLen generates a number that represents websocket message fragment length and is stored in an 8 byte slice. +// Returns the byte slice with the length as well as a slice of arbitrary bytes of the given length. +// This is used to generate test input representing websocket message with payload length hint 127. +// https://www.rfc-editor.org/rfc/rfc6455#section-5.2 +func byteSlice8ByteLen(t *testing.T) ([]byte, []byte) { + nanos := time.Now().UnixNano() + t.Logf("Creating random source with seed %v", nanos) + r := rand.New(rand.NewSource(nanos)) + var n uint64 + n = uint64(rand.Intn(websocket.DefaultMaxPayloadBytes - 1)) // space for an additional 1 byte stream ID + t.Logf("byteSlice8ByteLen: generating message payload of length %d", n) + b := make([]byte, n) + _, err := r.Read(b) + if err != nil { + t.Fatalf("error generating random byte slice: %v ", err) + } + bb := make([]byte, 8) + binary.BigEndian.PutUint64(bb, n+1) // + stream ID + return b, bb +} + +func maskedBytes(mask [4]byte, b []byte) []byte { + maskBytes(mask, b) + return b +} diff --git a/licenses/android.md b/licenses/android.md index f46a8d270af67..64e321de6a9ed 100644 --- a/licenses/android.md +++ b/licenses/android.md @@ -60,8 +60,8 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/cabfb018fe85/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/b535050b2aa4/LICENSE)) - [github.com/tailscale/tailscale-android/libtailscale](https://pkg.go.dev/github.com/tailscale/tailscale-android/libtailscale) ([BSD-3-Clause](https://github.com/tailscale/tailscale-android/blob/HEAD/LICENSE)) - - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/2f5d148bcfe1/LICENSE)) - - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/62b9a7c569f9/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/71393c576b98/LICENSE)) + - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tcnksm/go-httpstat](https://pkg.go.dev/github.com/tcnksm/go-httpstat) ([MIT](https://github.com/tcnksm/go-httpstat/blob/v0.2.0/LICENSE)) - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/a3c409a6018e/LICENSE)) - [github.com/vishvananda/netlink/nl](https://pkg.go.dev/github.com/vishvananda/netlink/nl) ([Apache-2.0](https://github.com/vishvananda/netlink/blob/v1.2.1-beta.2/LICENSE)) @@ -71,18 +71,18 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
- [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/4f986261bf13/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - [go4.org/unsafe/assume-no-moving-gc](https://pkg.go.dev/go4.org/unsafe/assume-no-moving-gc) ([BSD-3-Clause](https://github.com/go4org/unsafe-assume-no-moving-gc/blob/e7c30c78aeb2/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.24.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.25.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/1b970713:LICENSE)) - [golang.org/x/mobile](https://pkg.go.dev/golang.org/x/mobile) ([BSD-3-Clause](https://cs.opensource.google/go/x/mobile/+/c58ccf4b:LICENSE)) - - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.18.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.26.0:LICENSE)) + - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.19.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.27.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.7.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.21.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.21.0:LICENSE)) + - 
[golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.22.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.22.0:LICENSE)) - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.16.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.5.0:LICENSE)) - - [golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.22.0:LICENSE)) - - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/ee1e1f6070e3/LICENSE)) + - [golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.23.0:LICENSE)) + - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/64c016c92987/LICENSE)) - [inet.af/netaddr](https://pkg.go.dev/inet.af/netaddr) ([BSD-3-Clause](Unknown)) - - [nhooyr.io/websocket](https://pkg.go.dev/nhooyr.io/websocket) ([ISC](https://github.com/nhooyr/websocket/blob/v1.8.10/LICENSE.txt)) + - [nhooyr.io/websocket](https://pkg.go.dev/nhooyr.io/websocket) ([ISC](https://github.com/nhooyr/websocket-old/blob/v1.8.10/LICENSE.txt)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/apple.md b/licenses/apple.md index 98c7405889070..5dd9b975c0f15 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -29,6 +29,7 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.20.2/LICENSE)) - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.20.2/internal/sync/singleflight/LICENSE)) - [github.com/bits-and-blooms/bitset](https://pkg.go.dev/github.com/bits-and-blooms/bitset) ([BSD-3-Clause](https://github.com/bits-and-blooms/bitset/blob/v1.13.0/LICENSE)) + - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) @@ -65,8 +66,8 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/cabfb018fe85/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/b535050b2aa4/LICENSE)) - - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/2f5d148bcfe1/LICENSE)) - - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/62b9a7c569f9/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/71393c576b98/LICENSE)) + - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tcnksm/go-httpstat](https://pkg.go.dev/github.com/tcnksm/go-httpstat) ([MIT](https://github.com/tcnksm/go-httpstat/blob/v0.2.0/LICENSE)) - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/a3c409a6018e/LICENSE)) - [github.com/vishvananda/netlink/nl](https://pkg.go.dev/github.com/vishvananda/netlink/nl) ([Apache-2.0](https://github.com/vishvananda/netlink/blob/v1.2.1-beta.2/LICENSE)) @@ -74,16 +75,15 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.24.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.25.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/fe59bbe5:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.26.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.27.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.7.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.21.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.21.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.22.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.22.0:LICENSE)) - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.16.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) 
([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.5.0:LICENSE)) - - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/ee1e1f6070e3/LICENSE)) - - [nhooyr.io/websocket](https://pkg.go.dev/nhooyr.io/websocket) ([ISC](https://github.com/nhooyr/websocket/blob/v1.8.10/LICENSE.txt)) + - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/64c016c92987/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) ## Additional Dependencies diff --git a/licenses/tailscale.md b/licenses/tailscale.md index 06620be74665f..2fd07fc3f058e 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -34,8 +34,9 @@ Some packages may only be included on certain architectures or operating systems - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.19.0/LICENSE)) - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.19.0/internal/sync/singleflight/LICENSE)) - [github.com/bits-and-blooms/bitset](https://pkg.go.dev/github.com/bits-and-blooms/bitset) ([BSD-3-Clause](https://github.com/bits-and-blooms/bitset/blob/v1.13.0/LICENSE)) + - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - - [github.com/creack/pty](https://pkg.go.dev/github.com/creack/pty) ([MIT](https://github.com/creack/pty/blob/v1.1.21/LICENSE)) + - [github.com/creack/pty](https://pkg.go.dev/github.com/creack/pty) 
([MIT](https://github.com/creack/pty/blob/v1.1.23/LICENSE)) - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/a09d6be7affa/LICENSE)) - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) @@ -84,8 +85,8 @@ Some packages may only be included on certain architectures or operating systems - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/b535050b2aa4/LICENSE)) - [github.com/tailscale/web-client-prebuilt](https://pkg.go.dev/github.com/tailscale/web-client-prebuilt) ([BSD-3-Clause](https://github.com/tailscale/web-client-prebuilt/blob/5db17b287bf1/LICENSE)) - [github.com/tailscale/wf](https://pkg.go.dev/github.com/tailscale/wf) ([BSD-3-Clause](https://github.com/tailscale/wf/blob/6fbb0a674ee6/LICENSE)) - - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/2f5d148bcfe1/LICENSE)) - - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/62b9a7c569f9/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/71393c576b98/LICENSE)) + - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tcnksm/go-httpstat](https://pkg.go.dev/github.com/tcnksm/go-httpstat) ([MIT](https://github.com/tcnksm/go-httpstat/blob/v0.2.0/LICENSE)) - 
[github.com/toqueteos/webbrowser](https://pkg.go.dev/github.com/toqueteos/webbrowser) ([MIT](https://github.com/toqueteos/webbrowser/blob/v1.2.0/LICENSE.md)) - [github.com/u-root/u-root/pkg/termios](https://pkg.go.dev/github.com/u-root/u-root/pkg/termios) ([BSD-3-Clause](https://github.com/u-root/u-root/blob/v0.12.0/LICENSE)) @@ -95,20 +96,19 @@ Some packages may only be included on certain architectures or operating systems - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/4f986261bf13/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.24.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.25.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/1b970713:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.26.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.27.0:LICENSE)) - [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.16.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.7.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.21.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) 
([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.21.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.22.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.22.0:LICENSE)) - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.16.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.5.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/ee1e1f6070e3/LICENSE)) - - [k8s.io/client-go/util/homedir](https://pkg.go.dev/k8s.io/client-go/util/homedir) ([Apache-2.0](https://github.com/kubernetes/client-go/blob/v0.30.1/LICENSE)) - - [nhooyr.io/websocket](https://pkg.go.dev/nhooyr.io/websocket) ([ISC](https://github.com/nhooyr/websocket/blob/v1.8.10/LICENSE.txt)) + - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/64c016c92987/LICENSE)) + - [k8s.io/client-go/util/homedir](https://pkg.go.dev/k8s.io/client-go/util/homedir) ([Apache-2.0](https://github.com/kubernetes/client-go/blob/v0.30.3/LICENSE)) - [sigs.k8s.io/yaml](https://pkg.go.dev/sigs.k8s.io/yaml) ([Apache-2.0](https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/LICENSE)) - [sigs.k8s.io/yaml/goyaml.v2](https://pkg.go.dev/sigs.k8s.io/yaml/goyaml.v2) 
([Apache-2.0](https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/goyaml.v2/LICENSE)) - [software.sslmate.com/src/go-pkcs12](https://pkg.go.dev/software.sslmate.com/src/go-pkcs12) ([BSD-3-Clause](https://github.com/SSLMate/go-pkcs12/blob/v0.4.0/LICENSE)) diff --git a/licenses/windows.md b/licenses/windows.md index 9f640b830a95e..77281a7ab2ac3 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -57,23 +57,23 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/cabfb018fe85/LICENSE)) - - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/7601212d8e23/LICENSE)) + - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/4327221bd339/LICENSE)) - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/6580b55d49ca/LICENSE)) - - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/62b9a7c569f9/LICENSE)) + - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE)) - [github.com/vishvananda/netlink/nl](https://pkg.go.dev/github.com/vishvananda/netlink/nl) 
([Apache-2.0](https://github.com/vishvananda/netlink/blob/v1.2.1-beta.2/LICENSE)) - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.24.0:LICENSE)) - - [golang.org/x/exp/constraints](https://pkg.go.dev/golang.org/x/exp/constraints) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/fe59bbe5:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.25.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/fe59bbe5:LICENSE)) - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.18.0:LICENSE)) - - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.18.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.26.0:LICENSE)) + - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.19.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.27.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) 
([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.7.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.21.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.21.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.22.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.22.0:LICENSE)) - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.16.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index 71093dd3c1987..f40ede86a7235 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -31,6 +31,7 @@ import ( "tailscale.com/atomicfile" "tailscale.com/envknob" "tailscale.com/health" + "tailscale.com/hostinfo" "tailscale.com/log/filelogger" "tailscale.com/logtail" "tailscale.com/logtail/filch" @@ -463,6 +464,11 @@ func New(collection string, netMon *netmon.Monitor, health *health.Tracker, logf // The netMon parameter is optional. It should be specified in environments where // Tailscaled is manipulating the routing table. func NewWithConfigPath(collection, dir, cmdName string, netMon *netmon.Monitor, health *health.Tracker, logf logger.Logf) *Policy { + if hostinfo.IsNATLabGuestVM() { + // In NATLab Gokrazy instances, tailscaled comes up concurrently with + // DHCP and then doesn't have DNS for a while. 
Wait for DHCP first. + awaitGokrazyNetwork() + } var lflags int if term.IsTerminal(2) || runtime.GOOS == "windows" { lflags = 0 @@ -816,3 +822,25 @@ func (noopPretendSuccessTransport) RoundTrip(req *http.Request) (*http.Response, Status: "200 OK", }, nil } + +func awaitGokrazyNetwork() { + if runtime.GOOS != "linux" || distro.Get() != distro.Gokrazy { + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + for { + // Before DHCP finishes, the /etc/resolv.conf file has just "#MANUAL". + all, _ := os.ReadFile("/etc/resolv.conf") + if bytes.Contains(all, []byte("nameserver ")) { + return + } + select { + case <-ctx.Done(): + return + case <-time.After(500 * time.Millisecond): + } + } +} diff --git a/net/captivedetection/captivedetection.go b/net/captivedetection/captivedetection.go new file mode 100644 index 0000000000000..e0a4b0a250768 --- /dev/null +++ b/net/captivedetection/captivedetection.go @@ -0,0 +1,223 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package captivedetection provides a way to detect if the system is connected to a network that has +// a captive portal. It does this by making HTTP requests to known captive portal detection endpoints +// and checking if the HTTP responses indicate that a captive portal might be present. +package captivedetection + +import ( + "context" + "net" + "net/http" + "runtime" + "strings" + "sync" + "syscall" + "time" + + "tailscale.com/net/netmon" + "tailscale.com/tailcfg" + "tailscale.com/types/logger" +) + +// Detector checks whether the system is behind a captive portal. +type Detector struct { + + // httpClient is the HTTP client that is used for captive portal detection. It is configured + // to not follow redirects, have a short timeout and no keep-alive. + httpClient *http.Client + // currIfIndex is the index of the interface that is currently being used by the httpClient. 
+ currIfIndex int + // mu guards currIfIndex. + mu sync.Mutex + // logf is the logger used for logging messages. If it is nil, log.Printf is used. + logf logger.Logf +} + +// NewDetector creates a new Detector instance for captive portal detection. +func NewDetector(logf logger.Logf) *Detector { + d := &Detector{logf: logf} + d.httpClient = &http.Client{ + // No redirects allowed + CheckRedirect: func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + }, + Transport: &http.Transport{ + DialContext: d.dialContext, + DisableKeepAlives: true, + }, + Timeout: Timeout, + } + return d +} + +// Timeout is the timeout for captive portal detection requests. Because the captive portal intercepting our requests +// is usually located on the LAN, this is a relatively short timeout. +const Timeout = 3 * time.Second + +// Detect is the entry point to the API. It attempts to detect if the system is behind a captive portal +// by making HTTP requests to known captive portal detection Endpoints. If any of the requests return a response code +// or body that looks like a captive portal, Detect returns true. It returns false in all other cases, including when any +// error occurs during a detection attempt. +// +// This function might take a while to return, as it will attempt to detect a captive portal on all available interfaces +// by performing multiple HTTP requests. It should be called in a separate goroutine if you want to avoid blocking. 
+func (d *Detector) Detect(ctx context.Context, netMon *netmon.Monitor, derpMap *tailcfg.DERPMap, preferredDERPRegionID int) (found bool) { + return d.detectCaptivePortalWithGOOS(ctx, netMon, derpMap, preferredDERPRegionID, runtime.GOOS) +} + +func (d *Detector) detectCaptivePortalWithGOOS(ctx context.Context, netMon *netmon.Monitor, derpMap *tailcfg.DERPMap, preferredDERPRegionID int, goos string) (found bool) { + ifState := netMon.InterfaceState() + if !ifState.AnyInterfaceUp() { + d.logf("[v2] DetectCaptivePortal: no interfaces up, returning false") + return false + } + + endpoints := availableEndpoints(derpMap, preferredDERPRegionID, d.logf, goos) + + // Here we try detecting a captive portal using *all* available interfaces on the system + // that have an IPv4 address. We consider a captive portal to be found when any interface + // reports one may exist. This is necessary because most systems have multiple interfaces, + // and most importantly on macOS no default route interface is set until the user has accepted + // the captive portal alert thrown by the system. If no default route interface is known, + // we need to try with anything that might remotely resemble a Wi-Fi interface. + for ifName, i := range ifState.Interface { + if !i.IsUp() || i.IsLoopback() || interfaceNameDoesNotNeedCaptiveDetection(ifName, goos) { + continue + } + addrs, err := i.Addrs() + if err != nil { + d.logf("[v1] DetectCaptivePortal: failed to get addresses for interface %s: %v", ifName, err) + continue + } + if len(addrs) == 0 { + continue + } + d.logf("[v2] attempting to do captive portal detection on interface %s", ifName) + res := d.detectOnInterface(ctx, i.Index, endpoints) + if res { + d.logf("DetectCaptivePortal(found=true,ifName=%s)", ifName) + return true + } + } + + d.logf("DetectCaptivePortal(found=false)") + return false +} + +// interfaceNameDoesNotNeedCaptiveDetection returns true if an interface does not require captive portal detection +// based on its name. 
This is useful to avoid making unnecessary HTTP requests on interfaces that are known to not +// require it. We also avoid making requests on the interface prefixes "pdp" and "rmnet", which are cellular data +// interfaces on iOS and Android, respectively, and would be needlessly battery-draining. +func interfaceNameDoesNotNeedCaptiveDetection(ifName string, goos string) bool { + ifName = strings.ToLower(ifName) + excludedPrefixes := []string{"tailscale", "tun", "tap", "docker", "kube", "wg"} + if goos == "windows" { + excludedPrefixes = append(excludedPrefixes, "loopback", "tunnel", "ppp", "isatap", "teredo", "6to4") + } else if goos == "darwin" || goos == "ios" { + excludedPrefixes = append(excludedPrefixes, "pdp", "awdl", "bridge", "ap", "utun", "tap", "llw", "anpi", "lo", "stf", "gif", "xhc", "pktap") + } else if goos == "android" { + excludedPrefixes = append(excludedPrefixes, "rmnet", "p2p", "dummy", "sit") + } + for _, prefix := range excludedPrefixes { + if strings.HasPrefix(ifName, prefix) { + return true + } + } + return false +} + +// detectOnInterface reports whether or not we think the system is behind a +// captive portal, detected by making a request to a URL that we know should +// return a "204 No Content" response and checking if that's what we get. +// +// The boolean return is whether we think we have a captive portal. +func (d *Detector) detectOnInterface(ctx context.Context, ifIndex int, endpoints []Endpoint) bool { + defer d.httpClient.CloseIdleConnections() + + d.logf("[v2] %d available captive portal detection endpoints: %v", len(endpoints), endpoints) + + // We try to detect the captive portal more quickly by making requests to multiple endpoints concurrently. + var wg sync.WaitGroup + resultCh := make(chan bool, len(endpoints)) + + for i, e := range endpoints { + if i >= 5 { + // Try a maximum of 5 endpoints, break out (returning false) if we run out of attempts. 
+ break + } + wg.Add(1) + go func(endpoint Endpoint) { + defer wg.Done() + found, err := d.verifyCaptivePortalEndpoint(ctx, endpoint, ifIndex) + if err != nil { + d.logf("[v1] checkCaptivePortalEndpoint failed with endpoint %v: %v", endpoint, err) + return + } + if found { + resultCh <- true + } + }(e) + } + + go func() { + wg.Wait() + close(resultCh) + }() + + for result := range resultCh { + if result { + // If any of the endpoints seems to be a captive portal, we consider the system to be behind one. + return true + } + } + + return false +} + +// verifyCaptivePortalEndpoint checks if the given Endpoint is a captive portal by making an HTTP request to the +// given Endpoint URL using the interface with index ifIndex, and checking if the response looks like a captive portal. +func (d *Detector) verifyCaptivePortalEndpoint(ctx context.Context, e Endpoint, ifIndex int) (found bool, err error) { + req, err := http.NewRequestWithContext(ctx, "GET", e.URL.String(), nil) + if err != nil { + return false, err + } + + // Attach the Tailscale challenge header if the endpoint supports it. Not all captive portal detection endpoints + // support this, so we only attach it if the endpoint does. + if e.SupportsTailscaleChallenge { + // Note: the set of valid characters in a challenge and the total + // length is limited; see isChallengeChar in cmd/derper for more + // details. + chal := "ts_" + e.URL.Host + req.Header.Set("X-Tailscale-Challenge", chal) + } + + d.mu.Lock() + d.currIfIndex = ifIndex + d.mu.Unlock() + + // Make the actual request, and check if the response looks like a captive portal or not. 
+ r, err := d.httpClient.Do(req) + if err != nil { + return false, err + } + + return e.responseLooksLikeCaptive(r, d.logf), nil +} + +func (d *Detector) dialContext(ctx context.Context, network, addr string) (net.Conn, error) { + d.mu.Lock() + defer d.mu.Unlock() + + ifIndex := d.currIfIndex + + dl := net.Dialer{ + Control: func(network, address string, c syscall.RawConn) error { + return setSocketInterfaceIndex(c, ifIndex, d.logf) + }, + } + + return dl.DialContext(ctx, network, addr) +} diff --git a/net/captivedetection/captivedetection_test.go b/net/captivedetection/captivedetection_test.go new file mode 100644 index 0000000000000..e74273afd922e --- /dev/null +++ b/net/captivedetection/captivedetection_test.go @@ -0,0 +1,60 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package captivedetection + +import ( + "context" + "runtime" + "sync" + "testing" + + "tailscale.com/cmd/testwrapper/flakytest" + "tailscale.com/net/netmon" +) + +func TestAvailableEndpointsAlwaysAtLeastTwo(t *testing.T) { + endpoints := availableEndpoints(nil, 0, t.Logf, runtime.GOOS) + if len(endpoints) == 0 { + t.Errorf("Expected non-empty AvailableEndpoints, got an empty slice instead") + } + if len(endpoints) == 1 { + t.Errorf("Expected at least two AvailableEndpoints for redundancy, got only one instead") + } + for _, e := range endpoints { + if e.URL.Scheme != "http" { + t.Errorf("Expected HTTP URL in Endpoint, got HTTPS") + } + } +} + +func TestDetectCaptivePortalReturnsFalse(t *testing.T) { + d := NewDetector(t.Logf) + found := d.Detect(context.Background(), netmon.NewStatic(), nil, 0) + if found { + t.Errorf("DetectCaptivePortal returned true, expected false.") + } +} + +func TestAllEndpointsAreUpAndReturnExpectedResponse(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/13019") + d := NewDetector(t.Logf) + endpoints := availableEndpoints(nil, 0, t.Logf, runtime.GOOS) + + var wg sync.WaitGroup + for _, e := 
range endpoints { + wg.Add(1) + go func(endpoint Endpoint) { + defer wg.Done() + found, err := d.verifyCaptivePortalEndpoint(context.Background(), endpoint, 0) + if err != nil { + t.Errorf("verifyCaptivePortalEndpoint failed with endpoint %v: %v", endpoint, err) + } + if found { + t.Errorf("verifyCaptivePortalEndpoint with endpoint %v says we're behind a captive portal, but we aren't", endpoint) + } + }(e) + } + + wg.Wait() +} diff --git a/net/captivedetection/endpoints.go b/net/captivedetection/endpoints.go new file mode 100644 index 0000000000000..450ed4a1cae4a --- /dev/null +++ b/net/captivedetection/endpoints.go @@ -0,0 +1,178 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package captivedetection + +import ( + "cmp" + "fmt" + "io" + "net/http" + "net/url" + "slices" + + "go4.org/mem" + "tailscale.com/net/dnsfallback" + "tailscale.com/tailcfg" + "tailscale.com/types/logger" +) + +// EndpointProvider is an enum that represents the source of an Endpoint. +type EndpointProvider int + +const ( + // DERPMapPreferred is used for an endpoint that is a DERP node contained in the current preferred DERP region, + // as provided by the DERPMap. + DERPMapPreferred EndpointProvider = iota + // DERPMapOther is used for an endpoint that is a DERP node, but not contained in the current preferred DERP region. + DERPMapOther + // Tailscale is used for endpoints that are the Tailscale coordination server or admin console. + Tailscale +) + +func (p EndpointProvider) String() string { + switch p { + case DERPMapPreferred: + return "DERPMapPreferred" + case Tailscale: + return "Tailscale" + case DERPMapOther: + return "DERPMapOther" + default: + return fmt.Sprintf("EndpointProvider(%d)", p) + } +} + +// Endpoint represents a URL that can be used to detect a captive portal, along with the expected +// result of the HTTP request. 
+type Endpoint struct { + // URL is the URL that we make an HTTP request to as part of the captive portal detection process. + URL *url.URL + // StatusCode is the expected HTTP status code that we expect to see in the response. + StatusCode int + // ExpectedContent is a string that we expect to see contained in the response body. If this is non-empty, + // we will check that the response body contains this string. If it is empty, we will not check the response body + // and only check the status code. + ExpectedContent string + // SupportsTailscaleChallenge is true if the endpoint will return the sent value of the X-Tailscale-Challenge + // HTTP header in its HTTP response. + SupportsTailscaleChallenge bool + // Provider is the source of the endpoint. This is used to prioritize certain endpoints over others + // (for example, a DERP node in the preferred region should always be used first). + Provider EndpointProvider +} + +func (e Endpoint) String() string { + return fmt.Sprintf("Endpoint{URL=%q, StatusCode=%d, ExpectedContent=%q, SupportsTailscaleChallenge=%v, Provider=%s}", e.URL, e.StatusCode, e.ExpectedContent, e.SupportsTailscaleChallenge, e.Provider.String()) +} + +func (e Endpoint) Equal(other Endpoint) bool { + return e.URL.String() == other.URL.String() && + e.StatusCode == other.StatusCode && + e.ExpectedContent == other.ExpectedContent && + e.SupportsTailscaleChallenge == other.SupportsTailscaleChallenge && + e.Provider == other.Provider +} + +// availableEndpoints returns a set of Endpoints which can be used for captive portal detection by performing +// one or more HTTP requests and looking at the response. The returned Endpoints are ordered by preference, +// with the most preferred Endpoint being the first in the slice. 
+func availableEndpoints(derpMap *tailcfg.DERPMap, preferredDERPRegionID int, logf logger.Logf, goos string) []Endpoint { + endpoints := []Endpoint{} + + if derpMap == nil || len(derpMap.Regions) == 0 { + // When the client first starts, we don't have a DERPMap in LocalBackend yet. In this case, + // we use the static DERPMap from dnsfallback. + logf("captivedetection: current DERPMap is empty, using map from dnsfallback") + derpMap = dnsfallback.GetDERPMap() + } + // Use the DERP IPs as captive portal detection endpoints. Using IPs is better than hostnames + // because they do not depend on DNS resolution. + for _, region := range derpMap.Regions { + if region.Avoid { + continue + } + for _, node := range region.Nodes { + if node.IPv4 == "" || !node.CanPort80 { + continue + } + str := "http://" + node.IPv4 + "/generate_204" + u, err := url.Parse(str) + if err != nil { + logf("captivedetection: failed to parse DERP node URL %q: %v", str, err) + continue + } + p := DERPMapOther + if region.RegionID == preferredDERPRegionID { + p = DERPMapPreferred + } + e := Endpoint{u, http.StatusNoContent, "", true, p} + endpoints = append(endpoints, e) + } + } + + // Let's also try the default Tailscale coordination server and admin console. + // These are likely to be blocked on some networks. + appendTailscaleEndpoint := func(urlString string) { + u, err := url.Parse(urlString) + if err != nil { + logf("captivedetection: failed to parse Tailscale URL %q: %v", urlString, err) + return + } + endpoints = append(endpoints, Endpoint{u, http.StatusNoContent, "", false, Tailscale}) + } + appendTailscaleEndpoint("http://controlplane.tailscale.com/generate_204") + appendTailscaleEndpoint("http://login.tailscale.com/generate_204") + + // Sort the endpoints by provider so that we can prioritize DERP nodes in the preferred region, followed by + // any other DERP server elsewhere, then followed by Tailscale endpoints. 
+ slices.SortFunc(endpoints, func(x, y Endpoint) int { + return cmp.Compare(x.Provider, y.Provider) + }) + + return endpoints +} + +// responseLooksLikeCaptive checks if the given HTTP response matches the expected response for the Endpoint. +func (e Endpoint) responseLooksLikeCaptive(r *http.Response, logf logger.Logf) bool { + defer r.Body.Close() + + // Check the status code first. + if r.StatusCode != e.StatusCode { + logf("[v1] unexpected status code in captive portal response: want=%d, got=%d", e.StatusCode, r.StatusCode) + return true + } + + // If the endpoint supports the Tailscale challenge header, check that the response contains the expected header. + if e.SupportsTailscaleChallenge { + expectedResponse := "response ts_" + e.URL.Host + hasResponse := r.Header.Get("X-Tailscale-Response") == expectedResponse + if !hasResponse { + // The response did not contain the expected X-Tailscale-Response header, which means we are most likely + // behind a captive portal (somebody is tampering with the response headers). + logf("captive portal check response did not contain expected X-Tailscale-Response header: want=%q, got=%q", expectedResponse, r.Header.Get("X-Tailscale-Response")) + return true + } + } + + // If we don't have an expected content string, we don't need to check the response body. + if e.ExpectedContent == "" { + return false + } + + // Read the response body and check if it contains the expected content. + b, err := io.ReadAll(io.LimitReader(r.Body, 4096)) + if err != nil { + logf("reading captive portal check response body failed: %v", err) + return false + } + hasExpectedContent := mem.Contains(mem.B(b), mem.S(e.ExpectedContent)) + if !hasExpectedContent { + // The response body did not contain the expected content, that means we are most likely behind a captive portal. + logf("[v1] captive portal check response body did not contain expected content: want=%q", e.ExpectedContent) + return true + } + + // If we got here, the response looks good. 
+ return false +} diff --git a/net/captivedetection/rawconn.go b/net/captivedetection/rawconn.go new file mode 100644 index 0000000000000..a7197d9df2577 --- /dev/null +++ b/net/captivedetection/rawconn.go @@ -0,0 +1,19 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !(ios || darwin) + +package captivedetection + +import ( + "syscall" + + "tailscale.com/types/logger" +) + +// setSocketInterfaceIndex sets the IP_BOUND_IF socket option on the given RawConn. +// This forces the socket to use the given interface. +func setSocketInterfaceIndex(c syscall.RawConn, ifIndex int, logf logger.Logf) error { + // No-op on non-Darwin platforms. + return nil +} diff --git a/net/captivedetection/rawconn_apple.go b/net/captivedetection/rawconn_apple.go new file mode 100644 index 0000000000000..12b4446e62eb8 --- /dev/null +++ b/net/captivedetection/rawconn_apple.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ios || darwin + +package captivedetection + +import ( + "syscall" + + "golang.org/x/sys/unix" + "tailscale.com/types/logger" +) + +// setSocketInterfaceIndex sets the IP_BOUND_IF socket option on the given RawConn. +// This forces the socket to use the given interface. 
+func setSocketInterfaceIndex(c syscall.RawConn, ifIndex int, logf logger.Logf) error { + return c.Control((func(fd uintptr) { + err := unix.SetsockoptInt(int(fd), unix.IPPROTO_IP, unix.IP_BOUND_IF, ifIndex) + if err != nil { + logf("captivedetection: failed to set IP_BOUND_IF (ifIndex=%d): %v", ifIndex, err) + } + })) +} diff --git a/net/dns/manager.go b/net/dns/manager.go index eee2d5a7d14df..dfce5b2acaf82 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -82,7 +82,7 @@ func NewManager(logf logger.Logf, oscfg OSConfigurator, health *health.Tracker, m := &Manager{ logf: logf, - resolver: resolver.New(logf, linkSel, dialer, knobs), + resolver: resolver.New(logf, linkSel, dialer, health, knobs), os: oscfg, health: health, knobs: knobs, @@ -538,7 +538,9 @@ func (m *Manager) FlushCaches() error { // CleanUp restores the system DNS configuration to its original state // in case the Tailscale daemon terminated without closing the router. // No other state needs to be instantiated before this runs. 
-func CleanUp(logf logger.Logf, netMon *netmon.Monitor, interfaceName string) { +// +// health must not be nil +func CleanUp(logf logger.Logf, netMon *netmon.Monitor, health *health.Tracker, interfaceName string) { oscfg, err := NewOSConfigurator(logf, nil, nil, interfaceName) if err != nil { logf("creating dns cleanup: %v", err) @@ -546,7 +548,7 @@ func CleanUp(logf logger.Logf, netMon *netmon.Monitor, interfaceName string) { } d := &tsdial.Dialer{Logf: logf} d.SetNetMon(netMon) - dns := NewManager(logf, oscfg, nil, d, nil, nil, runtime.GOOS) + dns := NewManager(logf, oscfg, health, d, nil, nil, runtime.GOOS) if err := dns.Down(); err != nil { logf("dns down: %v", err) } diff --git a/net/dns/manager_tcp_test.go b/net/dns/manager_tcp_test.go index 2982c61290622..f4c42791e9b5b 100644 --- a/net/dns/manager_tcp_test.go +++ b/net/dns/manager_tcp_test.go @@ -15,6 +15,7 @@ import ( "github.com/google/go-cmp/cmp" dns "golang.org/x/net/dns/dnsmessage" + "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" "tailscale.com/tstest" @@ -88,7 +89,7 @@ func TestDNSOverTCP(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, } - m := NewManager(t.Logf, &f, nil, tsdial.NewDialer(netmon.NewStatic()), nil, nil, "") + m := NewManager(t.Logf, &f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "") m.resolver.TestOnlySetHook(f.SetResolver) m.Set(Config{ Hosts: hosts( @@ -173,7 +174,7 @@ func TestDNSOverTCP_TooLarge(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, } - m := NewManager(log, &f, nil, tsdial.NewDialer(netmon.NewStatic()), nil, nil, "") + m := NewManager(log, &f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "") m.resolver.TestOnlySetHook(f.SetResolver) m.Set(Config{ Hosts: hosts("andrew.ts.com.", "1.2.3.4"), diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index ca3227aab3bf1..c528175214f67 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ 
-6,6 +6,8 @@ package resolver import ( "bytes" "context" + "crypto/sha256" + "encoding/base64" "encoding/binary" "errors" "fmt" @@ -23,6 +25,7 @@ import ( dns "golang.org/x/net/dns/dnsmessage" "tailscale.com/control/controlknobs" "tailscale.com/envknob" + "tailscale.com/health" "tailscale.com/net/dns/publicdns" "tailscale.com/net/dnscache" "tailscale.com/net/neterror" @@ -164,6 +167,23 @@ func clampEDNSSize(packet []byte, maxSize uint16) { binary.BigEndian.PutUint16(opt[3:5], maxSize) } +// dnsForwarderFailing should be raised when the forwarder is unable to reach the +// upstream resolvers. This is a high severity warning as it results in "no internet". +// This warning must be cleared when the forwarder is working again. +// +// We allow for 5 second grace period to ensure this is not raised for spurious errors +// under the assumption that DNS queries are relatively frequent and a subsequent +// successful query will clear any one-off errors. +var dnsForwarderFailing = health.Register(&health.Warnable{ + Code: "dns-forward-failing", + Title: "DNS unavailable", + Severity: health.SeverityMedium, + DependsOn: []*health.Warnable{health.NetworkStatusWarnable}, + Text: health.StaticMessage("Tailscale can't reach the configured DNS servers. 
Internet connectivity may be affected."), + ImpactsConnectivity: true, + TimeToVisible: 5 * time.Second, +}) + type route struct { Suffix dnsname.FQDN Resolvers []resolverAndDelay @@ -188,6 +208,7 @@ type forwarder struct { netMon *netmon.Monitor // always non-nil linkSel ForwardLinkSelector // TODO(bradfitz): remove this when tsdial.Dialer absorbs it dialer *tsdial.Dialer + health *health.Tracker // always non-nil controlKnobs *controlknobs.Knobs // or nil @@ -219,7 +240,7 @@ type forwarder struct { missingUpstreamRecovery func() } -func newForwarder(logf logger.Logf, netMon *netmon.Monitor, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, knobs *controlknobs.Knobs) *forwarder { +func newForwarder(logf logger.Logf, netMon *netmon.Monitor, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, health *health.Tracker, knobs *controlknobs.Knobs) *forwarder { if netMon == nil { panic("nil netMon") } @@ -228,6 +249,7 @@ func newForwarder(logf logger.Logf, netMon *netmon.Monitor, linkSel ForwardLinkS netMon: netMon, linkSel: linkSel, dialer: dialer, + health: health, controlKnobs: knobs, missingUpstreamRecovery: func() {}, } @@ -478,9 +500,10 @@ var ( func (f *forwarder) send(ctx context.Context, fq *forwardQuery, rr resolverAndDelay) (ret []byte, err error) { if verboseDNSForward() { id := forwarderCount.Add(1) - f.logf("forwarder.send(%q) [%d] ...", rr.name.Addr, id) + domain, typ, _ := nameFromQuery(fq.packet) + f.logf("forwarder.send(%q, %d, %v, %d) [%d] ...", rr.name.Addr, fq.txid, typ, len(domain), id) defer func() { - f.logf("forwarder.send(%q) [%d] = %v, %v", rr.name.Addr, id, len(ret), err) + f.logf("forwarder.send(%q, %d, %v, %d) [%d] = %v, %v", rr.name.Addr, fq.txid, typ, len(domain), id, len(ret), err) }() } if strings.HasPrefix(rr.name.Addr, "http://") { @@ -846,7 +869,7 @@ type forwardQuery struct { // node DNS proxy queries), otherwise f.resolvers is used. 
func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, responseChan chan<- packet, resolvers ...resolverAndDelay) error { metricDNSFwd.Add(1) - domain, err := nameFromQuery(query.bs) + domain, typ, err := nameFromQuery(query.bs) if err != nil { metricDNSFwdErrorName.Add(1) return err @@ -887,6 +910,7 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo resolvers = f.resolvers(domain) if len(resolvers) == 0 { metricDNSFwdErrorNoUpstream.Add(1) + f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: ""}) f.logf("no upstream resolvers set, returning SERVFAIL") // Attempt to recompile the DNS configuration @@ -909,6 +933,8 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo case responseChan <- res: return nil } + } else { + f.health.SetHealthy(dnsForwarderFailing) } } @@ -920,6 +946,12 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo } defer fq.closeOnCtxDone.Close() + if verboseDNSForward() { + domainSha256 := sha256.Sum256([]byte(domain)) + domainSig := base64.RawStdEncoding.EncodeToString(domainSha256[:3]) + f.logf("request(%d, %v, %d, %s) %d...", fq.txid, typ, len(domain), domainSig, len(fq.packet)) + } + resc := make(chan []byte, 1) // it's fine buffered or not errc := make(chan error, 1) // it's fine buffered or not too for i := range resolvers { @@ -959,7 +991,11 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo metricDNSFwdErrorContext.Add(1) return fmt.Errorf("waiting to send response: %w", ctx.Err()) case responseChan <- packet{v, query.family, query.addr}: + if verboseDNSForward() { + f.logf("response(%d, %v, %d) = %d, nil", fq.txid, typ, len(domain), len(v)) + } metricDNSFwdSuccess.Add(1) + f.health.SetHealthy(dnsForwarderFailing) return nil } case err := <-errc: @@ -979,7 +1015,15 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo case <-ctx.Done(): 
metricDNSFwdErrorContext.Add(1) metricDNSFwdErrorContextGotError.Add(1) + var resolverAddrs []string + for _, rr := range resolvers { + resolverAddrs = append(resolverAddrs, rr.name.Addr) + } + f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: strings.Join(resolverAddrs, ",")}) case responseChan <- res: + if verboseDNSForward() { + f.logf("forwarder response(%d, %v, %d) = %d, %v", fq.txid, typ, len(domain), len(res.bs), firstErr) + } } } return firstErr @@ -999,6 +1043,7 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo for _, rr := range resolvers { resolverAddrs = append(resolverAddrs, rr.name.Addr) } + f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: strings.Join(resolverAddrs, ",")}) return fmt.Errorf("waiting for response or error from %v: %w", resolverAddrs, ctx.Err()) } } @@ -1007,24 +1052,28 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo var initListenConfig func(_ *net.ListenConfig, _ *netmon.Monitor, tunName string) error // nameFromQuery extracts the normalized query name from bs. -func nameFromQuery(bs []byte) (dnsname.FQDN, error) { +func nameFromQuery(bs []byte) (dnsname.FQDN, dns.Type, error) { var parser dns.Parser hdr, err := parser.Start(bs) if err != nil { - return "", err + return "", 0, err } if hdr.Response { - return "", errNotQuery + return "", 0, errNotQuery } q, err := parser.Question() if err != nil { - return "", err + return "", 0, err } n := q.Name.Data[:q.Name.Length] - return dnsname.ToFQDN(rawNameToLower(n)) + fqdn, err := dnsname.ToFQDN(rawNameToLower(n)) + if err != nil { + return "", 0, err + } + return fqdn, q.Type, nil } // nxDomainResponse returns an NXDomain DNS reply for the provided request. 
diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go index e723af620d100..465618a54596c 100644 --- a/net/dns/resolver/forwarder_test.go +++ b/net/dns/resolver/forwarder_test.go @@ -24,6 +24,7 @@ import ( dns "golang.org/x/net/dns/dnsmessage" "tailscale.com/control/controlknobs" "tailscale.com/envknob" + "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" "tailscale.com/types/dnstype" @@ -200,7 +201,7 @@ func BenchmarkNameFromQuery(b *testing.B) { b.ResetTimer() b.ReportAllocs() for range b.N { - _, err := nameFromQuery(msg) + _, _, err := nameFromQuery(msg) if err != nil { b.Fatal(err) } @@ -457,7 +458,7 @@ func runTestQuery(tb testing.TB, port uint16, request []byte, modify func(*forwa var dialer tsdial.Dialer dialer.SetNetMon(netMon) - fwd := newForwarder(tb.Logf, netMon, nil, &dialer, nil) + fwd := newForwarder(tb.Logf, netMon, nil, &dialer, new(health.Tracker), nil) if modify != nil { modify(fwd) } diff --git a/net/dns/resolver/tsdns.go b/net/dns/resolver/tsdns.go index a3f3d7010ab81..90e447020ed79 100644 --- a/net/dns/resolver/tsdns.go +++ b/net/dns/resolver/tsdns.go @@ -25,6 +25,7 @@ import ( dns "golang.org/x/net/dns/dnsmessage" "tailscale.com/control/controlknobs" "tailscale.com/envknob" + "tailscale.com/health" "tailscale.com/net/dns/resolvconffile" "tailscale.com/net/netaddr" "tailscale.com/net/netmon" @@ -202,6 +203,7 @@ type Resolver struct { logf logger.Logf netMon *netmon.Monitor // non-nil dialer *tsdial.Dialer // non-nil + health *health.Tracker // non-nil saveConfigForTests func(cfg Config) // used in tests to capture resolver config // forwarder forwards requests to upstream nameservers. forwarder *forwarder @@ -224,10 +226,14 @@ type ForwardLinkSelector interface { } // New returns a new resolver. -func New(logf logger.Logf, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, knobs *controlknobs.Knobs) *Resolver { +// dialer and health must be non-nil. 
+func New(logf logger.Logf, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, health *health.Tracker, knobs *controlknobs.Knobs) *Resolver { if dialer == nil { panic("nil Dialer") } + if health == nil { + panic("nil health") + } netMon := dialer.NetMon() if netMon == nil { logf("nil netMon") @@ -239,8 +245,9 @@ func New(logf logger.Logf, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, k hostToIP: map[dnsname.FQDN][]netip.Addr{}, ipToHost: map[netip.Addr]dnsname.FQDN{}, dialer: dialer, + health: health, } - r.forwarder = newForwarder(r.logf, netMon, linkSel, dialer, knobs) + r.forwarder = newForwarder(r.logf, netMon, linkSel, dialer, health, knobs) return r } diff --git a/net/dns/resolver/tsdns_test.go b/net/dns/resolver/tsdns_test.go index e1477e34294ac..e2c4750b5c1a3 100644 --- a/net/dns/resolver/tsdns_test.go +++ b/net/dns/resolver/tsdns_test.go @@ -23,6 +23,7 @@ import ( miekdns "github.com/miekg/dns" dns "golang.org/x/net/dns/dnsmessage" + "tailscale.com/health" "tailscale.com/net/netaddr" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" @@ -354,6 +355,7 @@ func newResolver(t testing.TB) *Resolver { return New(t.Logf, nil, // no link selector tsdial.NewDialer(netmon.NewStatic()), + new(health.Tracker), nil, // no control knobs ) } @@ -1068,7 +1070,7 @@ func TestForwardLinkSelection(t *testing.T) { return "special" } return "" - }), new(tsdial.Dialer), nil /* no control knobs */) + }), new(tsdial.Dialer), new(health.Tracker), nil /* no control knobs */) // Test non-special IP. 
if got, err := fwd.packetListener(netip.Addr{}); err != nil { diff --git a/net/dnsfallback/dns-fallback-servers.json b/net/dnsfallback/dns-fallback-servers.json index 6b878014783c9..42dd0713b6b5d 100644 --- a/net/dnsfallback/dns-fallback-servers.json +++ b/net/dnsfallback/dns-fallback-servers.json @@ -10,21 +10,24 @@ "RegionID": 1, "HostName": "derp1c.tailscale.com", "IPv4": "104.248.8.210", - "IPv6": "2604:a880:800:10::7a0:e001" + "IPv6": "2604:a880:800:10::7a0:e001", + "CanPort80": true }, { "Name": "1d", "RegionID": 1, "HostName": "derp1d.tailscale.com", "IPv4": "165.22.33.71", - "IPv6": "2604:a880:800:10::7fe:f001" + "IPv6": "2604:a880:800:10::7fe:f001", + "CanPort80": true }, { "Name": "1e", "RegionID": 1, "HostName": "derp1e.tailscale.com", "IPv4": "64.225.56.166", - "IPv6": "2604:a880:800:10::873:4001" + "IPv6": "2604:a880:800:10::873:4001", + "CanPort80": true } ] }, @@ -38,7 +41,8 @@ "RegionID": 10, "HostName": "derp10.tailscale.com", "IPv4": "137.220.36.168", - "IPv6": "2001:19f0:8001:2d9:5400:2ff:feef:bbb1" + "IPv6": "2001:19f0:8001:2d9:5400:2ff:feef:bbb1", + "CanPort80": true } ] }, @@ -52,7 +56,8 @@ "RegionID": 11, "HostName": "derp11.tailscale.com", "IPv4": "18.230.97.74", - "IPv6": "2600:1f1e:ee4:5611:ec5c:1736:d43b:a454" + "IPv6": "2600:1f1e:ee4:5611:ec5c:1736:d43b:a454", + "CanPort80": true } ] }, @@ -66,21 +71,24 @@ "RegionID": 12, "HostName": "derp12.tailscale.com", "IPv4": "216.128.144.130", - "IPv6": "2001:19f0:5c01:289:5400:3ff:fe8d:cb5e" + "IPv6": "2001:19f0:5c01:289:5400:3ff:fe8d:cb5e", + "CanPort80": true }, { "Name": "12b", "RegionID": 12, "HostName": "derp12b.tailscale.com", "IPv4": "45.63.71.144", - "IPv6": "2001:19f0:5c01:48a:5400:3ff:fe8d:cb5f" + "IPv6": "2001:19f0:5c01:48a:5400:3ff:fe8d:cb5f", + "CanPort80": true }, { "Name": "12c", "RegionID": 12, "HostName": "derp12c.tailscale.com", "IPv4": "149.28.119.105", - "IPv6": "2001:19f0:5c01:2cb:5400:3ff:fe8d:cb60" + "IPv6": "2001:19f0:5c01:2cb:5400:3ff:fe8d:cb60", + "CanPort80": true } ] 
}, @@ -94,21 +102,24 @@ "RegionID": 2, "HostName": "derp2d.tailscale.com", "IPv4": "192.73.252.65", - "IPv6": "2607:f740:0:3f::287" + "IPv6": "2607:f740:0:3f::287", + "CanPort80": true }, { "Name": "2e", "RegionID": 2, "HostName": "derp2e.tailscale.com", "IPv4": "192.73.252.134", - "IPv6": "2607:f740:0:3f::44c" + "IPv6": "2607:f740:0:3f::44c", + "CanPort80": true }, { "Name": "2f", "RegionID": 2, "HostName": "derp2f.tailscale.com", "IPv4": "208.111.34.178", - "IPv6": "2607:f740:0:3f::f4" + "IPv6": "2607:f740:0:3f::f4", + "CanPort80": true } ] }, @@ -122,7 +133,8 @@ "RegionID": 3, "HostName": "derp3.tailscale.com", "IPv4": "68.183.179.66", - "IPv6": "2400:6180:0:d1::67d:8001" + "IPv6": "2400:6180:0:d1::67d:8001", + "CanPort80": true } ] }, @@ -136,21 +148,24 @@ "RegionID": 4, "HostName": "derp4c.tailscale.com", "IPv4": "134.122.77.138", - "IPv6": "2a03:b0c0:3:d0::1501:6001" + "IPv6": "2a03:b0c0:3:d0::1501:6001", + "CanPort80": true }, { "Name": "4d", "RegionID": 4, "HostName": "derp4d.tailscale.com", "IPv4": "134.122.94.167", - "IPv6": "2a03:b0c0:3:d0::1501:b001" + "IPv6": "2a03:b0c0:3:d0::1501:b001", + "CanPort80": true }, { "Name": "4e", "RegionID": 4, "HostName": "derp4e.tailscale.com", "IPv4": "134.122.74.153", - "IPv6": "2a03:b0c0:3:d0::29:9001" + "IPv6": "2a03:b0c0:3:d0::29:9001", + "CanPort80": true } ] }, @@ -164,7 +179,8 @@ "RegionID": 5, "HostName": "derp5.tailscale.com", "IPv4": "103.43.75.49", - "IPv6": "2001:19f0:5801:10b7:5400:2ff:feaa:284c" + "IPv6": "2001:19f0:5801:10b7:5400:2ff:feaa:284c", + "CanPort80": true } ] }, @@ -178,7 +194,8 @@ "RegionID": 6, "HostName": "derp6.tailscale.com", "IPv4": "68.183.90.120", - "IPv6": "2400:6180:100:d0::982:d001" + "IPv6": "2400:6180:100:d0::982:d001", + "CanPort80": true } ] }, @@ -192,7 +209,8 @@ "RegionID": 7, "HostName": "derp7.tailscale.com", "IPv4": "167.179.89.145", - "IPv6": "2401:c080:1000:467f:5400:2ff:feee:22aa" + "IPv6": "2401:c080:1000:467f:5400:2ff:feee:22aa", + "CanPort80": true } ] }, @@ -206,21 
+224,24 @@ "RegionID": 8, "HostName": "derp8b.tailscale.com", "IPv4": "46.101.74.201", - "IPv6": "2a03:b0c0:1:d0::ec1:e001" + "IPv6": "2a03:b0c0:1:d0::ec1:e001", + "CanPort80": true }, { "Name": "8c", "RegionID": 8, "HostName": "derp8c.tailscale.com", "IPv4": "206.189.16.32", - "IPv6": "2a03:b0c0:1:d0::e1f:4001" + "IPv6": "2a03:b0c0:1:d0::e1f:4001", + "CanPort80": true }, { "Name": "8d", "RegionID": 8, "HostName": "derp8d.tailscale.com", "IPv4": "178.62.44.132", - "IPv6": "2a03:b0c0:1:d0::e08:e001" + "IPv6": "2a03:b0c0:1:d0::e08:e001", + "CanPort80": true } ] }, @@ -234,21 +255,24 @@ "RegionID": 9, "HostName": "derp9.tailscale.com", "IPv4": "207.148.3.137", - "IPv6": "2001:19f0:6401:1d9c:5400:2ff:feef:bb82" + "IPv6": "2001:19f0:6401:1d9c:5400:2ff:feef:bb82", + "CanPort80": true }, { "Name": "9b", "RegionID": 9, "HostName": "derp9b.tailscale.com", "IPv4": "144.202.67.195", - "IPv6": "2001:19f0:6401:eb5:5400:3ff:fe8d:6d9b" + "IPv6": "2001:19f0:6401:eb5:5400:3ff:fe8d:6d9b", + "CanPort80": true }, { "Name": "9c", "RegionID": 9, "HostName": "derp9c.tailscale.com", "IPv4": "155.138.243.219", - "IPv6": "2001:19f0:6401:fe7:5400:3ff:fe8d:6d9c" + "IPv6": "2001:19f0:6401:fe7:5400:3ff:fe8d:6d9c", + "CanPort80": true } ] } diff --git a/net/dnsfallback/dnsfallback.go b/net/dnsfallback/dnsfallback.go index 6b3ac864edd50..4c5d5fa2f2743 100644 --- a/net/dnsfallback/dnsfallback.go +++ b/net/dnsfallback/dnsfallback.go @@ -219,7 +219,7 @@ func lookup(ctx context.Context, host string, logf logger.Logf, ht *health.Track ip netip.Addr } - dm := getDERPMap() + dm := GetDERPMap() var cands4, cands6 []nameIP for _, dr := range dm.Regions { @@ -281,6 +281,7 @@ func lookup(ctx context.Context, host string, logf logger.Logf, ht *health.Track func bootstrapDNSMap(ctx context.Context, serverName string, serverIP netip.Addr, queryName string, logf logger.Logf, ht *health.Tracker, netMon *netmon.Monitor) (dnsMap, error) { dialer := netns.NewDialer(logf, netMon) tr := 
http.DefaultTransport.(*http.Transport).Clone() + tr.DisableKeepAlives = true // This transport is meant to be used once. tr.Proxy = tshttpproxy.ProxyFromEnvironment tr.DialContext = func(ctx context.Context, netw, addr string) (net.Conn, error) { return dialer.DialContext(ctx, "tcp", net.JoinHostPort(serverIP.String(), "443")) @@ -310,9 +311,12 @@ func bootstrapDNSMap(ctx context.Context, serverName string, serverIP netip.Addr // https://derp10.tailscale.com/bootstrap-dns type dnsMap map[string][]netip.Addr -// getDERPMap returns some DERP map. The DERP servers also run a fallback -// DNS server. -func getDERPMap() *tailcfg.DERPMap { +// GetDERPMap returns a fallback DERP map that is always available, useful for basic +// bootstrapping purposes. The dynamically updated DERP map in LocalBackend should +// always be preferred over this. Use this DERP map only when the control plane is +// unreachable or hasn't been reached yet. The DERP servers in the returned map also +// run a fallback DNS server. +func GetDERPMap() *tailcfg.DERPMap { dm := getStaticDERPMap() // Merge in any DERP servers from the cached map that aren't in the diff --git a/net/dnsfallback/dnsfallback_test.go b/net/dnsfallback/dnsfallback_test.go index 4298499b0189e..16f5027d4850f 100644 --- a/net/dnsfallback/dnsfallback_test.go +++ b/net/dnsfallback/dnsfallback_test.go @@ -18,7 +18,7 @@ import ( ) func TestGetDERPMap(t *testing.T) { - dm := getDERPMap() + dm := GetDERPMap() if dm == nil { t.Fatal("nil") } @@ -78,7 +78,7 @@ func TestCache(t *testing.T) { } // Verify that our DERP map is merged with the cache. 
- dm := getDERPMap() + dm := GetDERPMap() region, ok := dm.Regions[99] if !ok { t.Fatal("expected region 99") diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index 80957039e3ca9..8eb50a61dd340 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -14,13 +14,11 @@ import ( "io" "log" "maps" - "math/rand/v2" "net" "net/http" "net/netip" "runtime" "sort" - "strings" "sync" "syscall" "time" @@ -28,6 +26,7 @@ import ( "github.com/tcnksm/go-httpstat" "tailscale.com/derp/derphttp" "tailscale.com/envknob" + "tailscale.com/net/captivedetection" "tailscale.com/net/dnscache" "tailscale.com/net/neterror" "tailscale.com/net/netmon" @@ -847,11 +846,8 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe tmr := time.AfterFunc(c.captivePortalDelay(), func() { defer close(ch) - found, err := c.checkCaptivePortal(ctx, dm, preferredDERP) - if err != nil { - c.logf("[v1] checkCaptivePortal: %v", err) - return - } + d := captivedetection.NewDetector(c.logf) + found := d.Detect(ctx, c.NetMon, dm, preferredDERP) rs.report.CaptivePortal.Set(found) }) @@ -988,75 +984,6 @@ func (c *Client) finishAndStoreReport(rs *reportState, dm *tailcfg.DERPMap) *Rep return report } -var noRedirectClient = &http.Client{ - // No redirects allowed - CheckRedirect: func(req *http.Request, via []*http.Request) error { - return http.ErrUseLastResponse - }, - - // Remaining fields are the same as the default client. - Transport: http.DefaultClient.Transport, - Jar: http.DefaultClient.Jar, - Timeout: http.DefaultClient.Timeout, -} - -// checkCaptivePortal reports whether or not we think the system is behind a -// captive portal, detected by making a request to a URL that we know should -// return a "204 No Content" response and checking if that's what we get. -// -// The boolean return is whether we think we have a captive portal. 
-func (c *Client) checkCaptivePortal(ctx context.Context, dm *tailcfg.DERPMap, preferredDERP int) (bool, error) { - defer noRedirectClient.CloseIdleConnections() - - // If we have a preferred DERP region with more than one node, try - // that; otherwise, pick a random one not marked as "Avoid". - if preferredDERP == 0 || dm.Regions[preferredDERP] == nil || - (preferredDERP != 0 && len(dm.Regions[preferredDERP].Nodes) == 0) { - rids := make([]int, 0, len(dm.Regions)) - for id, reg := range dm.Regions { - if reg == nil || reg.Avoid || len(reg.Nodes) == 0 { - continue - } - rids = append(rids, id) - } - if len(rids) == 0 { - return false, nil - } - preferredDERP = rids[rand.IntN(len(rids))] - } - - node := dm.Regions[preferredDERP].Nodes[0] - - if strings.HasSuffix(node.HostName, tailcfg.DotInvalid) { - // Don't try to connect to invalid hostnames. This occurred in tests: - // https://github.com/tailscale/tailscale/issues/6207 - // TODO(bradfitz,andrew-d): how to actually handle this nicely? - return false, nil - } - - req, err := http.NewRequestWithContext(ctx, "GET", "http://"+node.HostName+"/generate_204", nil) - if err != nil { - return false, err - } - - // Note: the set of valid characters in a challenge and the total - // length is limited; see isChallengeChar in cmd/derper for more - // details. - chal := "ts_" + node.HostName - req.Header.Set("X-Tailscale-Challenge", chal) - r, err := noRedirectClient.Do(req) - if err != nil { - return false, err - } - defer r.Body.Close() - - expectedResponse := "response " + chal - validResponse := r.Header.Get("X-Tailscale-Response") == expectedResponse - - c.logf("[v2] checkCaptivePortal url=%q status_code=%d valid_response=%v", req.URL.String(), r.StatusCode, validResponse) - return r.StatusCode != 204 || !validResponse, nil -} - // runHTTPOnlyChecks is the netcheck done by environments that can // only do HTTP requests, such as ws/wasm. 
func (c *Client) runHTTPOnlyChecks(ctx context.Context, last *Report, rs *reportState, dm *tailcfg.DERPMap) error { diff --git a/net/netcheck/netcheck_test.go b/net/netcheck/netcheck_test.go index 8b71247449244..26e52602afaa5 100644 --- a/net/netcheck/netcheck_test.go +++ b/net/netcheck/netcheck_test.go @@ -15,14 +15,12 @@ import ( "sort" "strconv" "strings" - "sync/atomic" "testing" "time" "tailscale.com/net/netmon" "tailscale.com/net/stun/stuntest" "tailscale.com/tailcfg" - "tailscale.com/tstest" "tailscale.com/tstest/nettest" ) @@ -778,54 +776,6 @@ func TestSortRegions(t *testing.T) { } } -func TestNoCaptivePortalWhenUDP(t *testing.T) { - nettest.SkipIfNoNetwork(t) // empirically. not sure why. - - // Override noRedirectClient to handle the /generate_204 endpoint - var generate204Called atomic.Bool - tr := RoundTripFunc(func(req *http.Request) *http.Response { - if !strings.HasSuffix(req.URL.String(), "/generate_204") { - panic("bad URL: " + req.URL.String()) - } - generate204Called.Store(true) - return &http.Response{ - StatusCode: http.StatusNoContent, - Header: make(http.Header), - } - }) - - tstest.Replace(t, &noRedirectClient.Transport, http.RoundTripper(tr)) - - stunAddr, cleanup := stuntest.Serve(t) - defer cleanup() - - c := newTestClient(t) - c.testEnoughRegions = 1 - // Set the delay long enough that we have time to cancel it - // when our STUN probe succeeds. - c.testCaptivePortalDelay = 10 * time.Second - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() - - if err := c.Standalone(ctx, "127.0.0.1:0"); err != nil { - t.Fatal(err) - } - - r, err := c.GetReport(ctx, stuntest.DERPMapOf(stunAddr.String()), nil) - if err != nil { - t.Fatal(err) - } - - // Should not have called our captive portal function. 
- if generate204Called.Load() { - t.Errorf("captive portal check called; expected no call") - } - if r.CaptivePortal != "" { - t.Errorf("got CaptivePortal=%q, want empty", r.CaptivePortal) - } -} - type RoundTripFunc func(req *http.Request) *http.Response func (f RoundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) { diff --git a/net/netns/netns_darwin.go b/net/netns/netns_darwin.go index 55e29cc29a001..ac5e89d76cc2e 100644 --- a/net/netns/netns_darwin.go +++ b/net/netns/netns_darwin.go @@ -92,7 +92,9 @@ func getInterfaceIndex(logf logger.Logf, netMon *netmon.Monitor, address string) // If the address doesn't parse, use the default index. addr, err := parseAddress(address) if err != nil { - logf("[unexpected] netns: error parsing address %q: %v", address, err) + if err != errUnspecifiedHost { + logf("[unexpected] netns: error parsing address %q: %v", address, err) + } return defaultIdx() } diff --git a/net/netns/netns_dw.go b/net/netns/netns_dw.go index bf654666fcda8..f92ba9462c32a 100644 --- a/net/netns/netns_dw.go +++ b/net/netns/netns_dw.go @@ -6,16 +6,22 @@ package netns import ( + "errors" "net" "net/netip" ) +var errUnspecifiedHost = errors.New("unspecified host") + func parseAddress(address string) (addr netip.Addr, err error) { host, _, err := net.SplitHostPort(address) if err != nil { // error means the string didn't contain a port number, so use the string directly host = address } + if host == "" { + return addr, errUnspecifiedHost + } return netip.ParseAddr(host) } diff --git a/net/netns/netns_windows.go b/net/netns/netns_windows.go index 3c4bafbb548a6..afbda0f47ece6 100644 --- a/net/netns/netns_windows.go +++ b/net/netns/netns_windows.go @@ -86,23 +86,26 @@ func controlC(logf logger.Logf, network, address string, c syscall.RawConn) (err var ifaceIdxV4, ifaceIdxV6 uint32 if useRoute := bindToInterfaceByRoute.Load() || bindToInterfaceByRouteEnv(); useRoute { addr, err := parseAddress(address) - if err != nil { - return 
fmt.Errorf("parseAddress: %w", err) - } - - if canV4 && (addr.Is4() || addr.Is4In6()) { - addrV4 := addr.Unmap() - ifaceIdxV4, err = getInterfaceIndex(logf, addrV4, defIfaceIdxV4) - if err != nil { - return fmt.Errorf("getInterfaceIndex(%v): %w", addrV4, err) + if err == nil { + if canV4 && (addr.Is4() || addr.Is4In6()) { + addrV4 := addr.Unmap() + ifaceIdxV4, err = getInterfaceIndex(logf, addrV4, defIfaceIdxV4) + if err != nil { + return fmt.Errorf("getInterfaceIndex(%v): %w", addrV4, err) + } } - } - if canV6 && addr.Is6() { - ifaceIdxV6, err = getInterfaceIndex(logf, addr, defIfaceIdxV6) - if err != nil { - return fmt.Errorf("getInterfaceIndex(%v): %w", addr, err) + if canV6 && addr.Is6() { + ifaceIdxV6, err = getInterfaceIndex(logf, addr, defIfaceIdxV6) + if err != nil { + return fmt.Errorf("getInterfaceIndex(%v): %w", addr, err) + } + } + } else { + if err != errUnspecifiedHost { + logf("[unexpected] netns: error parsing address %q: %v", address, err) } + ifaceIdxV4, ifaceIdxV6 = defIfaceIdxV4, defIfaceIdxV6 } } else { ifaceIdxV4, ifaceIdxV6 = defIfaceIdxV4, defIfaceIdxV6 diff --git a/net/packet/checksum/checksum.go b/net/packet/checksum/checksum.go index c49ae3626a2d6..547ea3a3577ed 100644 --- a/net/packet/checksum/checksum.go +++ b/net/packet/checksum/checksum.go @@ -61,7 +61,7 @@ func UpdateDstAddr(q *packet.Parsed, dst netip.Addr) { b := q.Buffer() if dst.Is6() { v6 := dst.As16() - copy(b[24:36], v6[:]) + copy(b[24:40], v6[:]) updateV6PacketChecksums(q, old, dst) } else { v4 := dst.As4() diff --git a/net/packet/checksum/checksum_test.go b/net/packet/checksum/checksum_test.go index aeb030c1c8e76..bf818743d3dbf 100644 --- a/net/packet/checksum/checksum_test.go +++ b/net/packet/checksum/checksum_test.go @@ -5,6 +5,7 @@ package checksum import ( "encoding/binary" + "math/rand/v2" "net/netip" "testing" @@ -94,7 +95,7 @@ func TestHeaderChecksumsV4(t *testing.T) { } func TestNatChecksumsV6UDP(t *testing.T) { - a1, a2 := netip.MustParseAddr("a::1"), 
netip.MustParseAddr("b::1") + a1, a2 := randV6Addr(), randV6Addr() // Make a fake UDP packet with 32 bytes of zeros as the datagram payload. b := header.IPv6(make([]byte, header.IPv6MinimumSize+header.UDPMinimumSize+32)) @@ -124,25 +125,43 @@ func TestNatChecksumsV6UDP(t *testing.T) { } // Parse the packet. - var p packet.Parsed + var p, p2 packet.Parsed p.Decode(b) t.Log(p.String()) // Update the source address of the packet to be the same as the dest. UpdateSrcAddr(&p, a2) + p2.Decode(p.Buffer()) + if p2.Src.Addr() != a2 { + t.Fatalf("got %v, want %v", p2.Src, a2) + } if !udp.IsChecksumValid(tcpip.AddrFrom16Slice(a2.AsSlice()), tcpip.AddrFrom16Slice(a2.AsSlice()), checksum.Checksum(b.Payload()[header.UDPMinimumSize:], 0)) { t.Fatal("incorrect checksum after updating source address") } // Update the dest address of the packet to be the original source address. UpdateDstAddr(&p, a1) + p2.Decode(p.Buffer()) + if p2.Dst.Addr() != a1 { + t.Fatalf("got %v, want %v", p2.Dst, a1) + } if !udp.IsChecksumValid(tcpip.AddrFrom16Slice(a2.AsSlice()), tcpip.AddrFrom16Slice(a1.AsSlice()), checksum.Checksum(b.Payload()[header.UDPMinimumSize:], 0)) { t.Fatal("incorrect checksum after updating destination address") } } +func randV6Addr() netip.Addr { + a1, a2 := rand.Int64(), rand.Int64() + return netip.AddrFrom16([16]byte{ + byte(a1 >> 56), byte(a1 >> 48), byte(a1 >> 40), byte(a1 >> 32), + byte(a1 >> 24), byte(a1 >> 16), byte(a1 >> 8), byte(a1), + byte(a2 >> 56), byte(a2 >> 48), byte(a2 >> 40), byte(a2 >> 32), + byte(a2 >> 24), byte(a2 >> 16), byte(a2 >> 8), byte(a2), + }) +} + func TestNatChecksumsV6TCP(t *testing.T) { - a1, a2 := netip.MustParseAddr("a::1"), netip.MustParseAddr("b::1") + a1, a2 := randV6Addr(), randV6Addr() // Make a fake TCP packet with no payload. b := header.IPv6(make([]byte, header.IPv6MinimumSize+header.TCPMinimumSize)) @@ -178,18 +197,26 @@ func TestNatChecksumsV6TCP(t *testing.T) { } // Parse the packet. 
- var p packet.Parsed + var p, p2 packet.Parsed p.Decode(b) t.Log(p.String()) // Update the source address of the packet to be the same as the dest. UpdateSrcAddr(&p, a2) + p2.Decode(p.Buffer()) + if p2.Src.Addr() != a2 { + t.Fatalf("got %v, want %v", p2.Src, a2) + } if !tcp.IsChecksumValid(tcpip.AddrFrom16Slice(a2.AsSlice()), tcpip.AddrFrom16Slice(a2.AsSlice()), 0, 0) { t.Fatal("incorrect checksum after updating source address") } // Update the dest address of the packet to be the original source address. UpdateDstAddr(&p, a1) + p2.Decode(p.Buffer()) + if p2.Dst.Addr() != a1 { + t.Fatalf("got %v, want %v", p2.Dst, a1) + } if !tcp.IsChecksumValid(tcpip.AddrFrom16Slice(a2.AsSlice()), tcpip.AddrFrom16Slice(a1.AsSlice()), 0, 0) { t.Fatal("incorrect checksum after updating destination address") } diff --git a/net/socks5/socks5.go b/net/socks5/socks5.go index b774ebe2477b2..0d651537fac9a 100644 --- a/net/socks5/socks5.go +++ b/net/socks5/socks5.go @@ -13,8 +13,10 @@ package socks5 import ( + "bytes" "context" "encoding/binary" + "errors" "fmt" "io" "log" @@ -121,7 +123,7 @@ func (s *Server) Serve(l net.Listener) error { } go func() { defer c.Close() - conn := &Conn{clientConn: c, srv: s} + conn := &Conn{logf: s.Logf, clientConn: c, srv: s} err := conn.Run() if err != nil { s.logf("client connection failed: %v", err) @@ -136,9 +138,12 @@ type Conn struct { // The struct is filled by each of the internal // methods in turn as the transaction progresses. + logf logger.Logf srv *Server clientConn net.Conn request *request + + udpClientAddr net.Addr } // Run starts the new connection. 
@@ -172,58 +177,59 @@ func (c *Conn) Run() error { func (c *Conn) handleRequest() error { req, err := parseClientRequest(c.clientConn) if err != nil { - res := &response{reply: generalFailure} + res := errorResponse(generalFailure) buf, _ := res.marshal() c.clientConn.Write(buf) return err } - if req.command != connect { - res := &response{reply: commandNotSupported} + + c.request = req + switch req.command { + case connect: + return c.handleTCP() + case udpAssociate: + return c.handleUDP() + default: + res := errorResponse(commandNotSupported) buf, _ := res.marshal() c.clientConn.Write(buf) return fmt.Errorf("unsupported command %v", req.command) } - c.request = req +} +func (c *Conn) handleTCP() error { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() srv, err := c.srv.dial( ctx, "tcp", - net.JoinHostPort(c.request.destination, strconv.Itoa(int(c.request.port))), + c.request.destination.hostPort(), ) if err != nil { - res := &response{reply: generalFailure} + res := errorResponse(generalFailure) buf, _ := res.marshal() c.clientConn.Write(buf) return err } defer srv.Close() - serverAddr, serverPortStr, err := net.SplitHostPort(srv.LocalAddr().String()) + + localAddr := srv.LocalAddr().String() + serverAddr, serverPort, err := splitHostPort(localAddr) if err != nil { return err } - serverPort, _ := strconv.Atoi(serverPortStr) - var bindAddrType addrType - if ip := net.ParseIP(serverAddr); ip != nil { - if ip.To4() != nil { - bindAddrType = ipv4 - } else { - bindAddrType = ipv6 - } - } else { - bindAddrType = domainName - } res := &response{ - reply: success, - bindAddrType: bindAddrType, - bindAddr: serverAddr, - bindPort: uint16(serverPort), + reply: success, + bindAddr: socksAddr{ + addrType: getAddrType(serverAddr), + addr: serverAddr, + port: serverPort, + }, } buf, err := res.marshal() if err != nil { - res = &response{reply: generalFailure} + res = errorResponse(generalFailure) buf, _ = res.marshal() } 
c.clientConn.Write(buf) @@ -246,6 +252,208 @@ func (c *Conn) handleRequest() error { return <-errc } +func (c *Conn) handleUDP() error { + // The DST.ADDR and DST.PORT fields contain the address and port that + // the client expects to use to send UDP datagrams on for the + // association. The server MAY use this information to limit access + // to the association. + // @see Page 6, https://datatracker.ietf.org/doc/html/rfc1928. + // + // We do NOT limit the access from the client currently in this implementation. + _ = c.request.destination + + addr := c.clientConn.LocalAddr() + host, _, err := net.SplitHostPort(addr.String()) + if err != nil { + return err + } + clientUDPConn, err := net.ListenPacket("udp", net.JoinHostPort(host, "0")) + if err != nil { + res := errorResponse(generalFailure) + buf, _ := res.marshal() + c.clientConn.Write(buf) + return err + } + defer clientUDPConn.Close() + + serverUDPConn, err := net.ListenPacket("udp", "[::]:0") + if err != nil { + res := errorResponse(generalFailure) + buf, _ := res.marshal() + c.clientConn.Write(buf) + return err + } + defer serverUDPConn.Close() + + bindAddr, bindPort, err := splitHostPort(clientUDPConn.LocalAddr().String()) + if err != nil { + return err + } + + res := &response{ + reply: success, + bindAddr: socksAddr{ + addrType: getAddrType(bindAddr), + addr: bindAddr, + port: bindPort, + }, + } + buf, err := res.marshal() + if err != nil { + res = errorResponse(generalFailure) + buf, _ = res.marshal() + } + c.clientConn.Write(buf) + + return c.transferUDP(c.clientConn, clientUDPConn, serverUDPConn) +} + +func (c *Conn) transferUDP(associatedTCP net.Conn, clientConn net.PacketConn, targetConn net.PacketConn) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + const bufferSize = 8 * 1024 + const readTimeout = 5 * time.Second + + // client -> target + go func() { + defer cancel() + buf := make([]byte, bufferSize) + for { + select { + case <-ctx.Done(): + return + default: + 
err := c.handleUDPRequest(clientConn, targetConn, buf, readTimeout) + if err != nil { + if isTimeout(err) { + continue + } + if errors.Is(err, net.ErrClosed) { + return + } + c.logf("udp transfer: handle udp request fail: %v", err) + } + } + } + }() + + // target -> client + go func() { + defer cancel() + buf := make([]byte, bufferSize) + for { + select { + case <-ctx.Done(): + return + default: + err := c.handleUDPResponse(targetConn, clientConn, buf, readTimeout) + if err != nil { + if isTimeout(err) { + continue + } + if errors.Is(err, net.ErrClosed) { + return + } + c.logf("udp transfer: handle udp response fail: %v", err) + } + } + } + }() + + // A UDP association terminates when the TCP connection that the UDP + // ASSOCIATE request arrived on terminates. RFC1928 + _, err := io.Copy(io.Discard, associatedTCP) + if err != nil { + err = fmt.Errorf("udp associated tcp conn: %w", err) + } + return err +} + +func (c *Conn) handleUDPRequest( + clientConn net.PacketConn, + targetConn net.PacketConn, + buf []byte, + readTimeout time.Duration, +) error { + // add a deadline for the read to avoid blocking forever + _ = clientConn.SetReadDeadline(time.Now().Add(readTimeout)) + n, addr, err := clientConn.ReadFrom(buf) + if err != nil { + return fmt.Errorf("read from client: %w", err) + } + c.udpClientAddr = addr + req, data, err := parseUDPRequest(buf[:n]) + if err != nil { + return fmt.Errorf("parse udp request: %w", err) + } + targetAddr, err := net.ResolveUDPAddr("udp", req.addr.hostPort()) + if err != nil { + c.logf("resolve target addr fail: %v", err) + } + + nn, err := targetConn.WriteTo(data, targetAddr) + if err != nil { + return fmt.Errorf("write to target %s fail: %w", targetAddr, err) + } + if nn != len(data) { + return fmt.Errorf("write to target %s fail: %w", targetAddr, io.ErrShortWrite) + } + return nil +} + +func (c *Conn) handleUDPResponse( + targetConn net.PacketConn, + clientConn net.PacketConn, + buf []byte, + readTimeout time.Duration, +) error { + 
// add a deadline for the read to avoid blocking forever + _ = targetConn.SetReadDeadline(time.Now().Add(readTimeout)) + n, addr, err := targetConn.ReadFrom(buf) + if err != nil { + return fmt.Errorf("read from target: %w", err) + } + host, port, err := splitHostPort(addr.String()) + if err != nil { + return fmt.Errorf("split host port: %w", err) + } + hdr := udpRequest{addr: socksAddr{addrType: getAddrType(host), addr: host, port: port}} + pkt, err := hdr.marshal() + if err != nil { + return fmt.Errorf("marshal udp request: %w", err) + } + data := append(pkt, buf[:n]...) + // use addr from client to send back + nn, err := clientConn.WriteTo(data, c.udpClientAddr) + if err != nil { + return fmt.Errorf("write to client: %w", err) + } + if nn != len(data) { + return fmt.Errorf("write to client: %w", io.ErrShortWrite) + } + return nil +} + +func isTimeout(err error) bool { + terr, ok := errors.Unwrap(err).(interface{ Timeout() bool }) + return ok && terr.Timeout() +} + +func splitHostPort(hostport string) (host string, port uint16, err error) { + host, portStr, err := net.SplitHostPort(hostport) + if err != nil { + return "", 0, err + } + portInt, err := strconv.Atoi(portStr) + if err != nil { + return "", 0, err + } + if portInt < 0 || portInt > 65535 { + return "", 0, fmt.Errorf("invalid port number %d", portInt) + } + return host, uint16(portInt), nil +} + // parseClientGreeting parses a request initiation packet. func parseClientGreeting(r io.Reader, authMethod byte) error { var hdr [2]byte @@ -295,123 +503,205 @@ func parseClientAuth(r io.Reader) (usr, pwd string, err error) { return string(usrBytes), string(pwdBytes), nil } +func getAddrType(addr string) addrType { + if ip := net.ParseIP(addr); ip != nil { + if ip.To4() != nil { + return ipv4 + } + return ipv6 + } + return domainName +} + // request represents data contained within a SOCKS5 // connection request packet. 
type request struct { - command commandType - destination string - port uint16 - destAddrType addrType + command commandType + destination socksAddr } // parseClientRequest converts raw packet bytes into a // SOCKS5Request struct. func parseClientRequest(r io.Reader) (*request, error) { - var hdr [4]byte + var hdr [3]byte _, err := io.ReadFull(r, hdr[:]) if err != nil { return nil, fmt.Errorf("could not read packet header") } cmd := hdr[1] - destAddrType := addrType(hdr[3]) - var destination string - var port uint16 + destination, err := parseSocksAddr(r) + return &request{ + command: commandType(cmd), + destination: destination, + }, err +} + +type socksAddr struct { + addrType addrType + addr string + port uint16 +} + +var zeroSocksAddr = socksAddr{addrType: ipv4, addr: "0.0.0.0", port: 0} + +func parseSocksAddr(r io.Reader) (addr socksAddr, err error) { + var addrTypeData [1]byte + _, err = io.ReadFull(r, addrTypeData[:]) + if err != nil { + return socksAddr{}, fmt.Errorf("could not read address type") + } - if destAddrType == ipv4 { + dstAddrType := addrType(addrTypeData[0]) + var destination string + switch dstAddrType { + case ipv4: var ip [4]byte _, err = io.ReadFull(r, ip[:]) if err != nil { - return nil, fmt.Errorf("could not read IPv4 address") + return socksAddr{}, fmt.Errorf("could not read IPv4 address") } destination = net.IP(ip[:]).String() - } else if destAddrType == domainName { + case domainName: var dstSizeByte [1]byte _, err = io.ReadFull(r, dstSizeByte[:]) if err != nil { - return nil, fmt.Errorf("could not read domain name size") + return socksAddr{}, fmt.Errorf("could not read domain name size") } dstSize := int(dstSizeByte[0]) domainName := make([]byte, dstSize) _, err = io.ReadFull(r, domainName) if err != nil { - return nil, fmt.Errorf("could not read domain name") + return socksAddr{}, fmt.Errorf("could not read domain name") } destination = string(domainName) - } else if destAddrType == ipv6 { + case ipv6: var ip [16]byte _, err = 
io.ReadFull(r, ip[:]) if err != nil { - return nil, fmt.Errorf("could not read IPv6 address") + return socksAddr{}, fmt.Errorf("could not read IPv6 address") } destination = net.IP(ip[:]).String() - } else { - return nil, fmt.Errorf("unsupported address type") + default: + return socksAddr{}, fmt.Errorf("unsupported address type") } var portBytes [2]byte _, err = io.ReadFull(r, portBytes[:]) if err != nil { - return nil, fmt.Errorf("could not read port") + return socksAddr{}, fmt.Errorf("could not read port") } - port = binary.BigEndian.Uint16(portBytes[:]) - - return &request{ - command: commandType(cmd), - destination: destination, - port: port, - destAddrType: destAddrType, + port := binary.BigEndian.Uint16(portBytes[:]) + return socksAddr{ + addrType: dstAddrType, + addr: destination, + port: port, }, nil } +func (s socksAddr) marshal() ([]byte, error) { + var addr []byte + switch s.addrType { + case ipv4: + addr = net.ParseIP(s.addr).To4() + if addr == nil { + return nil, fmt.Errorf("invalid IPv4 address for binding") + } + case domainName: + if len(s.addr) > 255 { + return nil, fmt.Errorf("invalid domain name for binding") + } + addr = make([]byte, 0, len(s.addr)+1) + addr = append(addr, byte(len(s.addr))) + addr = append(addr, []byte(s.addr)...) + case ipv6: + addr = net.ParseIP(s.addr).To16() + if addr == nil { + return nil, fmt.Errorf("invalid IPv6 address for binding") + } + default: + return nil, fmt.Errorf("unsupported address type") + } + + pkt := []byte{byte(s.addrType)} + pkt = append(pkt, addr...) + pkt = binary.BigEndian.AppendUint16(pkt, s.port) + return pkt, nil +} +func (s socksAddr) hostPort() string { + return net.JoinHostPort(s.addr, strconv.Itoa(int(s.port))) +} + // response contains the contents of // a response packet sent from the proxy // to the client. 
type response struct { - reply replyCode - bindAddrType addrType - bindAddr string - bindPort uint16 + reply replyCode + bindAddr socksAddr +} + +func errorResponse(code replyCode) *response { + return &response{reply: code, bindAddr: zeroSocksAddr} } // marshal converts a SOCKS5Response struct into // a packet. If res.reply == Success, it may throw an error on // receiving an invalid bind address. Otherwise, it will not throw. func (res *response) marshal() ([]byte, error) { - pkt := make([]byte, 4) + pkt := make([]byte, 3) pkt[0] = socks5Version pkt[1] = byte(res.reply) pkt[2] = 0 // null reserved byte - pkt[3] = byte(res.bindAddrType) - if res.reply != success { - return pkt, nil + addrPkt, err := res.bindAddr.marshal() + if err != nil { + return nil, err } - var addr []byte - switch res.bindAddrType { - case ipv4: - addr = net.ParseIP(res.bindAddr).To4() - if addr == nil { - return nil, fmt.Errorf("invalid IPv4 address for binding") - } - case domainName: - if len(res.bindAddr) > 255 { - return nil, fmt.Errorf("invalid domain name for binding") - } - addr = make([]byte, 0, len(res.bindAddr)+1) - addr = append(addr, byte(len(res.bindAddr))) - addr = append(addr, []byte(res.bindAddr)...) - case ipv6: - addr = net.ParseIP(res.bindAddr).To16() - if addr == nil { - return nil, fmt.Errorf("invalid IPv6 address for binding") - } - default: - return nil, fmt.Errorf("unsupported address type") + return append(pkt, addrPkt...), nil +} + +type udpRequest struct { + frag byte + addr socksAddr +} + +// +----+------+------+----------+----------+----------+ +// |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA | +// +----+------+------+----------+----------+----------+ +// | 2 | 1 | 1 | Variable | 2 | Variable | +// +----+------+------+----------+----------+----------+ +func parseUDPRequest(data []byte) (*udpRequest, []byte, error) { + if len(data) < 4 { + return nil, nil, fmt.Errorf("invalid packet length") } - pkt = append(pkt, addr...) 
- pkt = binary.BigEndian.AppendUint16(pkt, uint16(res.bindPort)) + // reserved bytes + if !(data[0] == 0 && data[1] == 0) { + return nil, nil, fmt.Errorf("invalid udp request header") + } - return pkt, nil + frag := data[2] + + reader := bytes.NewReader(data[3:]) + addr, err := parseSocksAddr(reader) + bodyLen := reader.Len() // (*bytes.Reader).Len() return unread data length + body := data[len(data)-bodyLen:] + return &udpRequest{ + frag: frag, + addr: addr, + }, body, err +} + +func (u *udpRequest) marshal() ([]byte, error) { + pkt := make([]byte, 3) + pkt[0] = 0 + pkt[1] = 0 + pkt[2] = u.frag + + addrPkt, err := u.addr.marshal() + if err != nil { + return nil, err + } + + return append(pkt, addrPkt...), nil } diff --git a/net/socks5/socks5_test.go b/net/socks5/socks5_test.go index 201a6657532f9..11ea59d4b57d1 100644 --- a/net/socks5/socks5_test.go +++ b/net/socks5/socks5_test.go @@ -4,6 +4,7 @@ package socks5 import ( + "bytes" "errors" "fmt" "io" @@ -32,6 +33,19 @@ func backendServer(listener net.Listener) { listener.Close() } +func udpEchoServer(conn net.PacketConn) { + var buf [1024]byte + n, addr, err := conn.ReadFrom(buf[:]) + if err != nil { + panic(err) + } + _, err = conn.WriteTo(buf[:n], addr) + if err != nil { + panic(err) + } + conn.Close() +} + func TestRead(t *testing.T) { // backend server which we'll use SOCKS5 to connect to listener, err := net.Listen("tcp", ":0") @@ -152,3 +166,102 @@ func TestReadPassword(t *testing.T) { t.Fatal(err) } } + +func TestUDP(t *testing.T) { + // backend UDP server which we'll use SOCKS5 to connect to + listener, err := net.ListenPacket("udp", ":0") + if err != nil { + t.Fatal(err) + } + backendServerPort := listener.LocalAddr().(*net.UDPAddr).Port + go udpEchoServer(listener) + + // SOCKS5 server + socks5, err := net.Listen("tcp", ":0") + if err != nil { + t.Fatal(err) + } + socks5Port := socks5.Addr().(*net.TCPAddr).Port + go socks5Server(socks5) + + // net/proxy don't support UDP, so we need to manually send the 
SOCKS5 UDP request + conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%d", socks5Port)) + if err != nil { + t.Fatal(err) + } + _, err = conn.Write([]byte{0x05, 0x01, 0x00}) // client hello with no auth + if err != nil { + t.Fatal(err) + } + buf := make([]byte, 1024) + n, err := conn.Read(buf) // server hello + if err != nil { + t.Fatal(err) + } + if n != 2 || buf[0] != 0x05 || buf[1] != 0x00 { + t.Fatalf("got: %q want: 0x05 0x00", buf[:n]) + } + + targetAddr := socksAddr{ + addrType: domainName, + addr: "localhost", + port: uint16(backendServerPort), + } + targetAddrPkt, err := targetAddr.marshal() + if err != nil { + t.Fatal(err) + } + _, err = conn.Write(append([]byte{0x05, 0x03, 0x00}, targetAddrPkt...)) // client reqeust + if err != nil { + t.Fatal(err) + } + + n, err = conn.Read(buf) // server response + if err != nil { + t.Fatal(err) + } + if n < 3 || !bytes.Equal(buf[:3], []byte{0x05, 0x00, 0x00}) { + t.Fatalf("got: %q want: 0x05 0x00 0x00", buf[:n]) + } + udpProxySocksAddr, err := parseSocksAddr(bytes.NewReader(buf[3:n])) + if err != nil { + t.Fatal(err) + } + + udpProxyAddr, err := net.ResolveUDPAddr("udp", udpProxySocksAddr.hostPort()) + if err != nil { + t.Fatal(err) + } + udpConn, err := net.DialUDP("udp", nil, udpProxyAddr) + if err != nil { + t.Fatal(err) + } + udpPayload, err := (&udpRequest{addr: targetAddr}).marshal() + if err != nil { + t.Fatal(err) + } + udpPayload = append(udpPayload, []byte("Test")...) 
+ _, err = udpConn.Write(udpPayload) // send udp package + if err != nil { + t.Fatal(err) + } + n, _, err = udpConn.ReadFrom(buf) + if err != nil { + t.Fatal(err) + } + _, responseBody, err := parseUDPRequest(buf[:n]) // read udp response + if err != nil { + t.Fatal(err) + } + if string(responseBody) != "Test" { + t.Fatalf("got: %q want: Test", responseBody) + } + err = udpConn.Close() + if err != nil { + t.Fatal(err) + } + err = conn.Close() + if err != nil { + t.Fatal(err) + } +} diff --git a/net/tlsdial/tlsdial.go b/net/tlsdial/tlsdial.go index 087d2fbce1687..a49e7f0f730ee 100644 --- a/net/tlsdial/tlsdial.go +++ b/net/tlsdial/tlsdial.go @@ -26,6 +26,7 @@ import ( "tailscale.com/envknob" "tailscale.com/health" + "tailscale.com/hostinfo" ) var counterFallbackOK int32 // atomic @@ -76,17 +77,36 @@ func Config(host string, ht *health.Tracker, base *tls.Config) *tls.Config { // own cert verification, as do the same work that it'd do // (with the baked-in fallback root) in the VerifyConnection hook. conf.InsecureSkipVerify = true - conf.VerifyConnection = func(cs tls.ConnectionState) error { + conf.VerifyConnection = func(cs tls.ConnectionState) (retErr error) { + if host == "log.tailscale.io" && hostinfo.IsNATLabGuestVM() { + // Allow log.tailscale.io TLS MITM for integration tests when + // the client's running within a NATLab VM. + return nil + } + // Perform some health checks on this certificate before we do // any verification. + var selfSignedIssuer string + if certs := cs.PeerCertificates; len(certs) > 0 && certIsSelfSigned(certs[0]) { + selfSignedIssuer = certs[0].Issuer.String() + } if ht != nil { - if certIsSelfSigned(cs.PeerCertificates[0]) { - // Self-signed certs are never valid. - ht.SetTLSConnectionError(cs.ServerName, fmt.Errorf("certificate is self-signed")) - } else { - // Ensure we clear any error state for this ServerName. 
- ht.SetTLSConnectionError(cs.ServerName, nil) - } + defer func() { + if retErr != nil && selfSignedIssuer != "" { + // Self-signed certs are never valid. + // + // TODO(bradfitz): plumb down the selfSignedIssuer as a + // structured health warning argument. + ht.SetTLSConnectionError(cs.ServerName, fmt.Errorf("likely intercepted connection; certificate is self-signed by %v", selfSignedIssuer)) + } else { + // Ensure we clear any error state for this ServerName. + ht.SetTLSConnectionError(cs.ServerName, nil) + if selfSignedIssuer != "" { + // Log the self-signed issuer, but don't treat it as an error. + log.Printf("tlsdial: warning: server cert for %q passed x509 validation but is self-signed by %q", host, selfSignedIssuer) + } + } + }() } // First try doing x509 verification with the system's diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go index f0c4b10421f5e..3606dd67f7ea2 100644 --- a/net/tsdial/tsdial.go +++ b/net/tsdial/tsdial.go @@ -166,6 +166,7 @@ func (d *Dialer) Close() error { c.Close() } d.activeSysConns = nil + d.PeerAPITransport().CloseIdleConnections() return nil } diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index 8ea73b4b20b13..24defba27a782 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -10,6 +10,7 @@ import ( "net/netip" "os" "reflect" + "runtime" "slices" "strings" "sync" @@ -17,6 +18,7 @@ import ( "time" "github.com/gaissmai/bart" + "github.com/tailscale/wireguard-go/conn" "github.com/tailscale/wireguard-go/device" "github.com/tailscale/wireguard-go/tun" "go4.org/mem" @@ -160,6 +162,10 @@ type Wrapper struct { PreFilterPacketInboundFromWireGuard FilterFunc // PostFilterPacketInboundFromWireGuard is the inbound filter function that runs after the main filter. PostFilterPacketInboundFromWireGuard FilterFunc + // EndPacketVectorInboundFromWireGuardFlush is a function that runs after all packets in a given vector + // have been handled by all filters. 
Filters may queue packets for the purposes of GRO, requiring an + // explicit flush. + EndPacketVectorInboundFromWireGuardFlush func() // PreFilterPacketOutboundToWireGuardNetstackIntercept is a filter function that runs before the main filter // for packets from the local system. This filter is populated by netstack to hook // packets that should be handled by netstack. If set, this filter runs before @@ -894,13 +900,7 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { return 0, res.err } if res.data == nil { - n, err := t.injectedRead(res.injected, buffs[0], offset) - sizes[0] = n - if err != nil && n == 0 { - return 0, err - } - - return 1, err + return t.injectedRead(res.injected, buffs, sizes, offset) } metricPacketOut.Add(int64(len(res.data))) @@ -955,27 +955,85 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { return buffsPos, res.err } -// injectedRead handles injected reads, which bypass filters. -func (t *Wrapper) injectedRead(res tunInjectedRead, buf []byte, offset int) (int, error) { - metricPacketOut.Add(1) +const ( + minTCPHeaderSize = 20 +) + +func stackGSOToTunGSO(pkt []byte, gso stack.GSO) (tun.GSOOptions, error) { + options := tun.GSOOptions{ + CsumStart: gso.L3HdrLen, + CsumOffset: gso.CsumOffset, + GSOSize: gso.MSS, + NeedsCsum: gso.NeedsCsum, + } + switch gso.Type { + case stack.GSONone: + options.GSOType = tun.GSONone + return options, nil + case stack.GSOTCPv4: + options.GSOType = tun.GSOTCPv4 + case stack.GSOTCPv6: + options.GSOType = tun.GSOTCPv6 + default: + return tun.GSOOptions{}, fmt.Errorf("unsupported gVisor GSOType: %v", gso.Type) + } + // options.HdrLen is both layer 3 and 4 together, whereas gVisor only + // gives us layer 3 length. We have to gather TCP header length + // ourselves. 
+ if len(pkt) < int(gso.L3HdrLen)+minTCPHeaderSize { + return tun.GSOOptions{}, errors.New("gVisor GSOTCP packet length too short") + } + tcphLen := uint16(pkt[int(gso.L3HdrLen)+12] >> 4 * 4) + options.HdrLen = gso.L3HdrLen + tcphLen + return options, nil +} - var n int - if !res.packet.IsNil() { +func invertGSOChecksum(pkt []byte, gso stack.GSO) { + if gso.NeedsCsum != true { + return + } + at := int(gso.L3HdrLen + gso.CsumOffset) + if at+1 > len(pkt)-1 { + return + } + pkt[at] = ^pkt[at] + pkt[at+1] = ^pkt[at+1] +} - n = copy(buf[offset:], res.packet.NetworkHeader().Slice()) - n += copy(buf[offset+n:], res.packet.TransportHeader().Slice()) - n += copy(buf[offset+n:], res.packet.Data().AsRange().ToSlice()) - res.packet.DecRef() +// injectedRead handles injected reads, which bypass filters. +func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []int, offset int) (n int, err error) { + var gso stack.GSO + + pkt := outBuffs[0][offset:] + if res.packet != nil { + bufN := copy(pkt, res.packet.NetworkHeader().Slice()) + bufN += copy(pkt[bufN:], res.packet.TransportHeader().Slice()) + bufN += copy(pkt[bufN:], res.packet.Data().AsRange().ToSlice()) + gso = res.packet.GSOOptions + pkt = pkt[:bufN] + defer res.packet.DecRef() // defer DecRef so we may continue to reference it } else { - n = copy(buf[offset:], res.data) + sizes[0] = copy(pkt, res.data) + pkt = pkt[:sizes[0]] + n = 1 } pc := t.peerConfig.Load() p := parsedPacketPool.Get().(*packet.Parsed) defer parsedPacketPool.Put(p) - p.Decode(buf[offset : offset+n]) + p.Decode(pkt) + + // We invert the transport layer checksum before and after snat() if gVisor + // handed us a segment with a partial checksum. A partial checksum is not a + // ones' complement of the sum, and incremental checksum updating that could + // occur as a result of snat() is not aware of this. 
Alternatively we could + // plumb partial transport layer checksum awareness down through snat(), + // but the surface area of such a change is much larger, and not yet + // justified by this singular case. + invertGSOChecksum(pkt, gso) pc.snat(p) + invertGSOChecksum(pkt, gso) if m := t.destIPActivity.Load(); m != nil { if fn := m[p.Dst.Addr()]; fn != nil { @@ -983,11 +1041,24 @@ func (t *Wrapper) injectedRead(res tunInjectedRead, buf []byte, offset int) (int } } + if res.packet != nil { + var gsoOptions tun.GSOOptions + gsoOptions, err = stackGSOToTunGSO(pkt, gso) + if err != nil { + return 0, err + } + n, err = tun.GSOSplit(pkt, gsoOptions, outBuffs, sizes, offset) + } + if stats := t.stats.Load(); stats != nil { - stats.UpdateTxVirtual(buf[offset:][:n]) + for i := 0; i < n; i++ { + stats.UpdateTxVirtual(outBuffs[i][offset : offset+sizes[i]]) + } } + t.noteActivity() - return n, nil + metricPacketOut.Add(int64(n)) + return n, err } func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook capture.Callback, pc *peerConfigTable) filter.Response { @@ -1112,6 +1183,9 @@ func (t *Wrapper) Write(buffs [][]byte, offset int) (int, error) { } } } + if t.EndPacketVectorInboundFromWireGuardFlush != nil { + t.EndPacketVectorInboundFromWireGuardFlush() + } if t.disableFilter { i = len(buffs) } @@ -1288,6 +1362,14 @@ func (t *Wrapper) InjectOutboundPacketBuffer(pkt *stack.PacketBuffer) error { } func (t *Wrapper) BatchSize() int { + if runtime.GOOS == "linux" { + // Always setup Linux to handle vectors, even in the very rare case that + // the underlying t.tdev returns 1. gVisor GSO is always enabled for + // Linux, and we cannot make a determination on gVisor usage at + // wireguard-go.Device startup, which is when this value matters for + // packet memory init. 
+ return conn.IdealBatchSize + } return t.tdev.BatchSize() } diff --git a/net/wsconn/wsconn.go b/net/wsconn/wsconn.go index 2e952f14f8de8..22b511ea81273 100644 --- a/net/wsconn/wsconn.go +++ b/net/wsconn/wsconn.go @@ -3,7 +3,7 @@ // Package wsconn contains an adapter type that turns // a websocket connection into a net.Conn. It a temporary fork of the -// netconn.go file from the nhooyr.io/websocket package while we wait for +// netconn.go file from the github.com/coder/websocket package while we wait for // https://github.com/nhooyr/websocket/pull/350 to be merged. package wsconn @@ -18,7 +18,7 @@ import ( "sync/atomic" "time" - "nhooyr.io/websocket" + "github.com/coder/websocket" ) // NetConn converts a *websocket.Conn into a net.Conn. diff --git a/posture/hwaddr.go b/posture/hwaddr.go index a38cc5be0352e..dd0b6d8be77ce 100644 --- a/posture/hwaddr.go +++ b/posture/hwaddr.go @@ -22,5 +22,5 @@ func GetHardwareAddrs() (hwaddrs []string, err error) { } }) slices.Sort(hwaddrs) - return + return slices.Compact(hwaddrs), err } diff --git a/prober/prober.go b/prober/prober.go index 36afb1a39659c..2a43628bda908 100644 --- a/prober/prober.go +++ b/prober/prober.go @@ -7,19 +7,26 @@ package prober import ( + "container/ring" "context" - "errors" + "encoding/json" "fmt" "hash/fnv" "log" "maps" "math/rand" + "net/http" "sync" "time" "github.com/prometheus/client_golang/prometheus" + "tailscale.com/tsweb" ) +// recentHistSize is the number of recent probe results and latencies to keep +// in memory. +const recentHistSize = 10 + // ProbeClass defines a probe of a specific type: a probing function that will // be regularly ran, and metric labels that will be added automatically to all // probes using this class. 
@@ -106,6 +113,14 @@ func (p *Prober) Run(name string, interval time.Duration, labels Labels, pc Prob l[k] = v } + probe := newProbe(p, name, interval, l, pc) + p.probes[name] = probe + go probe.loop() + return probe +} + +// newProbe creates a new Probe with the given parameters, but does not start it. +func newProbe(p *Prober, name string, interval time.Duration, l prometheus.Labels, pc ProbeClass) *Probe { ctx, cancel := context.WithCancel(context.Background()) probe := &Probe{ prober: p, @@ -117,6 +132,9 @@ func (p *Prober) Run(name string, interval time.Duration, labels Labels, pc Prob probeClass: pc, interval: interval, initialDelay: initialDelay(name, interval), + successHist: ring.New(recentHistSize), + latencyHist: ring.New(recentHistSize), + metrics: prometheus.NewRegistry(), metricLabels: l, mInterval: prometheus.NewDesc("interval_secs", "Probe interval in seconds", nil, l), @@ -131,15 +149,14 @@ func (p *Prober) Run(name string, interval time.Duration, labels Labels, pc Prob Name: "seconds_total", Help: "Total amount of time spent executing the probe", ConstLabels: l, }, []string{"status"}), } - - prometheus.WrapRegistererWithPrefix(p.namespace+"_", p.metrics).MustRegister(probe.metrics) + if p.metrics != nil { + prometheus.WrapRegistererWithPrefix(p.namespace+"_", p.metrics).MustRegister(probe.metrics) + } probe.metrics.MustRegister(probe) - - p.probes[name] = probe - go probe.loop() return probe } +// unregister removes a probe from the prober's internal state. 
func (p *Prober) unregister(probe *Probe) { p.mu.Lock() defer p.mu.Unlock() @@ -206,6 +223,7 @@ type Probe struct { ctx context.Context cancel context.CancelFunc // run to initiate shutdown stopped chan struct{} // closed when shutdown is complete + runMu sync.Mutex // ensures only one probe runs at a time name string probeClass ProbeClass @@ -232,6 +250,10 @@ type Probe struct { latency time.Duration // last successful probe latency succeeded bool // whether the last doProbe call succeeded lastErr error + + // History of recent probe results and latencies. + successHist *ring.Ring + latencyHist *ring.Ring } // Close shuts down the Probe and unregisters it from its Prober. @@ -278,13 +300,17 @@ func (p *Probe) loop() { } } -// run invokes fun and records the results. +// run invokes the probe function and records the result. It returns the probe +// result and an error if the probe failed. // -// fun is invoked with a timeout slightly less than interval, so that -// the probe either succeeds or fails before the next cycle is -// scheduled to start. -func (p *Probe) run() { - start := p.recordStart() +// The probe function is invoked with a timeout slightly less than interval, so +// that the probe either succeeds or fails before the next cycle is scheduled to +// start. +func (p *Probe) run() (pi ProbeInfo, err error) { + p.runMu.Lock() + defer p.runMu.Unlock() + + p.recordStart() defer func() { // Prevent a panic within one probe function from killing the // entire prober, so that a single buggy probe doesn't destroy @@ -293,29 +319,30 @@ func (p *Probe) run() { // alert for debugging. 
if r := recover(); r != nil { log.Printf("probe %s panicked: %v", p.name, r) - p.recordEnd(start, errors.New("panic")) + err = fmt.Errorf("panic: %v", r) + p.recordEnd(err) } }() timeout := time.Duration(float64(p.interval) * 0.8) ctx, cancel := context.WithTimeout(p.ctx, timeout) defer cancel() - err := p.probeClass.Probe(ctx) - p.recordEnd(start, err) + err = p.probeClass.Probe(ctx) + p.recordEnd(err) if err != nil { log.Printf("probe %s: %v", p.name, err) } + pi = p.probeInfoLocked() + return } -func (p *Probe) recordStart() time.Time { - st := p.prober.now() +func (p *Probe) recordStart() { p.mu.Lock() - defer p.mu.Unlock() - p.start = st - return st + p.start = p.prober.now() + p.mu.Unlock() } -func (p *Probe) recordEnd(start time.Time, err error) { +func (p *Probe) recordEnd(err error) { end := p.prober.now() p.mu.Lock() defer p.mu.Unlock() @@ -327,22 +354,55 @@ func (p *Probe) recordEnd(start time.Time, err error) { p.latency = latency p.mAttempts.WithLabelValues("ok").Inc() p.mSeconds.WithLabelValues("ok").Add(latency.Seconds()) + p.latencyHist.Value = latency + p.latencyHist = p.latencyHist.Next() } else { p.latency = 0 p.mAttempts.WithLabelValues("fail").Inc() p.mSeconds.WithLabelValues("fail").Add(latency.Seconds()) } + p.successHist.Value = p.succeeded + p.successHist = p.successHist.Next() } -// ProbeInfo is the state of a Probe. +// ProbeInfo is a snapshot of the configuration and state of a Probe. type ProbeInfo struct { - Start time.Time - End time.Time - Latency string - Result bool - Error string + Name string + Class string + Interval time.Duration + Labels map[string]string + Start time.Time + End time.Time + Latency time.Duration + Result bool + Error string + RecentResults []bool + RecentLatencies []time.Duration +} + +// RecentSuccessRatio returns the success ratio of the probe in the recent history. 
+func (pb ProbeInfo) RecentSuccessRatio() float64 { + if len(pb.RecentResults) == 0 { + return 0 + } + var sum int + for _, r := range pb.RecentResults { + if r { + sum++ + } + } + return float64(sum) / float64(len(pb.RecentResults)) } +// RecentMedianLatency returns the median latency of the probe in the recent history. +func (pb ProbeInfo) RecentMedianLatency() time.Duration { + if len(pb.RecentLatencies) == 0 { + return 0 + } + return pb.RecentLatencies[len(pb.RecentLatencies)/2] +} + +// ProbeInfo returns the state of all probes. func (p *Prober) ProbeInfo() map[string]ProbeInfo { out := map[string]ProbeInfo{} @@ -352,24 +412,99 @@ func (p *Prober) ProbeInfo() map[string]ProbeInfo { probes = append(probes, probe) } p.mu.Unlock() - for _, probe := range probes { probe.mu.Lock() - inf := ProbeInfo{ - Start: probe.start, - End: probe.end, - Result: probe.succeeded, + out[probe.name] = probe.probeInfoLocked() + probe.mu.Unlock() + } + return out +} + +// probeInfoLocked returns the state of the probe. +func (probe *Probe) probeInfoLocked() ProbeInfo { + inf := ProbeInfo{ + Name: probe.name, + Class: probe.probeClass.Class, + Interval: probe.interval, + Labels: probe.metricLabels, + Start: probe.start, + End: probe.end, + Result: probe.succeeded, + } + if probe.lastErr != nil { + inf.Error = probe.lastErr.Error() + } + if probe.latency > 0 { + inf.Latency = probe.latency + } + probe.latencyHist.Do(func(v any) { + if l, ok := v.(time.Duration); ok { + inf.RecentLatencies = append(inf.RecentLatencies, l) + } + }) + probe.successHist.Do(func(v any) { + if r, ok := v.(bool); ok { + inf.RecentResults = append(inf.RecentResults, r) } - if probe.lastErr != nil { - inf.Error = probe.lastErr.Error() + }) + return inf +} + +// RunHandlerResponse is the JSON response format for the RunHandler. 
+type RunHandlerResponse struct { + ProbeInfo ProbeInfo + PreviousSuccessRatio float64 + PreviousMedianLatency time.Duration +} + +// RunHandler runs a probe by name and returns the result as an HTTP response. +func (p *Prober) RunHandler(w http.ResponseWriter, r *http.Request) error { + // Look up prober by name. + name := r.FormValue("name") + if name == "" { + return tsweb.Error(http.StatusBadRequest, "missing name parameter", nil) + } + p.mu.Lock() + probe, ok := p.probes[name] + p.mu.Unlock() + if !ok { + return tsweb.Error(http.StatusNotFound, fmt.Sprintf("unknown probe %q", name), nil) + } + + probe.mu.Lock() + prevInfo := probe.probeInfoLocked() + probe.mu.Unlock() + + info, err := probe.run() + respStatus := http.StatusOK + if err != nil { + respStatus = http.StatusFailedDependency + } + + // Return serialized JSON response if the client requested JSON + if r.Header.Get("Accept") == "application/json" { + resp := &RunHandlerResponse{ + ProbeInfo: info, + PreviousSuccessRatio: prevInfo.RecentSuccessRatio(), + PreviousMedianLatency: prevInfo.RecentMedianLatency(), } - if probe.latency > 0 { - inf.Latency = probe.latency.String() + w.WriteHeader(respStatus) + w.Header().Set("Content-Type", "application/json") + if err := json.NewEncoder(w).Encode(resp); err != nil { + return tsweb.Error(http.StatusInternalServerError, "error encoding JSON response", err) } - out[probe.name] = inf - probe.mu.Unlock() + return nil } - return out + + stats := fmt.Sprintf("Last %d probes: success rate %d%%, median latency %v\n", + len(prevInfo.RecentResults), + int(prevInfo.RecentSuccessRatio()*100), prevInfo.RecentMedianLatency()) + if err != nil { + return tsweb.Error(respStatus, fmt.Sprintf("Probe failed: %s\n%s", err.Error(), stats), err) + } + w.WriteHeader(respStatus) + w.Write([]byte(fmt.Sprintf("Probe succeeded in %v\n%s", info.Latency, stats))) + return nil } // Describe implements prometheus.Collector. 
diff --git a/prober/prober_test.go b/prober/prober_test.go index af645ef004d92..742a914b24661 100644 --- a/prober/prober_test.go +++ b/prober/prober_test.go @@ -5,16 +5,22 @@ package prober import ( "context" + "encoding/json" "errors" "fmt" + "io" + "net/http/httptest" "strings" "sync" "sync/atomic" "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/prometheus/client_golang/prometheus/testutil" "tailscale.com/tstest" + "tailscale.com/tsweb" ) const ( @@ -292,6 +298,254 @@ func TestOnceMode(t *testing.T) { } } +func TestProberProbeInfo(t *testing.T) { + clk := newFakeTime() + p := newForTest(clk.Now, clk.NewTicker).WithOnce(true) + + p.Run("probe1", probeInterval, nil, FuncProbe(func(context.Context) error { + clk.Advance(500 * time.Millisecond) + return nil + })) + p.Run("probe2", probeInterval, nil, FuncProbe(func(context.Context) error { return fmt.Errorf("error2") })) + p.Wait() + + info := p.ProbeInfo() + wantInfo := map[string]ProbeInfo{ + "probe1": { + Name: "probe1", + Interval: probeInterval, + Labels: map[string]string{"class": "", "name": "probe1"}, + Latency: 500 * time.Millisecond, + Result: true, + RecentResults: []bool{true}, + RecentLatencies: []time.Duration{500 * time.Millisecond}, + }, + "probe2": { + Name: "probe2", + Interval: probeInterval, + Labels: map[string]string{"class": "", "name": "probe2"}, + Error: "error2", + RecentResults: []bool{false}, + RecentLatencies: nil, // no latency for failed probes + }, + } + + if diff := cmp.Diff(wantInfo, info, cmpopts.IgnoreFields(ProbeInfo{}, "Start", "End")); diff != "" { + t.Fatalf("unexpected ProbeInfo (-want +got):\n%s", diff) + } +} + +func TestProbeInfoRecent(t *testing.T) { + type probeResult struct { + latency time.Duration + err error + } + tests := []struct { + name string + results []probeResult + wantProbeInfo ProbeInfo + wantRecentSuccessRatio float64 + wantRecentMedianLatency time.Duration + }{ + { + name: "no_runs", + wantProbeInfo: 
ProbeInfo{}, + wantRecentSuccessRatio: 0, + wantRecentMedianLatency: 0, + }, + { + name: "single_success", + results: []probeResult{{latency: 100 * time.Millisecond, err: nil}}, + wantProbeInfo: ProbeInfo{ + Latency: 100 * time.Millisecond, + Result: true, + RecentResults: []bool{true}, + RecentLatencies: []time.Duration{100 * time.Millisecond}, + }, + wantRecentSuccessRatio: 1, + wantRecentMedianLatency: 100 * time.Millisecond, + }, + { + name: "single_failure", + results: []probeResult{{latency: 100 * time.Millisecond, err: errors.New("error123")}}, + wantProbeInfo: ProbeInfo{ + Result: false, + RecentResults: []bool{false}, + RecentLatencies: nil, + Error: "error123", + }, + wantRecentSuccessRatio: 0, + wantRecentMedianLatency: 0, + }, + { + name: "recent_mix", + results: []probeResult{ + {latency: 10 * time.Millisecond, err: errors.New("error1")}, + {latency: 20 * time.Millisecond, err: nil}, + {latency: 30 * time.Millisecond, err: nil}, + {latency: 40 * time.Millisecond, err: errors.New("error4")}, + {latency: 50 * time.Millisecond, err: nil}, + {latency: 60 * time.Millisecond, err: nil}, + {latency: 70 * time.Millisecond, err: errors.New("error7")}, + {latency: 80 * time.Millisecond, err: nil}, + }, + wantProbeInfo: ProbeInfo{ + Result: true, + Latency: 80 * time.Millisecond, + RecentResults: []bool{false, true, true, false, true, true, false, true}, + RecentLatencies: []time.Duration{ + 20 * time.Millisecond, + 30 * time.Millisecond, + 50 * time.Millisecond, + 60 * time.Millisecond, + 80 * time.Millisecond, + }, + }, + wantRecentSuccessRatio: 0.625, + wantRecentMedianLatency: 50 * time.Millisecond, + }, + { + name: "only_last_10", + results: []probeResult{ + {latency: 10 * time.Millisecond, err: errors.New("old_error")}, + {latency: 20 * time.Millisecond, err: nil}, + {latency: 30 * time.Millisecond, err: nil}, + {latency: 40 * time.Millisecond, err: nil}, + {latency: 50 * time.Millisecond, err: nil}, + {latency: 60 * time.Millisecond, err: nil}, + {latency: 
70 * time.Millisecond, err: nil}, + {latency: 80 * time.Millisecond, err: nil}, + {latency: 90 * time.Millisecond, err: nil}, + {latency: 100 * time.Millisecond, err: nil}, + {latency: 110 * time.Millisecond, err: nil}, + }, + wantProbeInfo: ProbeInfo{ + Result: true, + Latency: 110 * time.Millisecond, + RecentResults: []bool{true, true, true, true, true, true, true, true, true, true}, + RecentLatencies: []time.Duration{ + 20 * time.Millisecond, + 30 * time.Millisecond, + 40 * time.Millisecond, + 50 * time.Millisecond, + 60 * time.Millisecond, + 70 * time.Millisecond, + 80 * time.Millisecond, + 90 * time.Millisecond, + 100 * time.Millisecond, + 110 * time.Millisecond, + }, + }, + wantRecentSuccessRatio: 1, + wantRecentMedianLatency: 70 * time.Millisecond, + }, + } + + clk := newFakeTime() + p := newForTest(clk.Now, clk.NewTicker).WithOnce(true) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + probe := newProbe(p, "", probeInterval, nil, FuncProbe(func(context.Context) error { return nil })) + for _, r := range tt.results { + probe.recordStart() + clk.Advance(r.latency) + probe.recordEnd(r.err) + } + info := probe.probeInfoLocked() + if diff := cmp.Diff(tt.wantProbeInfo, info, cmpopts.IgnoreFields(ProbeInfo{}, "Start", "End", "Interval")); diff != "" { + t.Fatalf("unexpected ProbeInfo (-want +got):\n%s", diff) + } + if got := info.RecentSuccessRatio(); got != tt.wantRecentSuccessRatio { + t.Errorf("recentSuccessRatio() = %v, want %v", got, tt.wantRecentSuccessRatio) + } + if got := info.RecentMedianLatency(); got != tt.wantRecentMedianLatency { + t.Errorf("recentMedianLatency() = %v, want %v", got, tt.wantRecentMedianLatency) + } + }) + } +} + +func TestProberRunHandler(t *testing.T) { + clk := newFakeTime() + + tests := []struct { + name string + probeFunc func(context.Context) error + wantResponseCode int + wantJSONResponse RunHandlerResponse + wantPlaintextResponse string + }{ + { + name: "success", + probeFunc: func(context.Context) error { 
return nil }, + wantResponseCode: 200, + wantJSONResponse: RunHandlerResponse{ + ProbeInfo: ProbeInfo{ + Name: "success", + Interval: probeInterval, + Result: true, + RecentResults: []bool{true, true}, + }, + PreviousSuccessRatio: 1, + }, + wantPlaintextResponse: "Probe succeeded", + }, + { + name: "failure", + probeFunc: func(context.Context) error { return fmt.Errorf("error123") }, + wantResponseCode: 424, + wantJSONResponse: RunHandlerResponse{ + ProbeInfo: ProbeInfo{ + Name: "failure", + Interval: probeInterval, + Result: false, + Error: "error123", + RecentResults: []bool{false, false}, + }, + }, + wantPlaintextResponse: "Probe failed", + }, + } + + for _, tt := range tests { + for _, reqJSON := range []bool{true, false} { + t.Run(fmt.Sprintf("%s_json-%v", tt.name, reqJSON), func(t *testing.T) { + p := newForTest(clk.Now, clk.NewTicker).WithOnce(true) + probe := p.Run(tt.name, probeInterval, nil, FuncProbe(tt.probeFunc)) + defer probe.Close() + <-probe.stopped // wait for the first run. 
+ + w := httptest.NewRecorder() + + req := httptest.NewRequest("GET", "/prober/run/?name="+tt.name, nil) + if reqJSON { + req.Header.Set("Accept", "application/json") + } + tsweb.StdHandler(tsweb.ReturnHandlerFunc(p.RunHandler), tsweb.HandlerOptions{}).ServeHTTP(w, req) + if w.Result().StatusCode != tt.wantResponseCode { + t.Errorf("unexpected response code: got %d, want %d", w.Code, tt.wantResponseCode) + } + + if reqJSON { + var gotJSON RunHandlerResponse + if err := json.Unmarshal(w.Body.Bytes(), &gotJSON); err != nil { + t.Fatalf("failed to unmarshal JSON response: %v; body: %s", err, w.Body.String()) + } + if diff := cmp.Diff(tt.wantJSONResponse, gotJSON, cmpopts.IgnoreFields(ProbeInfo{}, "Start", "End", "Labels", "RecentLatencies")); diff != "" { + t.Errorf("unexpected JSON response (-want +got):\n%s", diff) + } + } else { + body, _ := io.ReadAll(w.Result().Body) + if !strings.Contains(string(body), tt.wantPlaintextResponse) { + t.Errorf("unexpected response body: got %q, want to contain %q", body, tt.wantPlaintextResponse) + } + } + }) + } + } + +} + type fakeTicker struct { ch chan time.Time interval time.Duration diff --git a/prober/status.go b/prober/status.go new file mode 100644 index 0000000000000..aa9ef99d05d2c --- /dev/null +++ b/prober/status.go @@ -0,0 +1,124 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package prober + +import ( + "embed" + "fmt" + "html/template" + "net/http" + "strings" + "time" + + "tailscale.com/tsweb" + "tailscale.com/util/mak" +) + +//go:embed status.html +var statusFiles embed.FS +var statusTpl = template.Must(template.ParseFS(statusFiles, "status.html")) + +type statusHandlerOpt func(*statusHandlerParams) +type statusHandlerParams struct { + title string + + pageLinks map[string]string + probeLinks map[string]string +} + +// WithTitle sets the title of the status page. 
+func WithTitle(title string) statusHandlerOpt { + return func(opts *statusHandlerParams) { + opts.title = title + } +} + +// WithPageLink adds a top-level link to the status page. +func WithPageLink(text, url string) statusHandlerOpt { + return func(opts *statusHandlerParams) { + mak.Set(&opts.pageLinks, text, url) + } +} + +// WithProbeLink adds a link to each probe on the status page. +// The textTpl and urlTpl are Go templates that will be rendered +// with the respective ProbeInfo struct as the data. +func WithProbeLink(textTpl, urlTpl string) statusHandlerOpt { + return func(opts *statusHandlerParams) { + mak.Set(&opts.probeLinks, textTpl, urlTpl) + } +} + +// StatusHandler is a handler for the probe overview HTTP endpoint. +// It shows a list of probes and their current status. +func (p *Prober) StatusHandler(opts ...statusHandlerOpt) tsweb.ReturnHandlerFunc { + params := &statusHandlerParams{ + title: "Prober Status", + } + for _, opt := range opts { + opt(params) + } + return func(w http.ResponseWriter, r *http.Request) error { + type probeStatus struct { + ProbeInfo + TimeSinceLast time.Duration + Links map[string]template.URL + } + vars := struct { + Title string + Links map[string]template.URL + TotalProbes int64 + UnhealthyProbes int64 + Probes map[string]probeStatus + }{ + Title: params.title, + } + + for text, url := range params.pageLinks { + mak.Set(&vars.Links, text, template.URL(url)) + } + + for name, info := range p.ProbeInfo() { + vars.TotalProbes++ + if !info.Result { + vars.UnhealthyProbes++ + } + s := probeStatus{ProbeInfo: info} + if !info.End.IsZero() { + s.TimeSinceLast = time.Since(info.End).Truncate(time.Second) + } + for textTpl, urlTpl := range params.probeLinks { + text, err := renderTemplate(textTpl, info) + if err != nil { + return tsweb.Error(500, err.Error(), err) + } + url, err := renderTemplate(urlTpl, info) + if err != nil { + return tsweb.Error(500, err.Error(), err) + } + mak.Set(&s.Links, text, template.URL(url)) + } + 
mak.Set(&vars.Probes, name, s) + } + + if err := statusTpl.ExecuteTemplate(w, "status", vars); err != nil { + return tsweb.HTTPError{Code: 500, Err: err, Msg: "error rendering status page"} + } + return nil + } +} + +// renderTemplate renders the given Go template with the provided data +// and returns the result as a string. +func renderTemplate(tpl string, data any) (string, error) { + t, err := template.New("").Parse(tpl) + if err != nil { + return "", fmt.Errorf("error parsing template %q: %w", tpl, err) + } + var buf strings.Builder + if err := t.ExecuteTemplate(&buf, "", data); err != nil { + return "", fmt.Errorf("error rendering template %q with data %v: %w", tpl, data, err) + } + return buf.String(), nil +} diff --git a/prober/status.html b/prober/status.html new file mode 100644 index 0000000000000..ff0f06c13fe62 --- /dev/null +++ b/prober/status.html @@ -0,0 +1,132 @@ +{{define "status"}} + + {{.Title}} + + +

    {{.Title}}

    +
      +
    • Prober Status: + {{if .UnhealthyProbes }} + {{.UnhealthyProbes}} + out of {{.TotalProbes}} probes failed or never ran. + {{else}} + All {{.TotalProbes}} probes are healthy + {{end}} +
    • + {{ range $text, $url := .Links }} +
    • {{$text}}
    • + {{end}} +
    + +

    Probes:

    + + + + + + + + + + + + {{range $name, $probeInfo := .Probes}} + + + + + + + + + + {{end}} + +
    NameProbe Class & LabelsIntervalLast AttemptSuccessLatencyLast Error
    + {{$name}} + {{range $text, $url := $probeInfo.Links}} +
    + + {{end}} +
    {{$probeInfo.Class}}
    +
    + {{range $label, $value := $probeInfo.Labels}} + {{$label}}={{$value}}
    + {{end}} +
    +
    {{$probeInfo.Interval}} + {{if $probeInfo.TimeSinceLast}} + {{$probeInfo.TimeSinceLast.String}} ago
    + {{$probeInfo.End.Format "2006-01-02T15:04:05Z07:00"}} + {{else}} + Never + {{end}} +
    + {{if $probeInfo.Result}} + {{$probeInfo.Result}} + {{else}} + {{$probeInfo.Result}} + {{end}}
    +
    Recent: {{$probeInfo.RecentResults}}
    +
    Mean: {{$probeInfo.RecentSuccessRatio}}
    +
    + {{$probeInfo.Latency.String}} +
    Recent: {{$probeInfo.RecentLatencies}}
    +
    Median: {{$probeInfo.RecentMedianLatency}}
    +
    {{$probeInfo.Error}}
    + + + + +{{end}} diff --git a/safeweb/http.go b/safeweb/http.go index 4181f9d0c81c0..c2787611e2457 100644 --- a/safeweb/http.go +++ b/safeweb/http.go @@ -300,3 +300,8 @@ func (s *Server) ServeRedirectHTTP(ln net.Listener, fqdn string) error { func (s *Server) Serve(ln net.Listener) error { return s.h.Serve(ln) } + +// Close closes all client connections and stops accepting new ones. +func (s *Server) Close() error { + return s.h.Close() +} diff --git a/safeweb/http_test.go b/safeweb/http_test.go index c5e2f9cbd3c61..f48aa64a79b7a 100644 --- a/safeweb/http_test.go +++ b/safeweb/http_test.go @@ -80,6 +80,7 @@ func TestPostRequestContentTypeValidation(t *testing.T) { if err != nil { t.Fatal(err) } + defer s.Close() req := httptest.NewRequest("POST", "/", nil) req.Header.Set("Content-Type", tt.contentType) @@ -137,6 +138,7 @@ func TestAPIMuxCrossOriginResourceSharingHeaders(t *testing.T) { if err != nil { t.Fatal(err) } + defer s.Close() req := httptest.NewRequest(tt.httpMethod, "/", nil) w := httptest.NewRecorder() @@ -192,6 +194,7 @@ func TestCSRFProtection(t *testing.T) { if err != nil { t.Fatal(err) } + defer s.Close() // construct the test request req := httptest.NewRequest("POST", "/", nil) @@ -267,6 +270,7 @@ func TestContentSecurityPolicyHeader(t *testing.T) { if err != nil { t.Fatal(err) } + defer s.Close() req := httptest.NewRequest("GET", "/", nil) w := httptest.NewRecorder() @@ -307,6 +311,7 @@ func TestCSRFCookieSecureMode(t *testing.T) { if err != nil { t.Fatal(err) } + defer s.Close() req := httptest.NewRequest("GET", "/", nil) w := httptest.NewRecorder() @@ -355,6 +360,7 @@ func TestRefererPolicy(t *testing.T) { if err != nil { t.Fatal(err) } + defer s.Close() req := httptest.NewRequest("GET", "/", nil) w := httptest.NewRecorder() @@ -379,6 +385,7 @@ func TestCSPAllowInlineStyles(t *testing.T) { if err != nil { t.Fatal(err) } + defer s.Close() req := httptest.NewRequest("GET", "/", nil) w := httptest.NewRecorder() @@ -474,6 +481,7 @@ func 
TestRouting(t *testing.T) { if err != nil { t.Fatal(err) } + defer s.Close() req := httptest.NewRequest("GET", tt.requestPath, nil) w := httptest.NewRecorder() diff --git a/ssh/tailssh/connect.go b/sessionrecording/connect.go similarity index 96% rename from ssh/tailssh/connect.go rename to sessionrecording/connect.go index c8602eaf31a03..12c5c8c018b6b 100644 --- a/ssh/tailssh/connect.go +++ b/sessionrecording/connect.go @@ -1,7 +1,9 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package tailssh +// Package sessionrecording contains session recording utils shared amongst +// Tailscale SSH and Kubernetes API server proxy session recording. +package sessionrecording import ( "context" diff --git a/sessionrecording/header.go b/sessionrecording/header.go new file mode 100644 index 0000000000000..4806f6585f976 --- /dev/null +++ b/sessionrecording/header.go @@ -0,0 +1,78 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package sessionrecording + +import "tailscale.com/tailcfg" + +// CastHeader is the header of an asciinema file. +type CastHeader struct { + // Version is the asciinema file format version. + Version int `json:"version"` + + // Width is the terminal width in characters. + // It is non-zero for Pty sessions. + Width int `json:"width"` + + // Height is the terminal height in characters. + // It is non-zero for Pty sessions. + Height int `json:"height"` + + // Timestamp is the unix timestamp of when the recording started. + Timestamp int64 `json:"timestamp"` + + // Command is the command that was executed. + // Typically empty for shell sessions. + Command string `json:"command,omitempty"` + + // SrcNode is the FQDN of the node originating the connection. + // It is also the MagicDNS name for the node. + // It does not have a trailing dot. + // e.g. "host.tail-scale.ts.net" + SrcNode string `json:"srcNode"` + + // SrcNodeID is the node ID of the node originating the connection. 
+ SrcNodeID tailcfg.StableNodeID `json:"srcNodeID"` + + // Tailscale-specific fields: + // SrcNodeTags is the list of tags on the node originating the connection (if any). + SrcNodeTags []string `json:"srcNodeTags,omitempty"` + + // SrcNodeUserID is the user ID of the node originating the connection (if not tagged). + SrcNodeUserID tailcfg.UserID `json:"srcNodeUserID,omitempty"` // if not tagged + + // SrcNodeUser is the LoginName of the node originating the connection (if not tagged). + SrcNodeUser string `json:"srcNodeUser,omitempty"` + + // Fields that are only set for Tailscale SSH session recordings: + + // Env is the environment variables of the session. + // Only "TERM" is set (2023-03-22). + Env map[string]string `json:"env"` + + // SSHUser is the username as presented by the client. + SSHUser string `json:"sshUser"` // as presented by the client + + // LocalUser is the effective username on the server. + LocalUser string `json:"localUser"` + + // ConnectionID uniquely identifies a connection made to the SSH server. + // It may be shared across multiple sessions over the same connection in + // case of SSH multiplexing. + ConnectionID string `json:"connectionID"` + + // Fields that are only set for Kubernetes API server proxy session recordings: + + Kubernetes *Kubernetes `json:"kubernetes,omitempty"` +} + +// Kubernetes contains 'kubectl exec' session specific information for +// tsrecorder. +type Kubernetes struct { + // PodName is the name of the Pod being exec-ed. + PodName string + // Namespace is the namespace in which is the Pod that is being exec-ed. + Namespace string + // Container is the container being exec-ed. 
+ Container string +} diff --git a/shell.nix b/shell.nix index 3467ecca2177a..839da956e1096 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-CRzwQpi//TuLU8P66Dh4IdmM96f1YF10XyFfFBF4pQA= +# nix-direnv cache busting line: sha256-M5e5dE1gGW3ly94r3SxCsBmVwbBmhVtaVDW691vxG/8= diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index a088335d8a850..fd747f5917ed1 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -36,6 +36,7 @@ import ( "tailscale.com/logtail/backoff" "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" + "tailscale.com/sessionrecording" "tailscale.com/tailcfg" "tailscale.com/tempfork/gliderlabs/ssh" "tailscale.com/types/key" @@ -1428,61 +1429,6 @@ func randBytes(n int) []byte { return b } -// CastHeader is the header of an asciinema file. -type CastHeader struct { - // Version is the asciinema file format version. - Version int `json:"version"` - - // Width is the terminal width in characters. - // It is non-zero for Pty sessions. - Width int `json:"width"` - - // Height is the terminal height in characters. - // It is non-zero for Pty sessions. - Height int `json:"height"` - - // Timestamp is the unix timestamp of when the recording started. - Timestamp int64 `json:"timestamp"` - - // Env is the environment variables of the session. - // Only "TERM" is set (2023-03-22). - Env map[string]string `json:"env"` - - // Command is the command that was executed. - // Typically empty for shell sessions. - Command string `json:"command,omitempty"` - - // Tailscale-specific fields: - // SrcNode is the FQDN of the node originating the connection. - // It is also the MagicDNS name for the node. - // It does not have a trailing dot. - // e.g. "host.tail-scale.ts.net" - SrcNode string `json:"srcNode"` - - // SrcNodeID is the node ID of the node originating the connection. 
- SrcNodeID tailcfg.StableNodeID `json:"srcNodeID"` - - // SrcNodeTags is the list of tags on the node originating the connection (if any). - SrcNodeTags []string `json:"srcNodeTags,omitempty"` - - // SrcNodeUserID is the user ID of the node originating the connection (if not tagged). - SrcNodeUserID tailcfg.UserID `json:"srcNodeUserID,omitempty"` // if not tagged - - // SrcNodeUser is the LoginName of the node originating the connection (if not tagged). - SrcNodeUser string `json:"srcNodeUser,omitempty"` - - // SSHUser is the username as presented by the client. - SSHUser string `json:"sshUser"` // as presented by the client - - // LocalUser is the effective username on the server. - LocalUser string `json:"localUser"` - - // ConnectionID uniquely identifies a connection made to the SSH server. - // It may be shared across multiple sessions over the same connection in - // case of SSH multiplexing. - ConnectionID string `json:"connectionID"` -} - func (ss *sshSession) openFileForRecording(now time.Time) (_ io.WriteCloser, err error) { varRoot := ss.conn.srv.lb.TailscaleVarRoot() if varRoot == "" { @@ -1548,7 +1494,7 @@ func (ss *sshSession) startNewRecording() (_ *recording, err error) { } else { var errChan <-chan error var attempts []*tailcfg.SSHRecordingAttempt - rec.out, attempts, errChan, err = ConnectToRecorder(ctx, recorders, ss.conn.srv.lb.Dialer().UserDial) + rec.out, attempts, errChan, err = sessionrecording.ConnectToRecorder(ctx, recorders, ss.conn.srv.lb.Dialer().UserDial) if err != nil { if onFailure != nil && onFailure.NotifyURL != "" && len(attempts) > 0 { eventType := tailcfg.SSHSessionRecordingFailed @@ -1598,7 +1544,7 @@ func (ss *sshSession) startNewRecording() (_ *recording, err error) { }() } - ch := CastHeader{ + ch := sessionrecording.CastHeader{ Version: 2, Width: w.Width, Height: w.Height, diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 3f63554e51ed4..bfc67081400fd 100644 --- a/ssh/tailssh/tailssh_test.go +++ 
b/ssh/tailssh/tailssh_test.go @@ -36,6 +36,7 @@ import ( "tailscale.com/ipn/store/mem" "tailscale.com/net/memnet" "tailscale.com/net/tsdial" + "tailscale.com/sessionrecording" "tailscale.com/tailcfg" "tailscale.com/tempfork/gliderlabs/ssh" "tailscale.com/tsd" @@ -630,7 +631,7 @@ func TestSSHRecordingNonInteractive(t *testing.T) { wg.Wait() <-ctx.Done() // wait for recording to finish - var ch CastHeader + var ch sessionrecording.CastHeader if err := json.NewDecoder(bytes.NewReader(recording)).Decode(&ch); err != nil { t.Fatal(err) } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 60a2244dd27d0..5a06c89ff9648 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -145,7 +145,9 @@ type CapabilityVersion int // - 100: 2024-06-18: Client supports filtertype.Match.SrcCaps (issue #12542) // - 101: 2024-07-01: Client supports SSH agent forwarding when handling connections with /bin/su // - 102: 2024-07-12: NodeAttrDisableMagicSockCryptoRouting support -const CurrentCapabilityVersion CapabilityVersion = 102 +// - 103: 2024-07-24: Client supports NodeAttrDisableCaptivePortalDetection +// - 104: 2024-08-03: SelfNodeV6MasqAddrForThisPeer now works +const CurrentCapabilityVersion CapabilityVersion = 104 type StableID string @@ -2327,6 +2329,10 @@ const ( // NodeAttrDisableMagicSockCryptoRouting disables the use of the // magicsock cryptorouting hook. See tailscale/corp#20732. NodeAttrDisableMagicSockCryptoRouting NodeCapability = "disable-magicsock-crypto-routing" + + // NodeAttrDisableCaptivePortalDetection instructs the client to not perform captive portal detection + // automatically when the network state changes. + NodeAttrDisableCaptivePortalDetection NodeCapability = "disable-captive-portal-detection" ) // SetDNSRequest is a request to add a DNS record. 
diff --git a/tool/node.rev b/tool/node.rev index 3876fd4986460..17719ce25a0c2 100644 --- a/tool/node.rev +++ b/tool/node.rev @@ -1 +1 @@ -18.16.1 +18.20.4 diff --git a/tsconst/interface.go b/tsconst/interface.go index f64e290a9337e..d17aa356d25fe 100644 --- a/tsconst/interface.go +++ b/tsconst/interface.go @@ -9,3 +9,7 @@ package tsconst // interfaces on Windows. This is set by the WinTun driver. const WintunInterfaceDesc = "Tailscale Tunnel" const WintunInterfaceDesc0_14 = "Wintun Userspace Tunnel" + +// TailnetLockNotTrustedMsg is the error message used by network lock +// and sniffed (via substring) out of an error sent over the network. +const TailnetLockNotTrustedMsg = "this node is not trusted by network lock" diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index d6fcdca272fb8..36a92759f7dd4 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -190,6 +190,7 @@ func RunDERPAndSTUN(t testing.TB, logf logger.Logf, ipAddress string) (derpMap * } httpsrv := httptest.NewUnstartedServer(derphttp.Handler(d)) + httpsrv.Listener.Close() httpsrv.Listener = ln httpsrv.Config.ErrorLog = logger.StdLogger(logf) httpsrv.Config.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler)) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 236c02e3244ad..30ac510361c06 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -842,6 +842,7 @@ func TestClientSideJailing(t *testing.T) { // TestNATPing creates two nodes, n1 and n2, sets up masquerades for both and // tries to do bi-directional pings between them. 
func TestNATPing(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/12169") tstest.Shard(t) tstest.Parallel(t) for _, v6 := range []bool{false, true} { diff --git a/tstest/integration/nat/nat_test.go b/tstest/integration/nat/nat_test.go new file mode 100644 index 0000000000000..d88a56e74cfbf --- /dev/null +++ b/tstest/integration/nat/nat_test.go @@ -0,0 +1,599 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package nat + +import ( + "bytes" + "cmp" + "context" + "errors" + "flag" + "fmt" + "io" + "net" + "net/http" + "net/netip" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + "testing" + "time" + + "golang.org/x/mod/modfile" + "golang.org/x/sync/errgroup" + "tailscale.com/client/tailscale" + "tailscale.com/ipn/ipnstate" + "tailscale.com/syncs" + "tailscale.com/tailcfg" + "tailscale.com/tstest/natlab/vnet" +) + +var ( + logTailscaled = flag.Bool("log-tailscaled", false, "log tailscaled output") + pcapFile = flag.String("pcap", "", "write pcap to file") +) + +type natTest struct { + tb testing.TB + base string // base image + tempDir string // for qcow2 images + vnet *vnet.Server + kernel string // linux kernel path + + gotRoute pingRoute +} + +func newNatTest(tb testing.TB) *natTest { + root, err := os.Getwd() + if err != nil { + tb.Fatal(err) + } + modRoot := filepath.Join(root, "../../..") + + nt := &natTest{ + tb: tb, + tempDir: tb.TempDir(), + base: filepath.Join(modRoot, "gokrazy/natlabapp.qcow2"), + } + + if _, err := os.Stat(nt.base); err != nil { + tb.Skipf("skipping test; base image %q not found", nt.base) + } + + nt.kernel, err = findKernelPath(filepath.Join(modRoot, "gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod")) + if err != nil { + tb.Skipf("skipping test; kernel not found: %v", err) + } + tb.Logf("found kernel: %v", nt.kernel) + + return nt +} + +func findKernelPath(goMod string) (string, error) { + b, err := os.ReadFile(goMod) + if err != nil 
{ + return "", err + } + mf, err := modfile.Parse("go.mod", b, nil) + if err != nil { + return "", err + } + goModB, err := exec.Command("go", "env", "GOMODCACHE").CombinedOutput() + if err != nil { + return "", err + } + for _, r := range mf.Require { + if r.Mod.Path == "github.com/tailscale/gokrazy-kernel" { + return strings.TrimSpace(string(goModB)) + "/" + r.Mod.String() + "/vmlinuz", nil + } + } + return "", fmt.Errorf("failed to find kernel in %v", goMod) +} + +type addNodeFunc func(c *vnet.Config) *vnet.Node // returns nil to omit test + +func easy(c *vnet.Config) *vnet.Node { + n := c.NumNodes() + 1 + return c.AddNode(c.AddNetwork( + fmt.Sprintf("2.%d.%d.%d", n, n, n), // public IP + fmt.Sprintf("192.168.%d.1/24", n), vnet.EasyNAT)) +} + +// easy + host firewall +func easyFW(c *vnet.Config) *vnet.Node { + n := c.NumNodes() + 1 + return c.AddNode(vnet.HostFirewall, c.AddNetwork( + fmt.Sprintf("2.%d.%d.%d", n, n, n), // public IP + fmt.Sprintf("192.168.%d.1/24", n), vnet.EasyNAT)) +} + +func easyAF(c *vnet.Config) *vnet.Node { + n := c.NumNodes() + 1 + return c.AddNode(c.AddNetwork( + fmt.Sprintf("2.%d.%d.%d", n, n, n), // public IP + fmt.Sprintf("192.168.%d.1/24", n), vnet.EasyAFNAT)) +} + +func sameLAN(c *vnet.Config) *vnet.Node { + nw := c.FirstNetwork() + if nw == nil { + return nil + } + if !nw.CanTakeMoreNodes() { + return nil + } + return c.AddNode(nw) +} + +func one2one(c *vnet.Config) *vnet.Node { + n := c.NumNodes() + 1 + return c.AddNode(c.AddNetwork( + fmt.Sprintf("2.%d.%d.%d", n, n, n), // public IP + fmt.Sprintf("172.16.%d.1/24", n), vnet.One2OneNAT)) +} + +func easyPMP(c *vnet.Config) *vnet.Node { + n := c.NumNodes() + 1 + return c.AddNode(c.AddNetwork( + fmt.Sprintf("2.%d.%d.%d", n, n, n), // public IP + fmt.Sprintf("192.168.%d.1/24", n), vnet.EasyNAT, vnet.NATPMP)) +} + +// easy + port mapping + host firewall +func easyPMPFW(c *vnet.Config) *vnet.Node { + n := c.NumNodes() + 1 + return c.AddNode( + vnet.HostFirewall, + vnet.TailscaledEnv{ + 
Key: "TS_DEBUG_RAW_DISCO", + Value: "1", + }, + vnet.TailscaledEnv{ + Key: "TS_DEBUG_DISCO", + Value: "1", + }, + vnet.TailscaledEnv{ + Key: "TS_LOG_VERBOSITY", + Value: "2", + }, + c.AddNetwork( + fmt.Sprintf("2.%d.%d.%d", n, n, n), // public IP + fmt.Sprintf("192.168.%d.1/24", n), vnet.EasyNAT, vnet.NATPMP)) +} + +// easy + port mapping + host firewall - BPF +func easyPMPFWNoBPF(c *vnet.Config) *vnet.Node { + n := c.NumNodes() + 1 + return c.AddNode( + vnet.HostFirewall, + vnet.TailscaledEnv{ + Key: "TS_DEBUG_DISABLE_RAW_DISCO", + Value: "1", + }, + c.AddNetwork( + fmt.Sprintf("2.%d.%d.%d", n, n, n), // public IP + fmt.Sprintf("192.168.%d.1/24", n), vnet.EasyNAT, vnet.NATPMP)) +} + +func hard(c *vnet.Config) *vnet.Node { + n := c.NumNodes() + 1 + return c.AddNode(c.AddNetwork( + fmt.Sprintf("2.%d.%d.%d", n, n, n), // public IP + fmt.Sprintf("10.0.%d.1/24", n), vnet.HardNAT)) +} + +func hardPMP(c *vnet.Config) *vnet.Node { + n := c.NumNodes() + 1 + return c.AddNode(c.AddNetwork( + fmt.Sprintf("2.%d.%d.%d", n, n, n), // public IP + fmt.Sprintf("10.7.%d.1/24", n), vnet.HardNAT, vnet.NATPMP)) +} + +func (nt *natTest) runTest(node1, node2 addNodeFunc) pingRoute { + t := nt.tb + + var c vnet.Config + c.SetPCAPFile(*pcapFile) + nodes := []*vnet.Node{ + node1(&c), + node2(&c), + } + if nodes[0] == nil || nodes[1] == nil { + t.Skip("skipping test; not applicable combination") + } + if *logTailscaled { + nodes[0].SetVerboseSyslog(true) + nodes[1].SetVerboseSyslog(true) + } + + var err error + nt.vnet, err = vnet.New(&c) + if err != nil { + t.Fatalf("newServer: %v", err) + } + nt.tb.Cleanup(func() { + nt.vnet.Close() + }) + + var wg sync.WaitGroup // waiting for srv.Accept goroutine + defer wg.Wait() + + sockAddr := filepath.Join(nt.tempDir, "qemu.sock") + srv, err := net.Listen("unix", sockAddr) + if err != nil { + t.Fatalf("Listen: %v", err) + } + defer srv.Close() + + wg.Add(1) + go func() { + defer wg.Done() + for { + c, err := srv.Accept() + if err != nil { + return + 
} + go nt.vnet.ServeUnixConn(c.(*net.UnixConn), vnet.ProtocolQEMU) + } + }() + + for i, node := range nodes { + disk := fmt.Sprintf("%s/node-%d.qcow2", nt.tempDir, i) + out, err := exec.Command("qemu-img", "create", + "-f", "qcow2", + "-F", "qcow2", + "-b", nt.base, + disk).CombinedOutput() + if err != nil { + t.Fatalf("qemu-img create: %v, %s", err, out) + } + + var envBuf bytes.Buffer + for _, e := range node.Env() { + fmt.Fprintf(&envBuf, " tailscaled.env=%s=%s", e.Key, e.Value) + } + envStr := envBuf.String() + + cmd := exec.Command("qemu-system-x86_64", + "-M", "microvm,isa-serial=off", + "-m", "384M", + "-nodefaults", "-no-user-config", "-nographic", + "-kernel", nt.kernel, + "-append", "console=hvc0 root=PARTUUID=60c24cc1-f3f9-427a-8199-76baa2d60001/PARTNROFF=1 ro init=/gokrazy/init panic=10 oops=panic pci=off nousb tsc=unstable clocksource=hpet gokrazy.remote_syslog.target=52.52.0.9:995 tailscale-tta=1"+envStr, + "-drive", "id=blk0,file="+disk+",format=qcow2", + "-device", "virtio-blk-device,drive=blk0", + "-netdev", "stream,id=net0,addr.type=unix,addr.path="+sockAddr, + "-device", "virtio-serial-device", + "-device", "virtio-rng-device", + "-device", "virtio-net-device,netdev=net0,mac="+node.MAC().String(), + "-chardev", "stdio,id=virtiocon0,mux=on", + "-device", "virtconsole,chardev=virtiocon0", + "-mon", "chardev=virtiocon0,mode=readline", + ) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Start(); err != nil { + t.Fatalf("qemu: %v", err) + } + nt.tb.Cleanup(func() { + cmd.Process.Kill() + cmd.Wait() + }) + } + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + lc1 := nt.vnet.NodeAgentClient(nodes[0]) + lc2 := nt.vnet.NodeAgentClient(nodes[1]) + clients := []*vnet.NodeAgentClient{lc1, lc2} + + var eg errgroup.Group + var sts [2]*ipnstate.Status + for i, c := range clients { + i, c := i, c + eg.Go(func() error { + node := nodes[i] + st, err := c.Status(ctx) + if err != nil { + return 
fmt.Errorf("%v status: %w", node, err) + } + t.Logf("%v status: %v", node, st.BackendState) + + if node.HostFirewall() { + if err := c.EnableHostFirewall(ctx); err != nil { + return fmt.Errorf("%v firewall: %w", node, err) + } + t.Logf("%v firewalled", node) + } + + if err := up(ctx, c); err != nil { + return fmt.Errorf("%v up: %w", node, err) + } + t.Logf("%v up!", node) + + st, err = c.Status(ctx) + if err != nil { + return fmt.Errorf("%v status: %w", node, err) + } + sts[i] = st + + if st.BackendState != "Running" { + return fmt.Errorf("%v state = %q", node, st.BackendState) + } + t.Logf("%v up with %v", node, sts[i].Self.TailscaleIPs) + return nil + }) + } + if err := eg.Wait(); err != nil { + t.Fatalf("initial setup: %v", err) + } + + defer nt.vnet.Close() + + pingRes, err := ping(ctx, lc1, sts[1].Self.TailscaleIPs[0]) + if err != nil { + t.Fatalf("ping failure: %v", err) + } + nt.gotRoute = classifyPing(pingRes) + t.Logf("ping route: %v", nt.gotRoute) + + return nt.gotRoute +} + +func classifyPing(pr *ipnstate.PingResult) pingRoute { + if pr == nil { + return routeNil + } + if pr.Endpoint != "" { + ap, err := netip.ParseAddrPort(pr.Endpoint) + if err == nil { + if ap.Addr().IsPrivate() { + return routeLocal + } + return routeDirect + } + } + return routeDERP // presumably +} + +type pingRoute string + +const ( + routeDERP pingRoute = "derp" + routeLocal pingRoute = "local" + routeDirect pingRoute = "direct" + routeNil pingRoute = "nil" // *ipnstate.PingResult is nil +) + +func ping(ctx context.Context, c *vnet.NodeAgentClient, target netip.Addr) (*ipnstate.PingResult, error) { + n := 0 + var res *ipnstate.PingResult + anyPong := false + for n < 10 { + n++ + pr, err := c.PingWithOpts(ctx, target, tailcfg.PingDisco, tailscale.PingOpts{}) + if err != nil { + if anyPong { + return res, nil + } + return nil, err + } + if pr.Err != "" { + return nil, errors.New(pr.Err) + } + if pr.DERPRegionID == 0 { + return pr, nil + } + res = pr + select { + case <-ctx.Done(): + 
case <-time.After(time.Second): + } + } + if res == nil { + return nil, errors.New("no ping response") + } + return res, nil +} + +func up(ctx context.Context, c *vnet.NodeAgentClient) error { + req, err := http.NewRequestWithContext(ctx, "GET", "http://unused/up", nil) + if err != nil { + return err + } + res, err := c.HTTPClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + all, _ := io.ReadAll(res.Body) + if res.StatusCode != 200 { + return fmt.Errorf("unexpected status code %v: %s", res.Status, all) + } + return nil +} + +type nodeType struct { + name string + fn addNodeFunc +} + +var types = []nodeType{ + {"easy", easy}, + {"easyAF", easyAF}, + {"hard", hard}, + {"easyPMP", easyPMP}, + {"hardPMP", hardPMP}, + {"one2one", one2one}, + {"sameLAN", sameLAN}, +} + +// want sets the expected ping route for the test. +func (nt *natTest) want(r pingRoute) { + if nt.gotRoute != r { + nt.tb.Errorf("ping route = %v; want %v", nt.gotRoute, r) + } +} + +func TestEasyEasy(t *testing.T) { + nt := newNatTest(t) + nt.runTest(easy, easy) + nt.want(routeDirect) +} + +func TestSameLAN(t *testing.T) { + nt := newNatTest(t) + nt.runTest(easy, sameLAN) + nt.want(routeLocal) +} + +// TestBPFDisco tests https://github.com/tailscale/tailscale/issues/3824 ... +// * server behind a Hard NAT +// * client behind a NAT with UPnP support +// * client machine has a stateful host firewall (e.g. 
ufw) +func TestBPFDisco(t *testing.T) { + nt := newNatTest(t) + nt.runTest(easyPMPFW, hard) + nt.want(routeDirect) +} + +func TestHostFWNoBPF(t *testing.T) { + nt := newNatTest(t) + nt.runTest(easyPMPFWNoBPF, hard) + nt.want(routeDERP) +} + +func TestHostFWPair(t *testing.T) { + nt := newNatTest(t) + nt.runTest(easyFW, easyFW) + nt.want(routeDirect) +} + +func TestOneHostFW(t *testing.T) { + nt := newNatTest(t) + nt.runTest(easy, easyFW) + nt.want(routeDirect) +} + +var pair = flag.String("pair", "", "comma-separated pair of types to test (easy, easyAF, hard, easyPMP, hardPMP, one2one, sameLAN)") + +func TestPair(t *testing.T) { + t1, t2, ok := strings.Cut(*pair, ",") + if !ok { + t.Skipf("skipping test without --pair=type1,type2 set") + } + find := func(name string) addNodeFunc { + for _, nt := range types { + if nt.name == name { + return nt.fn + } + } + t.Fatalf("unknown type %q", name) + return nil + } + + nt := newNatTest(t) + nt.runTest(find(t1), find(t2)) +} + +var runGrid = flag.Bool("run-grid", false, "run grid test") + +func TestGrid(t *testing.T) { + if !*runGrid { + t.Skip("skipping grid test; set --run-grid to run") + } + t.Parallel() + + sem := syncs.NewSemaphore(2) + var ( + mu sync.Mutex + res = make(map[string]pingRoute) + ) + for _, a := range types { + for _, b := range types { + key := a.name + "-" + b.name + keyBack := b.name + "-" + a.name + t.Run(key, func(t *testing.T) { + t.Parallel() + + sem.Acquire() + defer sem.Release() + + filename := key + ".cache" + contents, _ := os.ReadFile(filename) + if len(contents) == 0 { + filename2 := keyBack + ".cache" + contents, _ = os.ReadFile(filename2) + } + route := pingRoute(strings.TrimSpace(string(contents))) + + if route == "" { + nt := newNatTest(t) + route = nt.runTest(a.fn, b.fn) + if err := os.WriteFile(filename, []byte(string(route)), 0666); err != nil { + t.Fatalf("writeFile: %v", err) + } + } + + mu.Lock() + defer mu.Unlock() + res[key] = route + t.Logf("results: %v", res) + }) + } + } + + 
t.Cleanup(func() { + mu.Lock() + defer mu.Unlock() + var hb bytes.Buffer + pf := func(format string, args ...any) { + fmt.Fprintf(&hb, format, args...) + } + rewrite := func(s string) string { + return strings.ReplaceAll(s, "PMP", "+pm") + } + pf("") + pf("") + for _, a := range types { + pf("", rewrite(a.name)) + } + pf("\n") + + for _, a := range types { + if a.name == "sameLAN" { + continue + } + pf("", rewrite(a.name)) + for _, b := range types { + key := a.name + "-" + b.name + key2 := b.name + "-" + a.name + v := cmp.Or(res[key], res[key2], "-") + if v == "derp" { + pf("", v) + } else if v == "local" { + pf("", v) + } else { + pf("", v) + } + } + pf("\n") + } + pf("
    %s
    %s
    %s
    %s
    %s
    ") + pf("easy: Endpoint-Independent Mapping, Address and Port-Dependent Filtering (e.g. Linux, Google Wifi, Unifi, eero)
    ") + pf("easyAF: Endpoint-Independent Mapping, Address-Dependent Filtering (James says telephony things or Zyxel type things)
    ") + pf("hard: Address and Port-Dependent Mapping, Address and Port-Dependent Filtering (FreeBSD, OPNSense, pfSense)
    ") + pf("one2one: One-to-One NAT (e.g. an EC2 instance with a public IPv4)
    ") + pf("x+pm: x, with port mapping (NAT-PMP, PCP, UPnP, etc)
    ") + pf("sameLAN: a second node in the same LAN as the first
    ") + pf("") + + if err := os.WriteFile("grid.html", hb.Bytes(), 0666); err != nil { + t.Fatalf("writeFile: %v", err) + } + }) +} diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index 6d366d46e7e02..6676ee22cbd1c 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -18,6 +18,7 @@ import ( _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" _ "tailscale.com/health" + _ "tailscale.com/hostinfo" _ "tailscale.com/ipn" _ "tailscale.com/ipn/conffile" _ "tailscale.com/ipn/ipnlocal" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index 6d366d46e7e02..6676ee22cbd1c 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -18,6 +18,7 @@ import ( _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" _ "tailscale.com/health" + _ "tailscale.com/hostinfo" _ "tailscale.com/ipn" _ "tailscale.com/ipn/conffile" _ "tailscale.com/ipn/ipnlocal" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index 6d366d46e7e02..6676ee22cbd1c 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -18,6 +18,7 @@ import ( _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" _ "tailscale.com/health" + _ "tailscale.com/hostinfo" _ "tailscale.com/ipn" _ "tailscale.com/ipn/conffile" _ "tailscale.com/ipn/ipnlocal" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index 6d366d46e7e02..6676ee22cbd1c 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -18,6 +18,7 @@ import ( _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" _ "tailscale.com/health" 
+ _ "tailscale.com/hostinfo" _ "tailscale.com/ipn" _ "tailscale.com/ipn/conffile" _ "tailscale.com/ipn/ipnlocal" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index 015bf828496bb..bbf46d8c21938 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -25,6 +25,7 @@ import ( _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" _ "tailscale.com/health" + _ "tailscale.com/hostinfo" _ "tailscale.com/ipn" _ "tailscale.com/ipn/conffile" _ "tailscale.com/ipn/ipnlocal" diff --git a/tstest/natlab/vnet/conf.go b/tstest/natlab/vnet/conf.go new file mode 100644 index 0000000000000..1703e0c1296a8 --- /dev/null +++ b/tstest/natlab/vnet/conf.go @@ -0,0 +1,340 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package vnet + +import ( + "cmp" + "fmt" + "log" + "net/netip" + "os" + "slices" + + "github.com/google/gopacket/layers" + "github.com/google/gopacket/pcapgo" + "tailscale.com/types/logger" + "tailscale.com/util/must" + "tailscale.com/util/set" +) + +// Note: the exported Node and Network are the configuration types; +// the unexported node and network are the runtime types that are actually +// used once the server is created. + +// Config is the requested state of the natlab virtual network. +// +// The zero value is a valid empty configuration. Call AddNode +// and AddNetwork to methods on the returned Node and Network +// values to modify the config before calling NewServer. +// Once the NewServer is called, Config is no longer used. 
+type Config struct { + nodes []*Node + networks []*Network + pcapFile string +} + +func (c *Config) SetPCAPFile(file string) { + c.pcapFile = file +} + +func (c *Config) NumNodes() int { + return len(c.nodes) +} + +func (c *Config) FirstNetwork() *Network { + if len(c.networks) == 0 { + return nil + } + return c.networks[0] +} + +// AddNode creates a new node in the world. +// +// The opts may be of the following types: +// - *Network: zero, one, or more networks to add this node to +// - TODO: more +// +// On an error or unknown opt type, AddNode returns a +// node with a carried error that gets returned later. +func (c *Config) AddNode(opts ...any) *Node { + num := len(c.nodes) + n := &Node{ + num: num + 1, + mac: MAC{0x52, 0xcc, 0xcc, 0xcc, 0xcc, byte(num) + 1}, // 52=TS then 0xcc for ccclient + } + c.nodes = append(c.nodes, n) + for _, o := range opts { + switch o := o.(type) { + case *Network: + if !slices.Contains(o.nodes, n) { + o.nodes = append(o.nodes, n) + } + n.nets = append(n.nets, o) + case TailscaledEnv: + n.env = append(n.env, o) + case NodeOption: + switch o { + case HostFirewall: + n.hostFW = true + case VerboseSyslog: + n.verboseSyslog = true + default: + if n.err == nil { + n.err = fmt.Errorf("unknown NodeOption %q", o) + } + } + default: + if n.err == nil { + n.err = fmt.Errorf("unknown AddNode option type %T", o) + } + } + } + return n +} + +// NodeOption is an option that can be passed to Config.AddNode. +type NodeOption string + +const ( + HostFirewall NodeOption = "HostFirewall" + VerboseSyslog NodeOption = "VerboseSyslog" +) + +// TailscaledEnv is an option that can be passed to Config.AddNode +// to set an environment variable for tailscaled. +type TailscaledEnv struct { + Key, Value string +} + +// AddNetwork adds a new network. 
+// +// The opts may be of the following types: +// - string IP address, for the network's WAN IP (if any) +// - string netip.Prefix, for the network's LAN IP (defaults to 192.168.0.0/24) +// - NAT, the type of NAT to use +// - NetworkService, a service to add to the network +// +// On an error or unknown opt type, AddNetwork returns a +// network with a carried error that gets returned later. +func (c *Config) AddNetwork(opts ...any) *Network { + num := len(c.networks) + n := &Network{ + mac: MAC{0x52, 0xee, 0xee, 0xee, 0xee, byte(num) + 1}, // 52=TS then 0xee for 'etwork + } + c.networks = append(c.networks, n) + for _, o := range opts { + switch o := o.(type) { + case string: + if ip, err := netip.ParseAddr(o); err == nil { + n.wanIP = ip + } else if ip, err := netip.ParsePrefix(o); err == nil { + n.lanIP = ip + } else { + if n.err == nil { + n.err = fmt.Errorf("unknown string option %q", o) + } + } + case NAT: + n.natType = o + case NetworkService: + n.AddService(o) + default: + if n.err == nil { + n.err = fmt.Errorf("unknown AddNetwork option type %T", o) + } + } + } + return n +} + +// Node is the configuration of a node in the virtual network. +type Node struct { + err error + num int // 1-based node number + n *node // nil until NewServer called + + env []TailscaledEnv + hostFW bool + verboseSyslog bool + + // TODO(bradfitz): this is halfway converted to supporting multiple NICs + // but not done. We need a MAC-per-Network. + + mac MAC + nets []*Network +} + +// Num returns the 1-based node number. +func (n *Node) Num() int { + return n.num +} + +// String returns the string "nodeN" where N is the 1-based node number. +func (n *Node) String() string { + return fmt.Sprintf("node%d", n.num) +} + +// MAC returns the MAC address of the node. 
+func (n *Node) MAC() MAC { + return n.mac +} + +func (n *Node) Env() []TailscaledEnv { + return n.env +} + +func (n *Node) HostFirewall() bool { + return n.hostFW +} + +func (n *Node) VerboseSyslog() bool { + return n.verboseSyslog +} + +func (n *Node) SetVerboseSyslog(v bool) { + n.verboseSyslog = v +} + +// Network returns the first network this node is connected to, +// or nil if none. +func (n *Node) Network() *Network { + if len(n.nets) == 0 { + return nil + } + return n.nets[0] +} + +// Network is the configuration of a network in the virtual network. +type Network struct { + mac MAC // MAC address of the router/gateway + natType NAT + + wanIP netip.Addr + lanIP netip.Prefix + nodes []*Node + + svcs set.Set[NetworkService] + + // ... + err error // carried error +} + +func (n *Network) CanTakeMoreNodes() bool { + if n.natType == One2OneNAT { + return len(n.nodes) == 0 + } + return len(n.nodes) < 150 +} + +// NetworkService is a service that can be added to a network. +type NetworkService string + +const ( + NATPMP NetworkService = "NAT-PMP" + PCP NetworkService = "PCP" + UPnP NetworkService = "UPnP" +) + +// AddService adds a network service (such as port mapping protocols) to a +// network. +func (n *Network) AddService(s NetworkService) { + if n.svcs == nil { + n.svcs = set.Of(s) + } else { + n.svcs.Add(s) + } +} + +// initFromConfig initializes the server from the previous calls +// to AddNode and AddNetwork and returns an error if +// there were any configuration issues. 
+func (s *Server) initFromConfig(c *Config) error { + netOfConf := map[*Network]*network{} + if c.pcapFile != "" { + pcf, err := os.OpenFile(c.pcapFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) + if err != nil { + return err + } + nw, err := pcapgo.NewNgWriter(pcf, layers.LinkTypeEthernet) + if err != nil { + return err + } + pw := &pcapWriter{ + f: pcf, + w: nw, + } + s.pcapWriter = pw + } + for i, conf := range c.networks { + if conf.err != nil { + return conf.err + } + if !conf.lanIP.IsValid() { + conf.lanIP = netip.MustParsePrefix("192.168.0.0/24") + } + n := &network{ + s: s, + mac: conf.mac, + portmap: conf.svcs.Contains(NATPMP), // TODO: expand network.portmap + wanIP: conf.wanIP, + lanIP: conf.lanIP, + nodesByIP: map[netip.Addr]*node{}, + logf: logger.WithPrefix(log.Printf, fmt.Sprintf("[net-%v] ", conf.mac)), + } + netOfConf[conf] = n + s.networks.Add(n) + if _, ok := s.networkByWAN[conf.wanIP]; ok { + return fmt.Errorf("two networks have the same WAN IP %v; Anycast not (yet?) supported", conf.wanIP) + } + s.networkByWAN[conf.wanIP] = n + n.lanInterfaceID = must.Get(s.pcapWriter.AddInterface(pcapgo.NgInterface{ + Name: fmt.Sprintf("network%d-lan", i+1), + LinkType: layers.LinkTypeIPv4, + })) + n.wanInterfaceID = must.Get(s.pcapWriter.AddInterface(pcapgo.NgInterface{ + Name: fmt.Sprintf("network%d-wan", i+1), + LinkType: layers.LinkTypeIPv4, + })) + } + for _, conf := range c.nodes { + if conf.err != nil { + return conf.err + } + n := &node{ + num: conf.num, + mac: conf.mac, + net: netOfConf[conf.Network()], + verboseSyslog: conf.VerboseSyslog(), + } + n.interfaceID = must.Get(s.pcapWriter.AddInterface(pcapgo.NgInterface{ + Name: n.String(), + LinkType: layers.LinkTypeEthernet, + })) + conf.n = n + if _, ok := s.nodeByMAC[n.mac]; ok { + return fmt.Errorf("two nodes have the same MAC %v", n.mac) + } + s.nodes = append(s.nodes, n) + s.nodeByMAC[n.mac] = n + + // Allocate a lanIP for the node. 
Use the network's CIDR and use final + // octet 101 (for first node), 102, etc. The node number comes from the + // last octet of the MAC address (0-based) + ip4 := n.net.lanIP.Addr().As4() + ip4[3] = 100 + n.mac[5] + n.lanIP = netip.AddrFrom4(ip4) + n.net.nodesByIP[n.lanIP] = n + } + + // Now that nodes are populated, set up NAT: + for _, conf := range c.networks { + n := netOfConf[conf] + natType := cmp.Or(conf.natType, EasyNAT) + if err := n.InitNAT(natType); err != nil { + return err + } + } + + return nil +} diff --git a/tstest/natlab/vnet/conf_test.go b/tstest/natlab/vnet/conf_test.go new file mode 100644 index 0000000000000..15d3c69ef52d9 --- /dev/null +++ b/tstest/natlab/vnet/conf_test.go @@ -0,0 +1,80 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package vnet + +import "testing" + +func TestConfig(t *testing.T) { + tests := []struct { + name string + setup func(*Config) + wantErr string + }{ + { + name: "simple", + setup: func(c *Config) { + c.AddNode(c.AddNetwork("2.1.1.1", "192.168.1.1/24", EasyNAT, NATPMP)) + c.AddNode(c.AddNetwork("2.2.2.2", "10.2.0.1/16", HardNAT)) + }, + }, + { + name: "indirect", + setup: func(c *Config) { + n1 := c.AddNode(c.AddNetwork("2.1.1.1", "192.168.1.1/24", HardNAT)) + n1.Network().AddService(NATPMP) + c.AddNode(c.AddNetwork("2.2.2.2", "10.2.0.1/16", NAT("hard"))) + }, + }, + { + name: "multi-node-in-net", + setup: func(c *Config) { + net1 := c.AddNetwork("2.1.1.1", "192.168.1.1/24") + c.AddNode(net1) + c.AddNode(net1) + }, + }, + { + name: "dup-wan-ip", + setup: func(c *Config) { + c.AddNetwork("2.1.1.1", "192.168.1.1/24") + c.AddNetwork("2.1.1.1", "10.2.0.1/16") + }, + wantErr: "two networks have the same WAN IP 2.1.1.1; Anycast not (yet?) 
supported", + }, + { + name: "one-to-one-nat-with-multiple-nodes", + setup: func(c *Config) { + net1 := c.AddNetwork("2.1.1.1", "192.168.1.1/24", One2OneNAT) + c.AddNode(net1) + c.AddNode(net1) + }, + wantErr: "error creating NAT type \"one2one\" for network 2.1.1.1: can't use one2one NAT type on networks other than single-node networks", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var c Config + tt.setup(&c) + _, err := New(&c) + if err == nil { + if tt.wantErr == "" { + return + } + t.Fatalf("got success; wanted error %q", tt.wantErr) + } + if err.Error() != tt.wantErr { + t.Fatalf("got error %q; want %q", err, tt.wantErr) + } + }) + } +} + +func TestNodeString(t *testing.T) { + if g, w := (&Node{num: 1}).String(), "node1"; g != w { + t.Errorf("got %q; want %q", g, w) + } + if g, w := (&node{num: 1}).String(), "node1"; g != w { + t.Errorf("got %q; want %q", g, w) + } +} diff --git a/tstest/natlab/vnet/easyaf.go b/tstest/natlab/vnet/easyaf.go new file mode 100644 index 0000000000000..0901bbdffdd7d --- /dev/null +++ b/tstest/natlab/vnet/easyaf.go @@ -0,0 +1,91 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package vnet + +import ( + "log" + "math/rand/v2" + "net/netip" + "time" + + "tailscale.com/util/mak" +) + +// easyAFNAT is an "Endpoint Independent" NAT, like Linux and most home routers +// (many of which are Linux), but with only address filtering, not address+port +// filtering. +// +// James says these are used by "anyone with “voip helpers” turned on" +// "which is a lot of home modem routers" ... "probably like most of the zyxel +// type things". 
+type easyAFNAT struct { + pool IPPool + wanIP netip.Addr + out map[netip.Addr]portMappingAndTime + in map[uint16]lanAddrAndTime + lastOut map[srcAPDstAddrTuple]time.Time // (lan:port, wan:port) => last packet out time +} + +type srcAPDstAddrTuple struct { + src netip.AddrPort + dst netip.Addr +} + +func init() { + registerNATType(EasyAFNAT, func(p IPPool) (NATTable, error) { + return &easyAFNAT{pool: p, wanIP: p.WANIP()}, nil + }) +} + +func (n *easyAFNAT) IsPublicPortUsed(ap netip.AddrPort) bool { + if ap.Addr() != n.wanIP { + return false + } + _, ok := n.in[ap.Port()] + return ok +} + +func (n *easyAFNAT) PickOutgoingSrc(src, dst netip.AddrPort, at time.Time) (wanSrc netip.AddrPort) { + mak.Set(&n.lastOut, srcAPDstAddrTuple{src, dst.Addr()}, at) + if pm, ok := n.out[src.Addr()]; ok { + // Existing flow. + // TODO: bump timestamp + return netip.AddrPortFrom(n.wanIP, pm.port) + } + + // Loop through all 32k high (ephemeral) ports, starting at a random + // position and looping back around to the start. + start := rand.N(uint16(32 << 10)) + for off := range uint16(32 << 10) { + port := 32<<10 + (start+off)%(32<<10) + if _, ok := n.in[port]; !ok { + wanAddr := netip.AddrPortFrom(n.wanIP, port) + if n.pool.IsPublicPortUsed(wanAddr) { + continue + } + + // Found a free port. + mak.Set(&n.out, src.Addr(), portMappingAndTime{port: port, at: at}) + mak.Set(&n.in, port, lanAddrAndTime{lanAddr: src, at: at}) + return wanAddr + } + } + return netip.AddrPort{} // failed to allocate a mapping; TODO: fire an alert? +} + +func (n *easyAFNAT) PickIncomingDst(src, dst netip.AddrPort, at time.Time) (lanDst netip.AddrPort) { + if dst.Addr() != n.wanIP { + return netip.AddrPort{} // drop; not for us. shouldn't happen if natlabd routing isn't broken. + } + lanDst = n.in[dst.Port()].lanAddr + + // Stateful firewall: drop incoming packets that don't have traffic out. + // TODO(bradfitz): verify Linux does this in the router code, not in the NAT code. 
+ if t, ok := n.lastOut[srcAPDstAddrTuple{lanDst, src.Addr()}]; !ok || at.Sub(t) > 300*time.Second { + log.Printf("Drop incoming packet from %v to %v; no recent outgoing packet", src, dst) + return netip.AddrPort{} + } + + return lanDst +} diff --git a/tstest/natlab/vnet/nat.go b/tstest/natlab/vnet/nat.go new file mode 100644 index 0000000000000..ad6f29b3adb58 --- /dev/null +++ b/tstest/natlab/vnet/nat.go @@ -0,0 +1,293 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package vnet + +import ( + "errors" + "log" + "math/rand/v2" + "net/netip" + "time" + + "tailscale.com/util/mak" +) + +const ( + One2OneNAT NAT = "one2one" + EasyNAT NAT = "easy" // address+port filtering + EasyAFNAT NAT = "easyaf" // address filtering (not port) + HardNAT NAT = "hard" +) + +// IPPool is the interface that a NAT implementation uses to get information +// about a network. +// +// Outside of tests, this is typically a *network. +type IPPool interface { + // WANIP returns the primary WAN IP address. + // + // TODO: add another method for networks with multiple WAN IP addresses. + WANIP() netip.Addr + + // SoleLanIP reports whether this network has a sole LAN client + // and if so, its IP address. + SoleLANIP() (_ netip.Addr, ok bool) + + // IsPublicPortUsed reports whether the provided WAN IP+port is in use by + // anything. (In particular, the NAT-PMP/etc port mappers might have taken + // a port.) Implementations should check this before allocating a port, + // and then they should report IsPublicPortUsed themselves for that port. + IsPublicPortUsed(netip.AddrPort) bool +} + +// newTableFunc is a constructor for a NAT table. +// The provided IPPool is typically (outside of tests) a *network. +type newTableFunc func(IPPool) (NATTable, error) + +// NAT is a type of NAT that's known to natlab. +// +// For example, "easy" for Linux-style NAT, "hard" for FreeBSD-style NAT, etc. +type NAT string + +// natTypes are the known NAT types. 
+var natTypes = map[NAT]newTableFunc{}
+
+// registerNATType registers a NAT type.
+func registerNATType(name NAT, f newTableFunc) {
+	if _, ok := natTypes[name]; ok {
+		panic("duplicate NAT type: " + name)
+	}
+	natTypes[name] = f
+}
+
+// NATTable is what a NAT implementation is expected to do.
+//
+// This project tests Tailscale as it faces various combinations of various NAT
+// implementations (e.g. Linux easy style NAT vs FreeBSD hard/endpoint dependent
+// NAT vs Cloud 1:1 NAT, etc)
+//
+// Implementations of NATTable need not handle concurrency; the natlab serializes
+// all calls into a NATTable.
+//
+// The provided `at` value will typically be time.Now, except for tests.
+// Implementations should not use real time and should only compare
+// previously provided time values.
+type NATTable interface {
+	// PickOutgoingSrc returns the source address to use for an outgoing packet.
+	//
+	// The result should either be invalid (to drop the packet) or a WAN (not
+	// private) IP address.
+	//
+	// Typically, the src is a LAN source IP address, but it might also be a WAN
+	// IP address if the packet is being forwarded for a source machine that has
+	// a public IP address.
+	PickOutgoingSrc(src, dst netip.AddrPort, at time.Time) (wanSrc netip.AddrPort)
+
+	// PickIncomingDst returns the destination address to use for an incoming
+	// packet. The incoming src address is always a public WAN IP.
+	//
+	// The result should either be invalid (to drop the packet) or the IP
+	// address of a machine on the local network address, usually a private
+	// LAN IP.
+	PickIncomingDst(src, dst netip.AddrPort, at time.Time) (lanDst netip.AddrPort)
+
+	// IsPublicPortUsed reports whether the provided WAN IP+port is in use by
+	// anything. The port mapper uses this to avoid grabbing an in-use port.
+	IsPublicPortUsed(netip.AddrPort) bool
+}
+
+// oneToOneNAT is a 1:1 NAT, like a typical EC2 VM.
+type oneToOneNAT struct { + lanIP netip.Addr + wanIP netip.Addr +} + +func init() { + registerNATType(One2OneNAT, func(p IPPool) (NATTable, error) { + lanIP, ok := p.SoleLANIP() + if !ok { + return nil, errors.New("can't use one2one NAT type on networks other than single-node networks") + } + return &oneToOneNAT{lanIP: lanIP, wanIP: p.WANIP()}, nil + }) +} + +func (n *oneToOneNAT) PickOutgoingSrc(src, dst netip.AddrPort, at time.Time) (wanSrc netip.AddrPort) { + return netip.AddrPortFrom(n.wanIP, src.Port()) +} + +func (n *oneToOneNAT) PickIncomingDst(src, dst netip.AddrPort, at time.Time) (lanDst netip.AddrPort) { + return netip.AddrPortFrom(n.lanIP, dst.Port()) +} + +func (n *oneToOneNAT) IsPublicPortUsed(netip.AddrPort) bool { + return true // all ports are owned by the 1:1 NAT +} + +type srcDstTuple struct { + src netip.AddrPort + dst netip.AddrPort +} + +type hardKeyIn struct { + wanPort uint16 + src netip.AddrPort +} + +type portMappingAndTime struct { + port uint16 + at time.Time +} + +type lanAddrAndTime struct { + lanAddr netip.AddrPort + at time.Time +} + +// hardNAT is an "Endpoint Dependent" NAT, like FreeBSD/pfSense/OPNsense. +// This is shown as "MappingVariesByDestIP: true" by netcheck, and what +// Tailscale calls "Hard NAT". +type hardNAT struct { + pool IPPool + wanIP netip.Addr + + out map[srcDstTuple]portMappingAndTime + in map[hardKeyIn]lanAddrAndTime +} + +func init() { + registerNATType(HardNAT, func(p IPPool) (NATTable, error) { + return &hardNAT{pool: p, wanIP: p.WANIP()}, nil + }) +} + +func (n *hardNAT) IsPublicPortUsed(ap netip.AddrPort) bool { + if ap.Addr() != n.wanIP { + return false + } + for k := range n.in { + if k.wanPort == ap.Port() { + return true + } + } + return false +} + +func (n *hardNAT) PickOutgoingSrc(src, dst netip.AddrPort, at time.Time) (wanSrc netip.AddrPort) { + ko := srcDstTuple{src, dst} + if pm, ok := n.out[ko]; ok { + // Existing flow. 
+ // TODO: bump timestamp + return netip.AddrPortFrom(n.wanIP, pm.port) + } + + // No existing mapping exists. Create one. + + // TODO: clean up old expired mappings + + // Instead of proper data structures that would be efficient, we instead + // just loop a bunch and look for a free port. This project is only used + // by tests and doesn't care about performance, this is good enough. + for { + port := rand.N(uint16(32<<10)) + 32<<10 // pick some "ephemeral" port + if n.pool.IsPublicPortUsed(netip.AddrPortFrom(n.wanIP, port)) { + continue + } + + ki := hardKeyIn{wanPort: port, src: dst} + if _, ok := n.in[ki]; ok { + // Port already in use. + continue + } + mak.Set(&n.in, ki, lanAddrAndTime{lanAddr: src, at: at}) + mak.Set(&n.out, ko, portMappingAndTime{port: port, at: at}) + return netip.AddrPortFrom(n.wanIP, port) + } +} + +func (n *hardNAT) PickIncomingDst(src, dst netip.AddrPort, at time.Time) (lanDst netip.AddrPort) { + if dst.Addr() != n.wanIP { + return netip.AddrPort{} // drop; not for us. shouldn't happen if natlabd routing isn't broken. + } + ki := hardKeyIn{wanPort: dst.Port(), src: src} + if pm, ok := n.in[ki]; ok { + // Existing flow. + return pm.lanAddr + } + return netip.AddrPort{} // drop; no mapping +} + +// easyNAT is an "Endpoint Independent" NAT, like Linux and most home routers +// (many of which are Linux). +// +// This is shown as "MappingVariesByDestIP: false" by netcheck, and what +// Tailscale calls "Easy NAT". +// +// Unlike Linux, this implementation is capped at 32k entries and doesn't resort +// to other allocation strategies when all 32k WAN ports are taken. 
+type easyNAT struct { + pool IPPool + wanIP netip.Addr + out map[netip.AddrPort]portMappingAndTime + in map[uint16]lanAddrAndTime + lastOut map[srcDstTuple]time.Time // (lan:port, wan:port) => last packet out time +} + +func init() { + registerNATType(EasyNAT, func(p IPPool) (NATTable, error) { + return &easyNAT{pool: p, wanIP: p.WANIP()}, nil + }) +} + +func (n *easyNAT) IsPublicPortUsed(ap netip.AddrPort) bool { + if ap.Addr() != n.wanIP { + return false + } + _, ok := n.in[ap.Port()] + return ok +} + +func (n *easyNAT) PickOutgoingSrc(src, dst netip.AddrPort, at time.Time) (wanSrc netip.AddrPort) { + mak.Set(&n.lastOut, srcDstTuple{src, dst}, at) + if pm, ok := n.out[src]; ok { + // Existing flow. + // TODO: bump timestamp + return netip.AddrPortFrom(n.wanIP, pm.port) + } + + // Loop through all 32k high (ephemeral) ports, starting at a random + // position and looping back around to the start. + start := rand.N(uint16(32 << 10)) + for off := range uint16(32 << 10) { + port := 32<<10 + (start+off)%(32<<10) + if _, ok := n.in[port]; !ok { + wanAddr := netip.AddrPortFrom(n.wanIP, port) + if n.pool.IsPublicPortUsed(wanAddr) { + continue + } + + // Found a free port. + mak.Set(&n.out, src, portMappingAndTime{port: port, at: at}) + mak.Set(&n.in, port, lanAddrAndTime{lanAddr: src, at: at}) + return wanAddr + } + } + return netip.AddrPort{} // failed to allocate a mapping; TODO: fire an alert? +} + +func (n *easyNAT) PickIncomingDst(src, dst netip.AddrPort, at time.Time) (lanDst netip.AddrPort) { + if dst.Addr() != n.wanIP { + return netip.AddrPort{} // drop; not for us. shouldn't happen if natlabd routing isn't broken. + } + lanDst = n.in[dst.Port()].lanAddr + + // Stateful firewall: drop incoming packets that don't have traffic out. + // TODO(bradfitz): verify Linux does this in the router code, not in the NAT code. 
+ if t, ok := n.lastOut[srcDstTuple{lanDst, src}]; !ok || at.Sub(t) > 300*time.Second { + log.Printf("Drop incoming packet from %v to %v; no recent outgoing packet", src, dst) + return netip.AddrPort{} + } + + return lanDst +} diff --git a/tstest/natlab/vnet/pcap.go b/tstest/natlab/vnet/pcap.go new file mode 100644 index 0000000000000..fa1904667790a --- /dev/null +++ b/tstest/natlab/vnet/pcap.go @@ -0,0 +1,56 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package vnet + +import ( + "io" + "os" + "sync" + + "github.com/google/gopacket" + "github.com/google/gopacket/pcapgo" +) + +// pcapWriter is a pcapgo.NgWriter that writes to a file. +// It is safe for concurrent use. The nil value is a no-op. +type pcapWriter struct { + f *os.File + + mu sync.Mutex + w *pcapgo.NgWriter +} + +func (p *pcapWriter) WritePacket(ci gopacket.CaptureInfo, data []byte) error { + if p == nil { + return nil + } + p.mu.Lock() + defer p.mu.Unlock() + if p.w == nil { + return io.ErrClosedPipe + } + return p.w.WritePacket(ci, data) +} + +func (p *pcapWriter) AddInterface(i pcapgo.NgInterface) (int, error) { + if p == nil { + return 0, nil + } + p.mu.Lock() + defer p.mu.Unlock() + return p.w.AddInterface(i) +} + +func (p *pcapWriter) Close() error { + if p == nil { + return nil + } + p.mu.Lock() + defer p.mu.Unlock() + if p.w != nil { + p.w.Flush() + p.w = nil + } + return p.f.Close() +} diff --git a/tstest/natlab/vnet/vnet.go b/tstest/natlab/vnet/vnet.go new file mode 100644 index 0000000000000..0205559c9aacd --- /dev/null +++ b/tstest/natlab/vnet/vnet.go @@ -0,0 +1,1738 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package vnet simulates a virtual Internet containing a set of networks with various +// NAT behaviors. You can then plug VMs into the virtual internet at different points +// to test Tailscale working end-to-end in various network conditions. 
+// +// See https://github.com/tailscale/tailscale/issues/13038 +package vnet + +// TODO: +// - [ ] port mapping actually working +// - [ ] conf to let you firewall things +// - [ ] tests for NAT tables + +import ( + "bufio" + "bytes" + "context" + "crypto/tls" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "math/rand/v2" + "net" + "net/http" + "net/http/httptest" + "net/netip" + "os/exec" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/google/gopacket" + "github.com/google/gopacket/layers" + "go4.org/mem" + "gvisor.dev/gvisor/pkg/buffer" + "gvisor.dev/gvisor/pkg/tcpip" + "gvisor.dev/gvisor/pkg/tcpip/adapters/gonet" + "gvisor.dev/gvisor/pkg/tcpip/header" + "gvisor.dev/gvisor/pkg/tcpip/link/channel" + "gvisor.dev/gvisor/pkg/tcpip/network/arp" + "gvisor.dev/gvisor/pkg/tcpip/network/ipv4" + "gvisor.dev/gvisor/pkg/tcpip/stack" + "gvisor.dev/gvisor/pkg/tcpip/transport/icmp" + "gvisor.dev/gvisor/pkg/tcpip/transport/tcp" + "gvisor.dev/gvisor/pkg/waiter" + "tailscale.com/client/tailscale" + "tailscale.com/derp" + "tailscale.com/derp/derphttp" + "tailscale.com/net/netutil" + "tailscale.com/net/stun" + "tailscale.com/syncs" + "tailscale.com/tailcfg" + "tailscale.com/tstest/integration/testcontrol" + "tailscale.com/types/key" + "tailscale.com/types/logger" + "tailscale.com/util/mak" + "tailscale.com/util/must" + "tailscale.com/util/set" + "tailscale.com/util/zstdframe" +) + +const nicID = 1 +const stunPort = 3478 + +func (s *Server) PopulateDERPMapIPs() error { + out, err := exec.Command("tailscale", "debug", "derp-map").Output() + if err != nil { + return fmt.Errorf("tailscale debug derp-map: %v", err) + } + var dm tailcfg.DERPMap + if err := json.Unmarshal(out, &dm); err != nil { + return fmt.Errorf("unmarshal DERPMap: %v", err) + } + for _, r := range dm.Regions { + for _, n := range r.Nodes { + if n.IPv4 != "" { + s.derpIPs.Add(netip.MustParseAddr(n.IPv4)) + } + } + } + return nil +} + +func (n *network) InitNAT(natType NAT) error 
{ + ctor, ok := natTypes[natType] + if !ok { + return fmt.Errorf("unknown NAT type %q", natType) + } + t, err := ctor(n) + if err != nil { + return fmt.Errorf("error creating NAT type %q for network %v: %w", natType, n.wanIP, err) + } + n.setNATTable(t) + n.natStyle.Store(natType) + return nil +} + +func (n *network) setNATTable(nt NATTable) { + n.natMu.Lock() + defer n.natMu.Unlock() + n.natTable = nt +} + +// SoleLANIP implements [IPPool]. +func (n *network) SoleLANIP() (netip.Addr, bool) { + if len(n.nodesByIP) != 1 { + return netip.Addr{}, false + } + for ip := range n.nodesByIP { + return ip, true + } + return netip.Addr{}, false +} + +// WANIP implements [IPPool]. +func (n *network) WANIP() netip.Addr { return n.wanIP } + +func (n *network) initStack() error { + n.ns = stack.New(stack.Options{ + NetworkProtocols: []stack.NetworkProtocolFactory{ + ipv4.NewProtocol, + arp.NewProtocol, + }, + TransportProtocols: []stack.TransportProtocolFactory{ + tcp.NewProtocol, + icmp.NewProtocol4, + }, + }) + sackEnabledOpt := tcpip.TCPSACKEnabled(true) // TCP SACK is disabled by default + tcpipErr := n.ns.SetTransportProtocolOption(tcp.ProtocolNumber, &sackEnabledOpt) + if tcpipErr != nil { + return fmt.Errorf("SetTransportProtocolOption SACK: %v", tcpipErr) + } + n.linkEP = channel.New(512, 1500, tcpip.LinkAddress(n.mac.HWAddr())) + if tcpipProblem := n.ns.CreateNIC(nicID, n.linkEP); tcpipProblem != nil { + return fmt.Errorf("CreateNIC: %v", tcpipProblem) + } + n.ns.SetPromiscuousMode(nicID, true) + n.ns.SetSpoofing(nicID, true) + + prefix := tcpip.AddrFrom4Slice(n.lanIP.Addr().AsSlice()).WithPrefix() + prefix.PrefixLen = n.lanIP.Bits() + if tcpProb := n.ns.AddProtocolAddress(nicID, tcpip.ProtocolAddress{ + Protocol: ipv4.ProtocolNumber, + AddressWithPrefix: prefix, + }, stack.AddressProperties{}); tcpProb != nil { + return errors.New(tcpProb.String()) + } + + ipv4Subnet, err := tcpip.NewSubnet(tcpip.AddrFromSlice(make([]byte, 4)), tcpip.MaskFromBytes(make([]byte, 4))) + 
if err != nil { + return fmt.Errorf("could not create IPv4 subnet: %v", err) + } + n.ns.SetRouteTable([]tcpip.Route{ + { + Destination: ipv4Subnet, + NIC: nicID, + }, + }) + + const tcpReceiveBufferSize = 0 // default + const maxInFlightConnectionAttempts = 8192 + tcpFwd := tcp.NewForwarder(n.ns, tcpReceiveBufferSize, maxInFlightConnectionAttempts, n.acceptTCP) + n.ns.SetTransportProtocolHandler(tcp.ProtocolNumber, func(tei stack.TransportEndpointID, pb *stack.PacketBuffer) (handled bool) { + return tcpFwd.HandlePacket(tei, pb) + }) + + go func() { + for { + pkt := n.linkEP.ReadContext(n.s.shutdownCtx) + if pkt == nil { + if n.s.shutdownCtx.Err() != nil { + // Return without logging. + return + } + continue + } + + ipRaw := pkt.ToView().AsSlice() + goPkt := gopacket.NewPacket( + ipRaw, + layers.LayerTypeIPv4, gopacket.Lazy) + layerV4 := goPkt.Layer(layers.LayerTypeIPv4).(*layers.IPv4) + + dstIP, _ := netip.AddrFromSlice(layerV4.DstIP) + node, ok := n.nodesByIP[dstIP] + if !ok { + log.Printf("no MAC for dest IP %v", dstIP) + continue + } + eth := &layers.Ethernet{ + SrcMAC: n.mac.HWAddr(), + DstMAC: node.mac.HWAddr(), + EthernetType: layers.EthernetTypeIPv4, + } + buffer := gopacket.NewSerializeBuffer() + options := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true} + sls := []gopacket.SerializableLayer{ + eth, + } + for _, layer := range goPkt.Layers() { + sl, ok := layer.(gopacket.SerializableLayer) + if !ok { + log.Fatalf("layer %s is not serializable", layer.LayerType().String()) + } + switch gl := layer.(type) { + case *layers.TCP: + gl.SetNetworkLayerForChecksum(layerV4) + case *layers.UDP: + gl.SetNetworkLayerForChecksum(layerV4) + } + sls = append(sls, sl) + } + + if err := gopacket.SerializeLayers(buffer, options, sls...); err != nil { + log.Printf("Serialize error: %v", err) + continue + } + if writeFunc, ok := n.writeFunc.Load(node.mac); ok { + writeFunc(buffer.Bytes()) + } else { + log.Printf("No writeFunc for %v", node.mac) + } + } + 
}() + return nil +} + +func netaddrIPFromNetstackIP(s tcpip.Address) netip.Addr { + switch s.Len() { + case 4: + return netip.AddrFrom4(s.As4()) + case 16: + return netip.AddrFrom16(s.As16()).Unmap() + } + return netip.Addr{} +} + +func stringifyTEI(tei stack.TransportEndpointID) string { + localHostPort := net.JoinHostPort(tei.LocalAddress.String(), strconv.Itoa(int(tei.LocalPort))) + remoteHostPort := net.JoinHostPort(tei.RemoteAddress.String(), strconv.Itoa(int(tei.RemotePort))) + return fmt.Sprintf("%s -> %s", remoteHostPort, localHostPort) +} + +func (n *network) acceptTCP(r *tcp.ForwarderRequest) { + reqDetails := r.ID() + + clientRemoteIP := netaddrIPFromNetstackIP(reqDetails.RemoteAddress) + destIP := netaddrIPFromNetstackIP(reqDetails.LocalAddress) + destPort := reqDetails.LocalPort + if !clientRemoteIP.IsValid() { + r.Complete(true) // sends a RST + return + } + + var wq waiter.Queue + ep, err := r.CreateEndpoint(&wq) + if err != nil { + log.Printf("CreateEndpoint error for %s: %v", stringifyTEI(reqDetails), err) + r.Complete(true) // sends a RST + return + } + ep.SocketOptions().SetKeepAlive(true) + + if destPort == 123 { + r.Complete(false) + tc := gonet.NewTCPConn(&wq, ep) + io.WriteString(tc, "Hello from Go\nGoodbye.\n") + tc.Close() + return + } + + if destPort == 124 { + node, ok := n.nodesByIP[clientRemoteIP] + if !ok { + log.Printf("no node for TCP 124 connection from %v", clientRemoteIP) + r.Complete(true) + return + } + r.Complete(false) + tc := gonet.NewTCPConn(&wq, ep) + + go func() { + defer tc.Close() + bs := bufio.NewScanner(tc) + for bs.Scan() { + line := bs.Text() + log.Printf("LOG from %v: %s", node, line) + } + }() + return + } + + if destPort == 8008 && destIP == fakeTestAgentIP { + r.Complete(false) + tc := gonet.NewTCPConn(&wq, ep) + node := n.nodesByIP[clientRemoteIP] + ac := &agentConn{node, tc} + n.s.addIdleAgentConn(ac) + return + } + + if destPort == 80 && destIP == fakeControlIP { + r.Complete(false) + tc := 
gonet.NewTCPConn(&wq, ep) + hs := &http.Server{Handler: n.s.control} + go hs.Serve(netutil.NewOneConnListener(tc, nil)) + return + } + + if destPort == 443 && (destIP == fakeDERP1IP || destIP == fakeDERP2IP) { + ds := n.s.derps[0] + if destIP == fakeDERP2IP { + ds = n.s.derps[1] + } + + r.Complete(false) + tc := gonet.NewTCPConn(&wq, ep) + tlsConn := tls.Server(tc, ds.tlsConfig) + hs := &http.Server{Handler: ds.handler} + go hs.Serve(netutil.NewOneConnListener(tlsConn, nil)) + return + } + if destPort == 80 && (destIP == fakeDERP1IP || destIP == fakeDERP2IP) { + r.Complete(false) + tc := gonet.NewTCPConn(&wq, ep) + hs := &http.Server{Handler: n.s.derps[0].handler} + go hs.Serve(netutil.NewOneConnListener(tc, nil)) + return + } + if destPort == 443 && destIP == fakeLogCatcherIP { + + r.Complete(false) + tc := gonet.NewTCPConn(&wq, ep) + go n.serveLogCatcherConn(clientRemoteIP, tc) + return + } + + log.Printf("vnet-AcceptTCP: %v", stringifyTEI(reqDetails)) + + var targetDial string + if n.s.derpIPs.Contains(destIP) { + targetDial = destIP.String() + ":" + strconv.Itoa(int(destPort)) + } else if destIP == fakeProxyControlplaneIP { + targetDial = "controlplane.tailscale.com:" + strconv.Itoa(int(destPort)) + } + if targetDial != "" { + c, err := net.Dial("tcp", targetDial) + if err != nil { + r.Complete(true) + log.Printf("Dial controlplane: %v", err) + return + } + defer c.Close() + tc := gonet.NewTCPConn(&wq, ep) + defer tc.Close() + r.Complete(false) + errc := make(chan error, 2) + go func() { _, err := io.Copy(tc, c); errc <- err }() + go func() { _, err := io.Copy(c, tc); errc <- err }() + <-errc + } else { + r.Complete(true) // sends a RST + } +} + +// serveLogCatchConn serves a TCP connection to "log.tailscale.io", speaking the +// logtail/logcatcher protocol. +// +// We terminate TLS with an arbitrary cert; the client is configured to not +// validate TLS certs for this hostname when running under these integration +// tests. 
+func (n *network) serveLogCatcherConn(clientRemoteIP netip.Addr, c net.Conn) {
+	tlsConfig := n.s.derps[0].tlsConfig // self-signed (stealing DERP's); test client configured to not check
+	tlsConn := tls.Server(c, tlsConfig)
+	var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		all, _ := io.ReadAll(r.Body)
+		if r.Header.Get("Content-Encoding") == "zstd" {
+			var err error
+			all, err = zstdframe.AppendDecode(nil, all)
+			if err != nil {
+				log.Printf("LOGS DECODE ERROR zstd decode: %v", err)
+				http.Error(w, "zstd decode error", http.StatusBadRequest)
+				return
+			}
+		}
+		var logs []struct {
+			Logtail struct {
+				Client_Time time.Time
+			}
+			Text string
+		}
+		if err := json.Unmarshal(all, &logs); err != nil {
+			log.Printf("Logs decode error: %v", err)
+			return
+		}
+		node := n.nodesByIP[clientRemoteIP]
+		if node != nil {
+			node.logMu.Lock()
+			defer node.logMu.Unlock()
+			node.logCatcherWrites++
+			for _, lg := range logs {
+				tStr := lg.Logtail.Client_Time.Round(time.Millisecond).Format(time.RFC3339Nano)
+				fmt.Fprintf(&node.logBuf, "[%v] %s\n", tStr, lg.Text)
+			}
+		}
+	})
+	hs := &http.Server{Handler: handler}
+	hs.Serve(netutil.NewOneConnListener(tlsConn, nil))
+}
+
+var (
+	fakeDNSIP               = netip.AddrFrom4([4]byte{4, 11, 4, 11})
+	fakeProxyControlplaneIP = netip.AddrFrom4([4]byte{52, 52, 0, 1}) // real controlplane.tailscale.com proxy
+	fakeTestAgentIP         = netip.AddrFrom4([4]byte{52, 52, 0, 2})
+	fakeControlIP           = netip.AddrFrom4([4]byte{52, 52, 0, 3}) // 3=C for "Control"
+	fakeDERP1IP             = netip.AddrFrom4([4]byte{33, 4, 0, 1})  // 3340=DERP; 1=derp 1
+	fakeDERP2IP             = netip.AddrFrom4([4]byte{33, 4, 0, 2})  // 3340=DERP; 2=derp 2
+	fakeLogCatcherIP        = netip.AddrFrom4([4]byte{52, 52, 0, 4})
+	fakeSyslogIP            = netip.AddrFrom4([4]byte{52, 52, 0, 9})
+)
+
+type EthernetPacket struct {
+	le *layers.Ethernet
+	gp gopacket.Packet
+}
+
+func (ep EthernetPacket) SrcMAC() MAC {
+	return MAC(ep.le.SrcMAC)
+}
+
+func (ep EthernetPacket) DstMAC() MAC {
+	return
MAC(ep.le.DstMAC) +} + +type MAC [6]byte + +func (m MAC) IsBroadcast() bool { + return m == MAC{0xff, 0xff, 0xff, 0xff, 0xff, 0xff} +} + +func macOf(hwa net.HardwareAddr) (_ MAC, ok bool) { + if len(hwa) != 6 { + return MAC{}, false + } + return MAC(hwa), true +} + +func (m MAC) HWAddr() net.HardwareAddr { + return net.HardwareAddr(m[:]) +} + +func (m MAC) String() string { + return fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x", m[0], m[1], m[2], m[3], m[4], m[5]) +} + +type portMapping struct { + dst netip.AddrPort // LAN IP:port + expiry time.Time +} + +type network struct { + s *Server + mac MAC + portmap bool + lanInterfaceID int + wanInterfaceID int + wanIP netip.Addr + lanIP netip.Prefix // with host bits set (e.g. 192.168.2.1/24) + nodesByIP map[netip.Addr]*node + logf func(format string, args ...any) + + ns *stack.Stack + linkEP *channel.Endpoint + + natStyle syncs.AtomicValue[NAT] + natMu sync.Mutex // held while using + changing natTable + natTable NATTable + portMap map[netip.AddrPort]portMapping // WAN ip:port -> LAN ip:port + portMapFlow map[portmapFlowKey]netip.AddrPort // (lanAP, peerWANAP) -> portmapped wanAP + + // writeFunc is a map of MAC -> func to write to that MAC. + // It contains entries for connected nodes only. + writeFunc syncs.Map[MAC, func([]byte)] // MAC -> func to write to that MAC +} + +func (n *network) registerWriter(mac MAC, f func([]byte)) { + if f != nil { + n.writeFunc.Store(mac, f) + } else { + n.writeFunc.Delete(mac) + } +} + +func (n *network) MACOfIP(ip netip.Addr) (_ MAC, ok bool) { + if n.lanIP.Addr() == ip { + return n.mac, true + } + if n, ok := n.nodesByIP[ip]; ok { + return n.mac, true + } + return MAC{}, false +} + +type node struct { + mac MAC + num int // 1-based node number + interfaceID int + net *network + lanIP netip.Addr // must be in net.lanIP prefix + unique in net + verboseSyslog bool + + // logMu guards logBuf. + // TODO(bradfitz): conditionally write these out to separate files at the end? 
+ // Currently they only hold logcatcher logs. + logMu sync.Mutex + logBuf bytes.Buffer + logCatcherWrites int +} + +// String returns the string "nodeN" where N is the 1-based node number. +func (n *node) String() string { + return fmt.Sprintf("node%d", n.num) +} + +type derpServer struct { + srv *derp.Server + handler http.Handler + tlsConfig *tls.Config +} + +func newDERPServer() *derpServer { + // Just to get a self-signed TLS cert: + ts := httptest.NewTLSServer(nil) + ts.Close() + + ds := &derpServer{ + srv: derp.NewServer(key.NewNode(), logger.Discard), + tlsConfig: ts.TLS, // self-signed; test client configure to not check + } + var mux http.ServeMux + mux.Handle("/derp", derphttp.Handler(ds.srv)) + mux.HandleFunc("/generate_204", derphttp.ServeNoContent) + + ds.handler = &mux + return ds +} + +type Server struct { + shutdownCtx context.Context + shutdownCancel context.CancelFunc + shuttingDown atomic.Bool + wg sync.WaitGroup + blendReality bool + + derpIPs set.Set[netip.Addr] + + nodes []*node + nodeByMAC map[MAC]*node + networks set.Set[*network] + networkByWAN map[netip.Addr]*network + + control *testcontrol.Server + derps []*derpServer + pcapWriter *pcapWriter + + mu sync.Mutex + agentConnWaiter map[*node]chan<- struct{} // signaled after added to set + agentConns set.Set[*agentConn] // not keyed by node; should be small/cheap enough to scan all + agentDialer map[*node]DialFunc +} + +type DialFunc func(ctx context.Context, network, address string) (net.Conn, error) + +var derpMap = &tailcfg.DERPMap{ + Regions: map[int]*tailcfg.DERPRegion{ + 1: { + RegionID: 1, + RegionCode: "atlantis", + RegionName: "Atlantis", + Nodes: []*tailcfg.DERPNode{ + { + Name: "1a", + RegionID: 1, + HostName: "derp1.tailscale", + IPv4: fakeDERP1IP.String(), + InsecureForTests: true, + CanPort80: true, + }, + }, + }, + 2: { + RegionID: 2, + RegionCode: "northpole", + RegionName: "North Pole", + Nodes: []*tailcfg.DERPNode{ + { + Name: "2a", + RegionID: 2, + HostName: 
"derp2.tailscale", + IPv4: fakeDERP2IP.String(), + InsecureForTests: true, + CanPort80: true, + }, + }, + }, + }, +} + +func New(c *Config) (*Server, error) { + ctx, cancel := context.WithCancel(context.Background()) + s := &Server{ + shutdownCtx: ctx, + shutdownCancel: cancel, + + control: &testcontrol.Server{ + DERPMap: derpMap, + ExplicitBaseURL: "http://control.tailscale", + }, + + derpIPs: set.Of[netip.Addr](), + + nodeByMAC: map[MAC]*node{}, + networkByWAN: map[netip.Addr]*network{}, + networks: set.Of[*network](), + } + for range 2 { + s.derps = append(s.derps, newDERPServer()) + } + if err := s.initFromConfig(c); err != nil { + return nil, err + } + for n := range s.networks { + if err := n.initStack(); err != nil { + return nil, fmt.Errorf("newServer: initStack: %v", err) + } + } + + return s, nil +} + +func (s *Server) Close() { + if shutdown := s.shuttingDown.Swap(true); !shutdown { + s.shutdownCancel() + s.pcapWriter.Close() + } + s.wg.Wait() +} + +func (s *Server) HWAddr(mac MAC) net.HardwareAddr { + // TODO: cache + return net.HardwareAddr(mac[:]) +} + +// IPv4ForDNS returns the IP address for the given DNS query name (for IPv4 A +// queries only). +func (s *Server) IPv4ForDNS(qname string) (netip.Addr, bool) { + switch qname { + case "dns": + return fakeDNSIP, true + case "log.tailscale.io": + return fakeLogCatcherIP, true + case "test-driver.tailscale": + return fakeTestAgentIP, true + case "controlplane.tailscale.com": + return fakeProxyControlplaneIP, true + case "control.tailscale": + return fakeControlIP, true + case "derp1.tailscale": + return fakeDERP1IP, true + case "derp2.tailscale": + return fakeDERP2IP, true + } + return netip.Addr{}, false +} + +type Protocol int + +const ( + ProtocolQEMU = Protocol(iota + 1) + ProtocolUnixDGRAM // for macOS Hypervisor.Framework and VZFileHandleNetworkDeviceAttachment +) + +// serveConn serves a single connection from a client. 
+func (s *Server) ServeUnixConn(uc *net.UnixConn, proto Protocol) { + if s.shuttingDown.Load() { + return + } + s.wg.Add(1) + defer s.wg.Done() + context.AfterFunc(s.shutdownCtx, func() { + uc.SetDeadline(time.Now()) + }) + log.Printf("Got conn %T %p", uc, uc) + defer uc.Close() + + bw := bufio.NewWriterSize(uc, 2<<10) + var writeMu sync.Mutex + var srcNode *node + writePkt := func(pkt []byte) { + if pkt == nil { + return + } + writeMu.Lock() + defer writeMu.Unlock() + if proto == ProtocolQEMU { + hdr := binary.BigEndian.AppendUint32(bw.AvailableBuffer()[:0], uint32(len(pkt))) + if _, err := bw.Write(hdr); err != nil { + log.Printf("Write hdr: %v", err) + return + } + } + if _, err := bw.Write(pkt); err != nil { + log.Printf("Write pkt: %v", err) + return + } + if err := bw.Flush(); err != nil { + log.Printf("Flush: %v", err) + } + must.Do(s.pcapWriter.WritePacket(gopacket.CaptureInfo{ + Timestamp: time.Now(), + CaptureLength: len(pkt), + Length: len(pkt), + InterfaceIndex: srcNode.interfaceID, + }, pkt)) + } + + buf := make([]byte, 16<<10) + var netw *network // non-nil after first packet + for { + var packetRaw []byte + if proto == ProtocolUnixDGRAM { + n, _, err := uc.ReadFromUnix(buf) + if err != nil { + log.Printf("ReadFromUnix: %v", err) + continue + } + packetRaw = buf[:n] + } else if proto == ProtocolQEMU { + if _, err := io.ReadFull(uc, buf[:4]); err != nil { + if s.shutdownCtx.Err() != nil { + // Return without logging. + return + } + log.Printf("ReadFull header: %v", err) + return + } + n := binary.BigEndian.Uint32(buf[:4]) + + if _, err := io.ReadFull(uc, buf[4:4+n]); err != nil { + if s.shutdownCtx.Err() != nil { + // Return without logging. 
+ return + } + log.Printf("ReadFull pkt: %v", err) + return + } + packetRaw = buf[4 : 4+n] // raw ethernet frame + } + + packet := gopacket.NewPacket(packetRaw, layers.LayerTypeEthernet, gopacket.Lazy) + le, ok := packet.LinkLayer().(*layers.Ethernet) + if !ok || len(le.SrcMAC) != 6 || len(le.DstMAC) != 6 { + continue + } + ep := EthernetPacket{le, packet} + + srcMAC := ep.SrcMAC() + if srcNode == nil { + srcNode, ok = s.nodeByMAC[srcMAC] + if !ok { + log.Printf("[conn %p] ignoring frame from unknown MAC %v", uc, srcMAC) + continue + } + log.Printf("[conn %p] MAC %v is node %v", uc, srcMAC, srcNode.lanIP) + netw = srcNode.net + netw.registerWriter(srcMAC, writePkt) + defer netw.registerWriter(srcMAC, nil) + } else { + if srcMAC != srcNode.mac { + log.Printf("[conn %p] ignoring frame from MAC %v, expected %v", uc, srcMAC, srcNode.mac) + continue + } + } + must.Do(s.pcapWriter.WritePacket(gopacket.CaptureInfo{ + Timestamp: time.Now(), + CaptureLength: len(packetRaw), + Length: len(packetRaw), + InterfaceIndex: srcNode.interfaceID, + }, packetRaw)) + netw.HandleEthernetPacket(ep) + } +} + +func (s *Server) routeUDPPacket(up UDPPacket) { + // Find which network owns this based on the destination IP + // and all the known networks' wan IPs. + + // But certain things (like STUN) we do in-process. + if up.Dst.Port() == stunPort { + // TODO(bradfitz): fake latency; time.AfterFunc the response + if res, ok := makeSTUNReply(up); ok { + //log.Printf("STUN reply: %+v", res) + s.routeUDPPacket(res) + } else { + log.Printf("weird: STUN packet not handled") + } + return + } + + dstIP := up.Dst.Addr() + netw, ok := s.networkByWAN[dstIP] + if !ok { + if dstIP.IsPrivate() { + // Not worth spamming logs. RFC 1918 space doesn't route. + return + } + log.Printf("no network to route UDP packet for %v", up.Dst) + return + } + netw.HandleUDPPacket(up) +} + +// writeEth writes a raw Ethernet frame to all (0, 1, or multiple) connected +// clients on the network. 
+// +// This only delivers to client devices and not the virtual router/gateway +// device. +func (n *network) writeEth(res []byte) { + if len(res) < 12 { + return + } + dstMAC := MAC(res[0:6]) + srcMAC := MAC(res[6:12]) + if dstMAC.IsBroadcast() { + n.writeFunc.Range(func(mac MAC, writeFunc func([]byte)) bool { + writeFunc(res) + return true + }) + return + } + if srcMAC == dstMAC { + n.logf("dropping write of packet from %v to itself", srcMAC) + return + } + if writeFunc, ok := n.writeFunc.Load(dstMAC); ok { + writeFunc(res) + return + } +} + +func (n *network) HandleEthernetPacket(ep EthernetPacket) { + packet := ep.gp + dstMAC := ep.DstMAC() + isBroadcast := dstMAC.IsBroadcast() + forRouter := dstMAC == n.mac || isBroadcast + + switch ep.le.EthernetType { + default: + n.logf("Dropping non-IP packet: %v", ep.le.EthernetType) + return + case layers.EthernetTypeARP: + res, err := n.createARPResponse(packet) + if err != nil { + n.logf("createARPResponse: %v", err) + } else { + n.writeEth(res) + } + return + case layers.EthernetTypeIPv6: + // One day. Low value for now. IPv4 NAT modes is the main thing + // this project wants to test. + return + case layers.EthernetTypeIPv4: + // Below + } + + // Send ethernet broadcasts and unicast ethernet frames to peers + // on the same network. This is all LAN traffic that isn't meant + // for the router/gw itself: + n.writeEth(ep.gp.Data()) + + if forRouter { + n.HandleEthernetIPv4PacketForRouter(ep) + } +} + +// HandleUDPPacket handles a UDP packet arriving from the internet, +// addressed to the router's WAN IP. It is then NATed back to a +// LAN IP here and wrapped in an ethernet layer and delivered +// to the network. 
+func (n *network) HandleUDPPacket(p UDPPacket) { + buf, err := n.serializedUDPPacket(p.Src, p.Dst, p.Payload, nil) + if err != nil { + n.logf("serializing UDP packet: %v", err) + return + } + n.s.pcapWriter.WritePacket(gopacket.CaptureInfo{ + Timestamp: time.Now(), + CaptureLength: len(buf), + Length: len(buf), + InterfaceIndex: n.wanInterfaceID, + }, buf) + dst := n.doNATIn(p.Src, p.Dst) + if !dst.IsValid() { + n.logf("Warning: NAT dropped packet; no mapping for %v=>%v", p.Src, p.Dst) + return + } + p.Dst = dst + buf, err = n.serializedUDPPacket(p.Src, p.Dst, p.Payload, nil) + if err != nil { + n.logf("serializing UDP packet: %v", err) + return + } + n.s.pcapWriter.WritePacket(gopacket.CaptureInfo{ + Timestamp: time.Now(), + CaptureLength: len(buf), + Length: len(buf), + InterfaceIndex: n.lanInterfaceID, + }, buf) + n.WriteUDPPacketNoNAT(p) +} + +// WriteUDPPacketNoNAT writes a UDP packet to the network, without +// doing any NAT translation. +// +// The packet will always have the ethernet src MAC of the router +// so this should not be used for packets between clients on the +// same ethernet segment. +func (n *network) WriteUDPPacketNoNAT(p UDPPacket) { + src, dst := p.Src, p.Dst + node, ok := n.nodesByIP[dst.Addr()] + if !ok { + n.logf("no node for dest IP %v in UDP packet %v=>%v", dst.Addr(), p.Src, p.Dst) + return + } + + eth := &layers.Ethernet{ + SrcMAC: n.mac.HWAddr(), // of gateway + DstMAC: node.mac.HWAddr(), + EthernetType: layers.EthernetTypeIPv4, + } + ethRaw, err := n.serializedUDPPacket(src, dst, p.Payload, eth) + if err != nil { + n.logf("serializing UDP packet: %v", err) + return + } + n.writeEth(ethRaw) +} + +// serializedUDPPacket serializes a UDP packet with the given source and +// destination IP:port pairs, and payload. +// +// If eth is non-nil, it will be used as the Ethernet layer, otherwise the +// Ethernet layer will be omitted from the serialization. 
+func (n *network) serializedUDPPacket(src, dst netip.AddrPort, payload []byte, eth *layers.Ethernet) ([]byte, error) { + ip := &layers.IPv4{ + Version: 4, + TTL: 64, + Protocol: layers.IPProtocolUDP, + SrcIP: src.Addr().AsSlice(), + DstIP: dst.Addr().AsSlice(), + } + udp := &layers.UDP{ + SrcPort: layers.UDPPort(src.Port()), + DstPort: layers.UDPPort(dst.Port()), + } + udp.SetNetworkLayerForChecksum(ip) + + buffer := gopacket.NewSerializeBuffer() + options := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true} + layers := []gopacket.SerializableLayer{eth, ip, udp, gopacket.Payload(payload)} + if eth == nil { + layers = layers[1:] + } + if err := gopacket.SerializeLayers(buffer, options, layers...); err != nil { + return nil, fmt.Errorf("serializing UDP: %v", err) + } + return buffer.Bytes(), nil +} + +// HandleEthernetIPv4PacketForRouter handles an IPv4 packet that is +// directed to the router/gateway itself. The packet may be to the +// broadcast MAC address, or to the router's MAC address. The target +// IP may be the router's IP, or an internet (routed) IP. +func (n *network) HandleEthernetIPv4PacketForRouter(ep EthernetPacket) { + packet := ep.gp + + v4, ok := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4) + if !ok { + return + } + srcIP, _ := netip.AddrFromSlice(v4.SrcIP) + dstIP, _ := netip.AddrFromSlice(v4.DstIP) + toForward := dstIP != n.lanIP.Addr() && dstIP != netip.IPv4Unspecified() + udp, isUDP := packet.Layer(layers.LayerTypeUDP).(*layers.UDP) + + if isDHCPRequest(packet) { + res, err := n.s.createDHCPResponse(packet) + if err != nil { + n.logf("createDHCPResponse: %v", err) + return + } + n.writeEth(res) + return + } + + if isMDNSQuery(packet) || isIGMP(packet) { + // Don't log. Spammy for now. + return + } + + if isDNSRequest(packet) { + // TODO(bradfitz): restrict this to 4.11.4.11? add DNS + // on gateway instead? 
+ res, err := n.s.createDNSResponse(packet) + if err != nil { + n.logf("createDNSResponse: %v", err) + return + } + n.writeEth(res) + return + } + + if isUDP && dstIP == fakeSyslogIP { + node, ok := n.nodesByIP[srcIP] + if !ok { + return + } + if node.verboseSyslog { + // TODO(bradfitz): parse this and capture it, structured, into + // node's log buffer. + log.Printf("syslog from %v: %s", node, udp.Payload) + } + return + } + + if !toForward && isNATPMP(packet) { + n.handleNATPMPRequest(UDPPacket{ + Src: netip.AddrPortFrom(srcIP, uint16(udp.SrcPort)), + Dst: netip.AddrPortFrom(dstIP, uint16(udp.DstPort)), + Payload: udp.Payload, + }) + return + } + + if toForward && isUDP { + src := netip.AddrPortFrom(srcIP, uint16(udp.SrcPort)) + dst := netip.AddrPortFrom(dstIP, uint16(udp.DstPort)) + buf, err := n.serializedUDPPacket(src, dst, udp.Payload, nil) + if err != nil { + n.logf("serializing UDP packet: %v", err) + return + } + n.s.pcapWriter.WritePacket(gopacket.CaptureInfo{ + Timestamp: time.Now(), + CaptureLength: len(buf), + Length: len(buf), + InterfaceIndex: n.lanInterfaceID, + }, buf) + + src = n.doNATOut(src, dst) + buf, err = n.serializedUDPPacket(src, dst, udp.Payload, nil) + if err != nil { + n.logf("serializing UDP packet: %v", err) + return + } + n.s.pcapWriter.WritePacket(gopacket.CaptureInfo{ + Timestamp: time.Now(), + CaptureLength: len(buf), + Length: len(buf), + InterfaceIndex: n.wanInterfaceID, + }, buf) + + n.s.routeUDPPacket(UDPPacket{ + Src: src, + Dst: dst, + Payload: udp.Payload, + }) + return + } + + if toForward && n.s.shouldInterceptTCP(packet) { + ipp := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4) + pktCopy := make([]byte, 0, len(ipp.Contents)+len(ipp.Payload)) + pktCopy = append(pktCopy, ipp.Contents...) + pktCopy = append(pktCopy, ipp.Payload...) 
+ packetBuf := stack.NewPacketBuffer(stack.PacketBufferOptions{ + Payload: buffer.MakeWithData(pktCopy), + }) + n.linkEP.InjectInbound(header.IPv4ProtocolNumber, packetBuf) + packetBuf.DecRef() + return + } + + //log.Printf("Got packet: %v", packet) +} + +func (s *Server) createDHCPResponse(request gopacket.Packet) ([]byte, error) { + ethLayer := request.Layer(layers.LayerTypeEthernet).(*layers.Ethernet) + srcMAC, ok := macOf(ethLayer.SrcMAC) + if !ok { + return nil, nil + } + node, ok := s.nodeByMAC[srcMAC] + if !ok { + log.Printf("DHCP request from unknown node %v; ignoring", srcMAC) + return nil, nil + } + gwIP := node.net.lanIP.Addr() + + ipLayer := request.Layer(layers.LayerTypeIPv4).(*layers.IPv4) + udpLayer := request.Layer(layers.LayerTypeUDP).(*layers.UDP) + dhcpLayer := request.Layer(layers.LayerTypeDHCPv4).(*layers.DHCPv4) + + response := &layers.DHCPv4{ + Operation: layers.DHCPOpReply, + HardwareType: layers.LinkTypeEthernet, + HardwareLen: 6, + Xid: dhcpLayer.Xid, + ClientHWAddr: dhcpLayer.ClientHWAddr, + Flags: dhcpLayer.Flags, + YourClientIP: node.lanIP.AsSlice(), + Options: []layers.DHCPOption{ + { + Type: layers.DHCPOptServerID, + Data: gwIP.AsSlice(), // DHCP server's IP + Length: 4, + }, + }, + } + + var msgType layers.DHCPMsgType + for _, opt := range dhcpLayer.Options { + if opt.Type == layers.DHCPOptMessageType && opt.Length > 0 { + msgType = layers.DHCPMsgType(opt.Data[0]) + } + } + switch msgType { + case layers.DHCPMsgTypeDiscover: + response.Options = append(response.Options, layers.DHCPOption{ + Type: layers.DHCPOptMessageType, + Data: []byte{byte(layers.DHCPMsgTypeOffer)}, + Length: 1, + }) + case layers.DHCPMsgTypeRequest: + response.Options = append(response.Options, + layers.DHCPOption{ + Type: layers.DHCPOptMessageType, + Data: []byte{byte(layers.DHCPMsgTypeAck)}, + Length: 1, + }, + layers.DHCPOption{ + Type: layers.DHCPOptLeaseTime, + Data: binary.BigEndian.AppendUint32(nil, 3600), // hour? sure. 
+ Length: 4, + }, + layers.DHCPOption{ + Type: layers.DHCPOptRouter, + Data: gwIP.AsSlice(), + Length: 4, + }, + layers.DHCPOption{ + Type: layers.DHCPOptDNS, + Data: fakeDNSIP.AsSlice(), + Length: 4, + }, + layers.DHCPOption{ + Type: layers.DHCPOptSubnetMask, + Data: net.CIDRMask(node.net.lanIP.Bits(), 32), + Length: 4, + }, + ) + } + + eth := &layers.Ethernet{ + SrcMAC: node.net.mac.HWAddr(), + DstMAC: ethLayer.SrcMAC, + EthernetType: layers.EthernetTypeIPv4, + } + + ip := &layers.IPv4{ + Version: 4, + TTL: 64, + Protocol: layers.IPProtocolUDP, + SrcIP: ipLayer.DstIP, + DstIP: ipLayer.SrcIP, + } + + udp := &layers.UDP{ + SrcPort: udpLayer.DstPort, + DstPort: udpLayer.SrcPort, + } + udp.SetNetworkLayerForChecksum(ip) + + buffer := gopacket.NewSerializeBuffer() + options := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true} + if err := gopacket.SerializeLayers(buffer, options, + eth, + ip, + udp, + response, + ); err != nil { + return nil, err + } + + return buffer.Bytes(), nil +} + +func isDHCPRequest(pkt gopacket.Packet) bool { + v4, ok := pkt.Layer(layers.LayerTypeIPv4).(*layers.IPv4) + if !ok || v4.Protocol != layers.IPProtocolUDP { + return false + } + udp, ok := pkt.Layer(layers.LayerTypeUDP).(*layers.UDP) + return ok && udp.DstPort == 67 && udp.SrcPort == 68 +} + +func isIGMP(pkt gopacket.Packet) bool { + return pkt.Layer(layers.LayerTypeIGMP) != nil +} + +func isMDNSQuery(pkt gopacket.Packet) bool { + udp, ok := pkt.Layer(layers.LayerTypeUDP).(*layers.UDP) + // TODO(bradfitz): also check IPv4 DstIP=224.0.0.251 (or whatever) + return ok && udp.SrcPort == 5353 && udp.DstPort == 5353 +} + +func (s *Server) shouldInterceptTCP(pkt gopacket.Packet) bool { + tcp, ok := pkt.Layer(layers.LayerTypeTCP).(*layers.TCP) + if !ok { + return false + } + ipv4, ok := pkt.Layer(layers.LayerTypeIPv4).(*layers.IPv4) + if !ok { + return false + } + if tcp.DstPort == 123 || tcp.DstPort == 124 { + return true + } + dstIP, _ := netip.AddrFromSlice(ipv4.DstIP.To4()) 
+ if tcp.DstPort == 80 || tcp.DstPort == 443 { + switch dstIP { + case fakeControlIP, fakeDERP1IP, fakeDERP2IP, fakeLogCatcherIP: + return true + } + if dstIP == fakeProxyControlplaneIP { + return s.blendReality + } + if s.derpIPs.Contains(dstIP) { + return true + } + } + if tcp.DstPort == 8008 && dstIP == fakeTestAgentIP { + // Connection from cmd/tta. + return true + } + return false +} + +// isDNSRequest reports whether pkt is a DNS request to the fake DNS server. +func isDNSRequest(pkt gopacket.Packet) bool { + udp, ok := pkt.Layer(layers.LayerTypeUDP).(*layers.UDP) + if !ok || udp.DstPort != 53 { + return false + } + ip, ok := pkt.Layer(layers.LayerTypeIPv4).(*layers.IPv4) + if !ok { + return false + } + dstIP, ok := netip.AddrFromSlice(ip.DstIP) + if !ok || dstIP != fakeDNSIP { + return false + } + dns, ok := pkt.Layer(layers.LayerTypeDNS).(*layers.DNS) + return ok && dns.QR == false && len(dns.Questions) > 0 +} + +func isNATPMP(pkt gopacket.Packet) bool { + udp, ok := pkt.Layer(layers.LayerTypeUDP).(*layers.UDP) + return ok && udp.DstPort == 5351 && len(udp.Payload) > 0 && udp.Payload[0] == 0 // version 0, not 2 for PCP +} + +func makeSTUNReply(req UDPPacket) (res UDPPacket, ok bool) { + txid, err := stun.ParseBindingRequest(req.Payload) + if err != nil { + log.Printf("invalid STUN request: %v", err) + return res, false + } + return UDPPacket{ + Src: req.Dst, + Dst: req.Src, + Payload: stun.Response(txid, req.Src), + }, true +} + +func (s *Server) createDNSResponse(pkt gopacket.Packet) ([]byte, error) { + ethLayer := pkt.Layer(layers.LayerTypeEthernet).(*layers.Ethernet) + ipLayer := pkt.Layer(layers.LayerTypeIPv4).(*layers.IPv4) + udpLayer := pkt.Layer(layers.LayerTypeUDP).(*layers.UDP) + dnsLayer := pkt.Layer(layers.LayerTypeDNS).(*layers.DNS) + + if dnsLayer.OpCode != layers.DNSOpCodeQuery || dnsLayer.QR || len(dnsLayer.Questions) == 0 { + return nil, nil + } + + response := &layers.DNS{ + ID: dnsLayer.ID, + QR: true, + AA: true, + TC: false, + RD: 
dnsLayer.RD, + RA: true, + OpCode: layers.DNSOpCodeQuery, + ResponseCode: layers.DNSResponseCodeNoErr, + } + + var names []string + for _, q := range dnsLayer.Questions { + response.QDCount++ + response.Questions = append(response.Questions, q) + + if mem.HasSuffix(mem.B(q.Name), mem.S(".pool.ntp.org")) { + // Just drop DNS queries for NTP servers. For Debian/etc guests used + // during development. Not needed. Assume VM guests get correct time + // via their hypervisor. + return nil, nil + } + + names = append(names, q.Type.String()+"/"+string(q.Name)) + if q.Class != layers.DNSClassIN || q.Type != layers.DNSTypeA { + continue + } + + if ip, ok := s.IPv4ForDNS(string(q.Name)); ok { + response.ANCount++ + response.Answers = append(response.Answers, layers.DNSResourceRecord{ + Name: q.Name, + Type: q.Type, + Class: q.Class, + IP: ip.AsSlice(), + TTL: 60, + }) + } + } + + eth2 := &layers.Ethernet{ + SrcMAC: ethLayer.DstMAC, + DstMAC: ethLayer.SrcMAC, + EthernetType: layers.EthernetTypeIPv4, + } + ip2 := &layers.IPv4{ + Version: 4, + TTL: 64, + Protocol: layers.IPProtocolUDP, + SrcIP: ipLayer.DstIP, + DstIP: ipLayer.SrcIP, + } + udp2 := &layers.UDP{ + SrcPort: udpLayer.DstPort, + DstPort: udpLayer.SrcPort, + } + udp2.SetNetworkLayerForChecksum(ip2) + + buffer := gopacket.NewSerializeBuffer() + options := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true} + if err := gopacket.SerializeLayers(buffer, options, eth2, ip2, udp2, response); err != nil { + return nil, err + } + + const debugDNS = false + if debugDNS { + if len(response.Answers) > 0 { + back := gopacket.NewPacket(buffer.Bytes(), layers.LayerTypeEthernet, gopacket.Lazy) + log.Printf("Generated: %v", back) + } else { + log.Printf("made empty response for %q", names) + } + } + + return buffer.Bytes(), nil +} + +// doNATOut performs NAT on an outgoing packet from src to dst, where +// src is a LAN IP and dst is a WAN IP. +// +// It returns the source WAN ip:port to use. 
+func (n *network) doNATOut(src, dst netip.AddrPort) (newSrc netip.AddrPort) { + n.natMu.Lock() + defer n.natMu.Unlock() + + // First see if there's a port mapping, before doing NAT. + if wanAP, ok := n.portMapFlow[portmapFlowKey{ + peerWAN: dst, + lanAP: src, + }]; ok { + return wanAP + } + + return n.natTable.PickOutgoingSrc(src, dst, time.Now()) +} + +type portmapFlowKey struct { + peerWAN netip.AddrPort // the peer's WAN ip:port + lanAP netip.AddrPort +} + +// doNATIn performs NAT on an incoming packet from WAN src to WAN dst, returning +// a new destination LAN ip:port to use. +func (n *network) doNATIn(src, dst netip.AddrPort) (newDst netip.AddrPort) { + n.natMu.Lock() + defer n.natMu.Unlock() + + now := time.Now() + + // First see if there's a port mapping, before doing NAT. + if lanAP, ok := n.portMap[dst]; ok { + if now.Before(lanAP.expiry) { + mak.Set(&n.portMapFlow, portmapFlowKey{ + peerWAN: src, + lanAP: lanAP.dst, + }, dst) + //n.logf("NAT: doNatIn: port mapping %v=>%v", dst, lanAP.dst) + return lanAP.dst + } + n.logf("NAT: doNatIn: port mapping EXPIRED for %v=>%v", dst, lanAP.dst) + delete(n.portMap, dst) + return netip.AddrPort{} + } + + return n.natTable.PickIncomingDst(src, dst, now) +} + +// IsPublicPortUsed reports whether the given public port is currently in use. +// +// n.natMu must be held by the caller. 
(It's only called by nat implementations
+// which are always called with natMu held)
+func (n *network) IsPublicPortUsed(ap netip.AddrPort) bool {
+	_, ok := n.portMap[ap]
+	return ok
+}
+
+func (n *network) doPortMap(src netip.Addr, dstLANPort, wantExtPort uint16, sec int) (gotPort uint16, ok bool) {
+	n.natMu.Lock()
+	defer n.natMu.Unlock()
+
+	if !n.portmap {
+		return 0, false
+	}
+
+	wanAP := netip.AddrPortFrom(n.wanIP, wantExtPort)
+	dst := netip.AddrPortFrom(src, dstLANPort)
+
+	if sec == 0 {
+		lanAP, ok := n.portMap[wanAP]
+		if ok && lanAP.dst.Addr() == src {
+			delete(n.portMap, wanAP)
+		}
+		return 0, false
+	}
+
+	// See if they already have a mapping and extend expiry if so.
+	for k, v := range n.portMap {
+		if v.dst == dst {
+			n.portMap[k] = portMapping{
+				dst:    dst,
+				expiry: time.Now().Add(time.Duration(sec) * time.Second),
+			}
+			return k.Port(), true
+		}
+	}
+
+	for try := 0; try < 20_000; try++ {
+		if wanAP.Port() > 0 && !n.natTable.IsPublicPortUsed(wanAP) {
+			mak.Set(&n.portMap, wanAP, portMapping{
+				dst:    dst,
+				expiry: time.Now().Add(time.Duration(sec) * time.Second),
+			})
+			n.logf("vnet: allocated NAT mapping from %v to %v", wanAP, dst)
+			return wanAP.Port(), true
+		}
+		wantExtPort = rand.N(uint16(32<<10)) + 32<<10
+		wanAP = netip.AddrPortFrom(n.wanIP, wantExtPort)
+	}
+	return 0, false
+}
+
+func (n *network) createARPResponse(pkt gopacket.Packet) ([]byte, error) {
+	ethLayer, ok := pkt.Layer(layers.LayerTypeEthernet).(*layers.Ethernet)
+	if !ok {
+		return nil, nil
+	}
+	arpLayer, ok := pkt.Layer(layers.LayerTypeARP).(*layers.ARP)
+	if !ok ||
+		arpLayer.Operation != layers.ARPRequest ||
+		arpLayer.AddrType != layers.LinkTypeEthernet ||
+		arpLayer.Protocol != layers.EthernetTypeIPv4 ||
+		arpLayer.HwAddressSize != 6 ||
+		arpLayer.ProtAddressSize != 4 ||
+		len(arpLayer.DstProtAddress) != 4 {
+		return nil, nil
+	}
+
+	wantIP := netip.AddrFrom4([4]byte(arpLayer.DstProtAddress))
+	foundMAC, ok := n.MACOfIP(wantIP)
+	if !ok {
+		return nil, nil
+	}
+
+	
eth := &layers.Ethernet{ + SrcMAC: foundMAC.HWAddr(), + DstMAC: ethLayer.SrcMAC, + EthernetType: layers.EthernetTypeARP, + } + + a2 := &layers.ARP{ + AddrType: layers.LinkTypeEthernet, + Protocol: layers.EthernetTypeIPv4, + HwAddressSize: 6, + ProtAddressSize: 4, + Operation: layers.ARPReply, + SourceHwAddress: foundMAC.HWAddr(), + SourceProtAddress: arpLayer.DstProtAddress, + DstHwAddress: ethLayer.SrcMAC, + DstProtAddress: arpLayer.SourceProtAddress, + } + + buffer := gopacket.NewSerializeBuffer() + options := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true} + if err := gopacket.SerializeLayers(buffer, options, eth, a2); err != nil { + return nil, err + } + + return buffer.Bytes(), nil +} + +func (n *network) handleNATPMPRequest(req UDPPacket) { + if !n.portmap { + return + } + if string(req.Payload) == "\x00\x00" { + // https://www.rfc-editor.org/rfc/rfc6886#section-3.2 + + res := make([]byte, 0, 12) + res = append(res, + 0, // version 0 (NAT-PMP) + 128, // response to op 0 (128+0) + 0, 0, // result code success + ) + res = binary.BigEndian.AppendUint32(res, uint32(time.Now().Unix())) + wan4 := n.wanIP.As4() + res = append(res, wan4[:]...) 
+ n.WriteUDPPacketNoNAT(UDPPacket{ + Src: req.Dst, + Dst: req.Src, + Payload: res, + }) + return + } + + // Map UDP request + if len(req.Payload) == 12 && req.Payload[0] == 0 && req.Payload[1] == 1 { + // https://www.rfc-editor.org/rfc/rfc6886#section-3.3 + // "00 01 00 00 ed 40 00 00 00 00 1c 20" => + // 00 ver + // 01 op=map UDP + // 00 00 reserved (0 in request; in response, this is the result code) + // ed 40 internal port 60736 + // 00 00 suggested external port + // 00 00 1c 20 suggested lifetime in seconds (7200 sec = 2 hours) + internalPort := binary.BigEndian.Uint16(req.Payload[4:6]) + wantExtPort := binary.BigEndian.Uint16(req.Payload[6:8]) + lifetimeSec := binary.BigEndian.Uint32(req.Payload[8:12]) + gotPort, ok := n.doPortMap(req.Src.Addr(), internalPort, wantExtPort, int(lifetimeSec)) + if !ok { + n.logf("NAT-PMP map request for %v:%d failed", req.Src.Addr(), internalPort) + return + } + res := make([]byte, 0, 16) + res = append(res, + 0, // version 0 (NAT-PMP) + 1+128, // response to op 1 + 0, 0, // result code success + ) + res = binary.BigEndian.AppendUint32(res, uint32(time.Now().Unix())) + res = binary.BigEndian.AppendUint16(res, internalPort) + res = binary.BigEndian.AppendUint16(res, gotPort) + res = binary.BigEndian.AppendUint32(res, lifetimeSec) + n.WriteUDPPacketNoNAT(UDPPacket{ + Src: req.Dst, + Dst: req.Src, + Payload: res, + }) + return + } + + n.logf("TODO: handle NAT-PMP packet % 02x", req.Payload) +} + +// UDPPacket is a UDP packet. +// +// For the purposes of this project, a UDP packet +// (not a general IP packet) is the unit to be NAT'ed, +// as that's all that Tailscale uses. 
+type UDPPacket struct { + Src netip.AddrPort + Dst netip.AddrPort + Payload []byte // everything after UDP header +} + +func (s *Server) WriteStartingBanner(w io.Writer) { + fmt.Fprintf(w, "vnet serving clients:\n") + + for _, n := range s.nodes { + fmt.Fprintf(w, " %v %15v (%v, %v)\n", n.mac, n.lanIP, n.net.wanIP, n.net.natStyle.Load()) + } +} + +type agentConn struct { + node *node + tc *gonet.TCPConn +} + +func (s *Server) addIdleAgentConn(ac *agentConn) { + //log.Printf("got agent conn from %v", ac.node.mac) + s.mu.Lock() + defer s.mu.Unlock() + + s.agentConns.Make() + s.agentConns.Add(ac) + + if waiter, ok := s.agentConnWaiter[ac.node]; ok { + select { + case waiter <- struct{}{}: + default: + } + } +} + +func (s *Server) takeAgentConn(ctx context.Context, n *node) (_ *agentConn, ok bool) { + for { + ac, ok := s.takeAgentConnOne(n) + if ok { + //log.Printf("got agent conn for %v", n.mac) + return ac, true + } + s.mu.Lock() + ready := make(chan struct{}) + mak.Set(&s.agentConnWaiter, n, ready) + s.mu.Unlock() + + //log.Printf("waiting for agent conn for %v", n.mac) + select { + case <-ctx.Done(): + return nil, false + case <-ready: + case <-time.After(time.Second): + // Try again regularly anyway, in case we have multiple clients + // trying to hit the same node, or if a race means we weren't in the + // select by the time addIdleAgentConn tried to signal us. 
+ } + } +} + +func (s *Server) takeAgentConnOne(n *node) (_ *agentConn, ok bool) { + s.mu.Lock() + defer s.mu.Unlock() + for ac := range s.agentConns { + if ac.node == n { + s.agentConns.Delete(ac) + return ac, true + } + } + return nil, false +} + +type NodeAgentClient struct { + *tailscale.LocalClient + HTTPClient *http.Client +} + +func (s *Server) NodeAgentDialer(n *Node) DialFunc { + s.mu.Lock() + defer s.mu.Unlock() + + if d, ok := s.agentDialer[n.n]; ok { + return d + } + d := func(ctx context.Context, network, addr string) (net.Conn, error) { + ac, ok := s.takeAgentConn(ctx, n.n) + if !ok { + return nil, ctx.Err() + } + return ac.tc, nil + } + mak.Set(&s.agentDialer, n.n, d) + return d +} + +func (s *Server) NodeAgentClient(n *Node) *NodeAgentClient { + d := s.NodeAgentDialer(n) + return &NodeAgentClient{ + LocalClient: &tailscale.LocalClient{ + UseSocketOnly: true, + OmitAuth: true, + Dial: d, + }, + HTTPClient: &http.Client{ + Transport: &http.Transport{ + DialContext: d, + }, + }, + } +} + +// EnableHostFirewall enables the host's stateful firewall. 
+func (c *NodeAgentClient) EnableHostFirewall(ctx context.Context) error { + req, err := http.NewRequestWithContext(ctx, "GET", "http://unused/fw", nil) + if err != nil { + return err + } + res, err := c.HTTPClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + all, _ := io.ReadAll(res.Body) + if res.StatusCode != 200 { + return fmt.Errorf("unexpected status code %v: %s", res.Status, all) + } + return nil +} diff --git a/tstest/tools/tools.go b/tstest/tools/tools.go index 6dac885cdf71e..4d810483b78b5 100644 --- a/tstest/tools/tools.go +++ b/tstest/tools/tools.go @@ -8,7 +8,7 @@ package tools import ( - _ "fybrik.io/crdoc" + _ "github.com/elastic/crd-ref-docs" _ "github.com/tailscale/mkctr" _ "honnef.co/go/tools/cmd/staticcheck" _ "sigs.k8s.io/controller-tools/cmd/controller-gen" diff --git a/tsweb/tsweb.go b/tsweb/tsweb.go index b0a226c7f7461..9ddb3fad5d710 100644 --- a/tsweb/tsweb.go +++ b/tsweb/tsweb.go @@ -276,6 +276,10 @@ type LogOptions struct { // Now is a function giving the current time. Defaults to [time.Now]. Now func() time.Time + // QuietLogging suppresses all logging of handled HTTP requests, even if + // there are errors or status codes considered unsuccessful. Use this option + // to add your own logging in OnCompletion. + QuietLogging bool // QuietLoggingIfSuccessful suppresses logging of handled HTTP requests // where the request's response status code is 200 or 304. QuietLoggingIfSuccessful bool @@ -338,7 +342,7 @@ func (opts ErrorOptions) withDefaults() ErrorOptions { opts.Logf = logger.Discard } if opts.OnError == nil { - opts.OnError = writeHTTPError + opts.OnError = WriteHTTPError } return opts } @@ -372,6 +376,34 @@ type ReturnHandlerFunc func(http.ResponseWriter, *http.Request) error // request to the underlying handler, if appropriate. type Middleware func(h http.Handler) http.Handler +// MiddlewareStack combines multiple middleware into a single middleware for +// decorating a [http.Handler]. 
The first middleware argument will be the first +// to process an incoming request, before passing the request onto subsequent +// middleware and eventually the wrapped handler. +// +// For example: +// +// MiddlewareStack(A, B)(h).ServeHTTP(w, r) +// +// calls in sequence: +// +// a.ServeHTTP(w, r) +// -> b.ServeHTTP(w, r) +// -> h.ServeHTTP(w, r) +// +// (where the lowercase handlers were generated by the uppercase middleware). +func MiddlewareStack(mw ...Middleware) Middleware { + if len(mw) == 1 { + return mw[0] + } + return func(h http.Handler) http.Handler { + for i := len(mw) - 1; i >= 0; i-- { + h = mw[i](h) + } + return h + } +} + // ServeHTTPReturn calls f(w, r). func (f ReturnHandlerFunc) ServeHTTPReturn(w http.ResponseWriter, r *http.Request) error { return f(w, r) @@ -387,22 +419,25 @@ func StdHandler(h ReturnHandler, opts HandlerOptions) http.Handler { // LogHandler returns an http.Handler that logs to opts.Logf. // It logs both successful and failing requests. -// The log line includes the first error returned to [Handler] within. +// The log line includes the first error returned to [ErrorHandler] within. // The outer-most LogHandler(LogHandler(...)) does all of the logging. // Inner LogHandler instance do nothing. +// Panics are swallowed and their stack traces are put in the error. func LogHandler(h http.Handler, opts LogOptions) http.Handler { return logHandler{h, opts.withDefaults()} } // ErrorHandler converts a [ReturnHandler] into a standard [http.Handler]. // Errors are handled as specified by the [ReturnHandler.ServeHTTPReturn] method. +// When wrapped in a [LogHandler], panics are added to the [AccessLogRecord]; +// otherwise, panics continue up the stack. func ErrorHandler(h ReturnHandler, opts ErrorOptions) http.Handler { return errorHandler{h, opts.withDefaults()} } // errCallback is added to logHandler's request context so that errorHandler can // pass errors back up the stack to logHandler. 
-var errCallback = ctxkey.New[func(string)]("tailscale.com/tsweb.errCallback", nil) +var errCallback = ctxkey.New[func(HTTPError)]("tailscale.com/tsweb.errCallback", nil) // logHandler is a http.Handler which logs the HTTP request. // It injects an errCallback for errorHandler to augment the log message with @@ -433,21 +468,34 @@ func (h logHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { RequestID: RequestIDFromContext(r.Context()), } - var bucket string - var startRecorded bool - if bs := h.opts.BucketedStats; bs != nil { - bucket = bs.bucketForRequest(r) - if bs.Started != nil { - switch v := bs.Started.Map.Get(bucket).(type) { - case *expvar.Int: - // If we've already seen this bucket for, count it immediately. - // Otherwise, for newly seen paths, only count retroactively - // (so started-finished doesn't go negative) so we don't fill - // this LabelMap up with internet scanning spam. - v.Add(1) - startRecorded = true - } + if bs := h.opts.BucketedStats; bs != nil && bs.Started != nil && bs.Finished != nil { + bucket := bs.bucketForRequest(r) + var startRecorded bool + switch v := bs.Started.Map.Get(bucket).(type) { + case *expvar.Int: + // If we've already seen this bucket for, count it immediately. + // Otherwise, for newly seen paths, only count retroactively + // (so started-finished doesn't go negative) so we don't fill + // this LabelMap up with internet scanning spam. + v.Add(1) + startRecorded = true } + defer func() { + // Only increment metrics for buckets that result in good HTTP statuses + // or when we know the start was already counted. + // Otherwise they get full of internet scanning noise. Only filtering 404 + // gets most of the way there but there are also plenty of URLs that are + // almost right but result in 400s too. Seem easier to just only ignore + // all 4xx and 5xx. 
+ if startRecorded { + bs.Finished.Add(bucket, 1) + } else if msg.Code < 400 { + // This is the first non-error request for this bucket, + // so count it now retroactively. + bs.Started.Add(bucket, 1) + bs.Finished.Add(bucket, 1) + } + }() } if fn := h.opts.OnStart; fn != nil { @@ -455,45 +503,55 @@ func (h logHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } // Let errorHandler tell us what error it wrote to the client. - r = r.WithContext(errCallback.WithValue(ctx, func(e string) { - if ctx.Err() == context.Canceled { - msg.Code = 499 // nginx convention: Client Closed Request - msg.Err = context.Canceled.Error() - return - } + r = r.WithContext(errCallback.WithValue(ctx, func(e HTTPError) { + // Keep the deepest error. if msg.Err != "" { return } - msg.Err = e + + // Log the error. + if e.Msg != "" && e.Err != nil { + msg.Err = e.Msg + ": " + e.Err.Error() + } else if e.Err != nil { + msg.Err = e.Err.Error() + } else if e.Msg != "" { + msg.Err = e.Msg + } + + // We log the code from the loggingResponseWriter, except for + // cancellation where we override with 499. + if reqCancelled(r, e.Err) { + msg.Code = 499 + } })) - lw := &loggingResponseWriter{ResponseWriter: w, logf: h.opts.Logf} + lw := newLogResponseWriter(h.opts.Logf, w, r) - // Invoke the handler that we're logging. - var recovered any defer func() { + // If the handler panicked then make sure we include that in our error. + // Panics caught up errorHandler shouldn't appear here, unless the panic + // originates in one of its callbacks. + recovered := recover() if recovered != nil { - // TODO(icio): When the panic below is eventually caught by - // http.Server, it cancels the inlight request and the "500 Internal - // Server Error" response we wrote to the client below is never - // received, even if we flush it. 
- if f, ok := w.(http.Flusher); ok { - f.Flush() + if msg.Err == "" { + msg.Err = panic2err(recovered).Error() + } else { + msg.Err += "\n\nthen " + panic2err(recovered).Error() } - panic(recovered) } + h.logRequest(r, lw, msg) }() - func() { - defer func() { - recovered = recover() - }() - h.h.ServeHTTP(lw, r) - }() + h.h.ServeHTTP(lw, r) +} + +func (h logHandler) logRequest(r *http.Request, lw *loggingResponseWriter, msg AccessLogRecord) { // Complete our access log from the loggingResponseWriter. msg.Bytes = lw.bytes msg.Seconds = h.opts.Now().Sub(msg.Time).Seconds() switch { + case msg.Code != 0: + // Keep explicit codes from a few particular errors. case lw.hijacked: // Connection no longer belongs to us, just log that we // switched protocols away from HTTP. @@ -506,7 +564,16 @@ func (h logHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { msg.Code = lw.code } - if !h.opts.QuietLoggingIfSuccessful || (msg.Code != http.StatusOK && msg.Code != http.StatusNotModified) { + // Keep track of the original response code when we've overridden it. + if lw.code != 0 && msg.Code != lw.code { + if msg.Err == "" { + msg.Err = fmt.Sprintf("(original code %d)", lw.code) + } else { + msg.Err = fmt.Sprintf("%s (original code %d)", msg.Err, lw.code) + } + } + + if !h.opts.QuietLogging && !(h.opts.QuietLoggingIfSuccessful && (msg.Code == http.StatusOK || msg.Code == http.StatusNotModified)) { h.opts.Logf("%s", msg) } @@ -515,22 +582,6 @@ func (h logHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } // Closing metrics. - if bs := h.opts.BucketedStats; bs != nil && bs.Finished != nil { - // Only increment metrics for buckets that result in good HTTP statuses - // or when we know the start was already counted. - // Otherwise they get full of internet scanning noise. Only filtering 404 - // gets most of the way there but there are also plenty of URLs that are - // almost right but result in 400s too. Seem easier to just only ignore - // all 4xx and 5xx. 
- if startRecorded { - bs.Finished.Add(bucket, 1) - } else if msg.Code < 400 { - // This is the first non-error request for this bucket, - // so count it now retroactively. - bs.Started.Add(bucket, 1) - bs.Finished.Add(bucket, 1) - } - } if h.opts.StatusCodeCounters != nil { h.opts.StatusCodeCounters.Add(responseCodeString(msg.Code/100), 1) } @@ -567,23 +618,43 @@ var responseCodeCache sync.Map // response code that gets sent, if any. type loggingResponseWriter struct { http.ResponseWriter + ctx context.Context code int bytes int hijacked bool logf logger.Logf } -// WriteHeader implements http.Handler. +// newLogResponseWriter returns a loggingResponseWriter which uses's the logger +// from r, or falls back to logf. If a nil logger is given, the logs are +// discarded. +func newLogResponseWriter(logf logger.Logf, w http.ResponseWriter, r *http.Request) *loggingResponseWriter { + if l, ok := logger.LogfKey.ValueOk(r.Context()); ok && l != nil { + logf = l + } + if logf == nil { + logf = logger.Discard + } + return &loggingResponseWriter{ + ResponseWriter: w, + ctx: r.Context(), + logf: logf, + } +} + +// WriteHeader implements [http.ResponseWriter]. func (l *loggingResponseWriter) WriteHeader(statusCode int) { if l.code != 0 { l.logf("[unexpected] HTTP handler set statusCode twice (%d and %d)", l.code, statusCode) return } - l.code = statusCode + if l.ctx.Err() == nil { + l.code = statusCode + } l.ResponseWriter.WriteHeader(statusCode) } -// Write implements http.Handler. +// Write implements [http.ResponseWriter]. func (l *loggingResponseWriter) Write(bs []byte) (int, error) { if l.code == 0 { l.code = 200 @@ -626,49 +697,37 @@ type errorHandler struct { // ServeHTTP implements the http.Handler interface. func (h errorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - logf := h.opts.Logf - if l := logger.LogfKey.Value(r.Context()); l != nil { - logf = l - } - // Keep track of whether a response gets written. 
lw, ok := w.(*loggingResponseWriter) if !ok { - lw = &loggingResponseWriter{ - ResponseWriter: w, - logf: logf, - } + lw = newLogResponseWriter(h.opts.Logf, w, r) } - // In case the handler panics, we want to recover and continue logging the - // error before raising the panic again for the server to handle. - var panicRes any + var err error defer func() { - if panicRes != nil { - panic(panicRes) + // In case the handler panics, we want to recover and continue logging + // the error before logging it (or re-panicking if we couldn't log). + rec := recover() + if rec != nil { + err = panic2err(rec) + } + if err == nil { + return + } + if h.handleError(w, r, lw, err) { + return + } + if rec != nil { + // If we weren't able to log the panic somewhere, throw it up the + // stack to someone who can. + panic(rec) } }() - err := func() (err error) { - defer func() { - if r := recover(); r != nil { - panicRes = r - if r == http.ErrAbortHandler { - err = http.ErrAbortHandler - } else { - // Even if r is an error, do not wrap it as an error here as - // that would allow things like panic(vizerror.New("foo")) - // which is really hard to define the behavior of. - var stack [10000]byte - n := runtime.Stack(stack[:], false) - err = fmt.Errorf("panic: %v\n\n%s", r, stack[:n]) - } - } - }() - return h.rh.ServeHTTPReturn(lw, r) - }() - if err == nil { - return - } + err = h.rh.ServeHTTPReturn(lw, r) +} + +func (h errorHandler) handleError(w http.ResponseWriter, r *http.Request, lw *loggingResponseWriter, err error) bool { + var logged bool // Extract a presentable, loggable error. 
var hOK bool @@ -676,11 +735,18 @@ func (h errorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if errors.As(err, &hErr) { hOK = true if hErr.Code == 0 { - logf("[unexpected] HTTPError %v did not contain an HTTP status code, sending internal server error", hErr) + lw.logf("[unexpected] HTTPError %v did not contain an HTTP status code, sending internal server error", hErr) hErr.Code = http.StatusInternalServerError } } else if v, ok := vizerror.As(err); ok { hErr = Error(http.StatusInternalServerError, v.Error(), nil) + } else if reqCancelled(r, err) { + // 499 is the Nginx convention meaning "Client Closed Connection". + if errors.Is(err, context.Canceled) || errors.Is(err, http.ErrAbortHandler) { + hErr = Error(499, "", err) + } else { + hErr = Error(499, "", fmt.Errorf("%w: %w", context.Canceled, err)) + } } else { // Omit the friendly message so HTTP logs show the bare error that was // returned and we know it's not a HTTPError. @@ -689,47 +755,109 @@ func (h errorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Tell the logger what error we wrote back to the client. if pb := errCallback.Value(r.Context()); pb != nil { - if hErr.Msg != "" && hErr.Err != nil { - pb(hErr.Msg + ": " + hErr.Err.Error()) - } else if hErr.Err != nil { - pb(hErr.Err.Error()) - } else if hErr.Msg != "" { - pb(hErr.Msg) - } + pb(hErr) + logged = true + } + + if r.Context().Err() != nil { + return logged } if lw.code != 0 { - if hOK { - logf("[unexpected] handler returned HTTPError %v, but already sent a response with code %d", hErr, lw.code) + if hOK && hErr.Code != lw.code { + lw.logf("[unexpected] handler returned HTTPError %v, but already sent response with code %d", hErr, lw.code) } - return + return logged } // Set a default error message from the status code. Do this after we pass // the error back to the logger so that `return errors.New("oh")` logs as - // `"err": "oh"`, not `"err": "internal server error: oh"`. 
+ // `"err": "oh"`, not `"err": "Internal Server Error: oh"`. if hErr.Msg == "" { - hErr.Msg = http.StatusText(hErr.Code) + switch hErr.Code { + case 499: + hErr.Msg = "Client Closed Request" + default: + hErr.Msg = http.StatusText(hErr.Code) + } } + // If OnError panics before a response is written, write a bare 500 back. + // OnError panics are thrown further up the stack. + defer func() { + if lw.code == 0 { + if rec := recover(); rec != nil { + w.WriteHeader(http.StatusInternalServerError) + panic(rec) + } + } + }() + h.opts.OnError(w, r, hErr) + return logged +} + +// panic2err converts a recovered value to an error containing the panic stack trace. +func panic2err(recovered any) error { + if recovered == nil { + return nil + } + if recovered == http.ErrAbortHandler { + return http.ErrAbortHandler + } + + // Even if r is an error, do not wrap it as an error here as + // that would allow things like panic(vizerror.New("foo")) + // which is really hard to define the behavior of. + var stack [10000]byte + n := runtime.Stack(stack[:], false) + return &panicError{ + rec: recovered, + stack: stack[:n], + } +} + +// panicError is an error that contains a panic. +type panicError struct { + rec any + stack []byte +} + +func (e *panicError) Error() string { + return fmt.Sprintf("panic: %v\n\n%s", e.rec, e.stack) +} + +func (e *panicError) Unwrap() error { + err, _ := e.rec.(error) + return err } -// writeHTTPError is the default error response formatter. -func writeHTTPError(w http.ResponseWriter, r *http.Request, hErr HTTPError) { +// reqCancelled returns true if err is http.ErrAbortHandler or r.Context.Err() +// is context.Canceled. +func reqCancelled(r *http.Request, err error) bool { + return errors.Is(err, http.ErrAbortHandler) || r.Context().Err() == context.Canceled +} + +// WriteHTTPError is the default error response formatter. 
+func WriteHTTPError(w http.ResponseWriter, r *http.Request, e HTTPError) { + // Don't write a response if we've hit a cancellation/abort. + if r.Context().Err() != nil || errors.Is(e.Err, http.ErrAbortHandler) { + return + } + // Default headers set by http.Error. h := w.Header() h.Set("Content-Type", "text/plain; charset=utf-8") h.Set("X-Content-Type-Options", "nosniff") // Custom headers from the error. - for k, vs := range hErr.Header { + for k, vs := range e.Header { h[k] = vs } // Write the msg back to the user. - w.WriteHeader(hErr.Code) - fmt.Fprint(w, hErr.Msg) + w.WriteHeader(e.Code) + fmt.Fprint(w, e.Msg) // If it's a plaintext message, add line breaks and RequestID. if strings.HasPrefix(h.Get("Content-Type"), "text/plain") { diff --git a/tsweb/tsweb_test.go b/tsweb/tsweb_test.go index fff7cc80561ae..13840c01225e3 100644 --- a/tsweb/tsweb_test.go +++ b/tsweb/tsweb_test.go @@ -13,6 +13,8 @@ import ( "net" "net/http" "net/http/httptest" + "net/http/httputil" + "net/textproto" "net/url" "strings" "testing" @@ -22,6 +24,7 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "tailscale.com/metrics" "tailscale.com/tstest" + "tailscale.com/util/httpm" "tailscale.com/util/must" "tailscale.com/util/vizerror" ) @@ -492,6 +495,25 @@ func TestStdHandler(t *testing.T) { wantBody: "not found with request ID " + exampleRequestID + "\n", }, + { + name: "inner_cancelled", + rh: handlerErr(0, context.Canceled), // return canceled error, but the request was not cancelled + r: req(bgCtx, "http://example.com/"), + wantCode: 500, + wantLog: AccessLogRecord{ + Time: startTime, + Seconds: 1.0, + Proto: "HTTP/1.1", + TLS: false, + Host: "example.com", + Method: "GET", + Code: 500, + Err: "context canceled", + RequestURI: "/", + }, + wantBody: "Internal Server Error\n", + }, + { name: "nested", rh: ReturnHandlerFunc(func(w http.ResponseWriter, r *http.Request) error { @@ -662,40 +684,419 @@ func TestStdHandler_Panic(t *testing.T) { if err != nil { t.Fatal(err) } - if <-recovered == 
nil { - t.Fatal("expected panic but saw none") + if rec := <-recovered; rec != nil { + t.Fatalf("expected no panic but saw: %v", rec) } // Check that the log message contained the stack trace in the error. var logerr bool if p := "panic: panicked elsewhere\n\ngoroutine "; !strings.HasPrefix(r.Err, p) { - t.Errorf("got error prefix %q, want %q", r.Err[:min(len(r.Err), len(p))], p) + t.Errorf("got Err prefix %q, want %q", r.Err[:min(len(r.Err), len(p))], p) logerr = true } if s := "\ntailscale.com/tsweb.panicElsewhere("; !strings.Contains(r.Err, s) { - t.Errorf("want substr %q, not found", s) + t.Errorf("want Err substr %q, not found", s) logerr = true } if logerr { t.Logf("logger got error: (quoted) %q\n\n(verbatim)\n%s", r.Err, r.Err) } - t.Run("check_response", func(t *testing.T) { - // TODO(icio): Swallow panics? tailscale/tailscale#12784 - t.SkipNow() + // Check that the server sent an error response. + if res.StatusCode != 500 { + t.Errorf("got status code %d, want %d", res.StatusCode, 500) + } + body, err := io.ReadAll(res.Body) + if err != nil { + t.Errorf("error reading body: %s", err) + } else if want := "Internal Server Error\n"; string(body) != want { + t.Errorf("got body %q, want %q", body, want) + } + res.Body.Close() +} + +func TestStdHandler_Canceled(t *testing.T) { + now := time.Now() + + r := make(chan AccessLogRecord) + var e *HTTPError + handlerOpen := make(chan struct{}) + h := StdHandler( + ReturnHandlerFunc(func(w http.ResponseWriter, r *http.Request) error { + close(handlerOpen) + ctx := r.Context() + <-ctx.Done() + w.WriteHeader(200) // Ignored. 
+ return ctx.Err() + }), + HandlerOptions{ + Logf: t.Logf, + Now: func() time.Time { return now }, + OnError: func(w http.ResponseWriter, r *http.Request, h HTTPError) { + e = &h + }, + OnCompletion: func(_ *http.Request, alr AccessLogRecord) { + r <- alr + }, + }, + ) + s := httptest.NewServer(h) + t.Cleanup(s.Close) + + // Create a context which gets canceled after the handler starts processing + // the request. + ctx, cancelReq := context.WithCancel(context.Background()) + go func() { + <-handlerOpen + cancelReq() + }() + + // Send a request to our server. + req, err := http.NewRequestWithContext(ctx, httpm.GET, s.URL, nil) + if err != nil { + t.Fatalf("making request: %s", err) + } + res, err := http.DefaultClient.Do(req) + if !errors.Is(err, context.Canceled) { + t.Errorf("got error %v, want context.Canceled", err) + } + if res != nil { + t.Errorf("got response %#v, want nil", res) + } + + // Check that we got the expected log record. + got := <-r + got.Seconds = 0 + got.RemoteAddr = "" + got.Host = "" + got.UserAgent = "" + want := AccessLogRecord{ + Time: now, + Code: 499, + Method: "GET", + Err: "context canceled", + Proto: "HTTP/1.1", + RequestURI: "/", + } + if d := cmp.Diff(want, got); d != "" { + t.Errorf("AccessLogRecord wrong (-want +got)\n%s", d) + } + + // Check that we rendered no response to the client after + // logHandler.OnCompletion has been called. + if e != nil { + t.Errorf("got OnError callback with %#v, want no callback", e) + } +} - // Check that the server sent an error response. 
- if res.StatusCode != 500 { - t.Errorf("got status code %d, want %d", res.StatusCode, 500) +func TestStdHandler_CanceledAfterHeader(t *testing.T) { + now := time.Now() + + r := make(chan AccessLogRecord) + var e *HTTPError + handlerOpen := make(chan struct{}) + h := StdHandler( + ReturnHandlerFunc(func(w http.ResponseWriter, r *http.Request) error { + w.WriteHeader(http.StatusNoContent) + close(handlerOpen) + ctx := r.Context() + <-ctx.Done() + return ctx.Err() + }), + HandlerOptions{ + Logf: t.Logf, + Now: func() time.Time { return now }, + OnError: func(w http.ResponseWriter, r *http.Request, h HTTPError) { + e = &h + }, + OnCompletion: func(_ *http.Request, alr AccessLogRecord) { + r <- alr + }, + }, + ) + s := httptest.NewServer(h) + t.Cleanup(s.Close) + + // Create a context which gets canceled after the handler starts processing + // the request. + ctx, cancelReq := context.WithCancel(context.Background()) + go func() { + <-handlerOpen + cancelReq() + }() + + // Send a request to our server. + req, err := http.NewRequestWithContext(ctx, httpm.GET, s.URL, nil) + if err != nil { + t.Fatalf("making request: %s", err) + } + res, err := http.DefaultClient.Do(req) + if !errors.Is(err, context.Canceled) { + t.Errorf("got error %v, want context.Canceled", err) + } + if res != nil { + t.Errorf("got response %#v, want nil", res) + } + + // Check that we got the expected log record. + got := <-r + got.Seconds = 0 + got.RemoteAddr = "" + got.Host = "" + got.UserAgent = "" + want := AccessLogRecord{ + Time: now, + Code: 499, + Method: "GET", + Err: "context canceled (original code 204)", + Proto: "HTTP/1.1", + RequestURI: "/", + } + if d := cmp.Diff(want, got); d != "" { + t.Errorf("AccessLogRecord wrong (-want +got)\n%s", d) + } + + // Check that we rendered no response to the client after + // logHandler.OnCompletion has been called. 
+ if e != nil { + t.Errorf("got OnError callback with %#v, want no callback", e) + } +} + +func TestStdHandler_ConnectionClosedDuringBody(t *testing.T) { + now := time.Now() + + // Start a HTTP server that writes back zeros until the request is abandoned. + // We next put a reverse-proxy in front of this server. + rs := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + zeroes := make([]byte, 1024) + for r.Context().Err() == nil { + w.Write(zeroes) } - body, err := io.ReadAll(res.Body) - if err != nil { - t.Errorf("error reading body: %s", err) - } else if want := "internal server error\n"; string(body) != want { - t.Errorf("got body %q, want %q", body, want) + })) + defer rs.Close() + + r := make(chan AccessLogRecord) + var e *HTTPError + responseStarted := make(chan struct{}) + requestCanceled := make(chan struct{}) + + // Create another server which proxies our zeroes server. + // The [httputil.ReverseProxy] will panic with [http.ErrAbortHandler] when + // it fails to copy the response to the client. + h := StdHandler( + ReturnHandlerFunc(func(w http.ResponseWriter, r *http.Request) error { + (&httputil.ReverseProxy{ + Director: func(r *http.Request) { + r.URL = must.Get(url.Parse(rs.URL)) + }, + }).ServeHTTP(w, r) + return nil + }), + HandlerOptions{ + Logf: t.Logf, + Now: func() time.Time { return now }, + OnError: func(w http.ResponseWriter, r *http.Request, h HTTPError) { + e = &h + }, + OnCompletion: func(_ *http.Request, alr AccessLogRecord) { + r <- alr + }, + }, + ) + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + close(responseStarted) + <-requestCanceled + h.ServeHTTP(w, r.WithContext(context.WithoutCancel(r.Context()))) + })) + t.Cleanup(s.Close) + + // Create a context which gets canceled after the handler starts processing + // the request. 
+ ctx, cancelReq := context.WithCancel(context.Background()) + go func() { + <-responseStarted + cancelReq() + }() + + // Send a request to our server. + req, err := http.NewRequestWithContext(ctx, httpm.GET, s.URL, nil) + if err != nil { + t.Fatalf("making request: %s", err) + } + res, err := http.DefaultClient.Do(req) + close(requestCanceled) + if !errors.Is(err, context.Canceled) { + t.Errorf("got error %v, want context.Canceled", err) + } + if res != nil { + t.Errorf("got response %#v, want nil", res) + } + + // Check that we got the expected log record. + got := <-r + got.Seconds = 0 + got.RemoteAddr = "" + got.Host = "" + got.UserAgent = "" + want := AccessLogRecord{ + Time: now, + Code: 499, + Method: "GET", + Err: "net/http: abort Handler (original code 200)", + Proto: "HTTP/1.1", + RequestURI: "/", + } + if d := cmp.Diff(want, got, cmpopts.IgnoreFields(AccessLogRecord{}, "Bytes")); d != "" { + t.Errorf("AccessLogRecord wrong (-want +got)\n%s", d) + } + + // Check that we rendered no response to the client after + // logHandler.OnCompletion has been called. + if e != nil { + t.Errorf("got OnError callback with %#v, want no callback", e) + } +} + +func TestStdHandler_OnErrorPanic(t *testing.T) { + var r AccessLogRecord + h := StdHandler( + ReturnHandlerFunc(func(w http.ResponseWriter, r *http.Request) error { + // This response is supposed to be written by OnError, but it panics + // so nothing is written. + return Error(401, "lacking auth", nil) + }), + HandlerOptions{ + Logf: t.Logf, + OnError: func(w http.ResponseWriter, r *http.Request, h HTTPError) { + panicElsewhere() + }, + OnCompletion: func(_ *http.Request, alr AccessLogRecord) { + r = alr + }, + }, + ) + + // Run our panicking handler in a http.Server which catches and rethrows + // any panics. 
+ recovered := make(chan any, 1) + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + recovered <- recover() + }() + h.ServeHTTP(w, r) + })) + t.Cleanup(s.Close) + + // Send a request to our server. + res, err := http.Get(s.URL) + if err != nil { + t.Fatal(err) + } + if rec := <-recovered; rec != nil { + t.Fatalf("expected no panic but saw: %v", rec) + } + + // Check that the log message contained the stack trace in the error. + var logerr bool + if p := "lacking auth\n\nthen panic: panicked elsewhere\n\ngoroutine "; !strings.HasPrefix(r.Err, p) { + t.Errorf("got Err prefix %q, want %q", r.Err[:min(len(r.Err), len(p))], p) + logerr = true + } + if s := "\ntailscale.com/tsweb.panicElsewhere("; !strings.Contains(r.Err, s) { + t.Errorf("want Err substr %q, not found", s) + logerr = true + } + if logerr { + t.Logf("logger got error: (quoted) %q\n\n(verbatim)\n%s", r.Err, r.Err) + } + + // Check that the server sent a bare 500 response. 
+ if res.StatusCode != 500 { + t.Errorf("got status code %d, want %d", res.StatusCode, 500) + } + body, err := io.ReadAll(res.Body) + if err != nil { + t.Errorf("error reading body: %s", err) + } else if want := ""; string(body) != want { + t.Errorf("got body %q, want %q", body, want) + } + res.Body.Close() +} + +func TestLogHandler_QuietLogging(t *testing.T) { + now := time.Now() + var logs []string + logf := func(format string, args ...any) { + logs = append(logs, fmt.Sprintf(format, args...)) + } + + var done bool + onComp := func(r *http.Request, alr AccessLogRecord) { + if done { + t.Fatal("expected only one OnCompletion call") } - res.Body.Close() - }) + done = true + + want := AccessLogRecord{ + Time: now, + RemoteAddr: "192.0.2.1:1234", + Proto: "HTTP/1.1", + Host: "example.com", + Method: "GET", + RequestURI: "/", + Code: 200, + } + if diff := cmp.Diff(want, alr); diff != "" { + t.Fatalf("unexpected OnCompletion AccessLogRecord (-want +got):\n%s", diff) + } + } + + LogHandler( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.WriteHeader(201) // loggingResponseWriter will write a warning. + }), + LogOptions{ + Logf: logf, + OnCompletion: onComp, + QuietLogging: true, + Now: func() time.Time { return now }, + }, + ).ServeHTTP( + httptest.NewRecorder(), + httptest.NewRequest("GET", "/", nil), + ) + + if !done { + t.Fatal("OnCompletion call didn't happen") + } + + wantLogs := []string{ + "[unexpected] HTTP handler set statusCode twice (200 and 201)", + } + if diff := cmp.Diff(wantLogs, logs); diff != "" { + t.Fatalf("logs (-want +got):\n%s", diff) + } +} + +func TestErrorHandler_Panic(t *testing.T) { + // errorHandler should panic when not wrapped in logHandler. 
+ defer func() { + rec := recover() + if rec == nil { + t.Fatal("expected errorHandler to panic when not wrapped in logHandler") + } + if want := any("uhoh"); rec != want { + t.Fatalf("got panic %#v, want %#v", rec, want) + } + }() + ErrorHandler( + ReturnHandlerFunc(func(w http.ResponseWriter, r *http.Request) error { + panic("uhoh") + }), + ErrorOptions{}, + ).ServeHTTP(httptest.NewRecorder(), httptest.NewRequest("GET", "/", nil)) } func panicElsewhere() { @@ -905,3 +1306,40 @@ func TestBucket(t *testing.T) { }) } } + +func ExampleMiddlewareStack() { + // setHeader returns a middleware that sets header k = vs. + setHeader := func(k string, vs ...string) Middleware { + k = textproto.CanonicalMIMEHeaderKey(k) + return func(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header()[k] = vs + h.ServeHTTP(w, r) + }) + } + } + + // h is a http.Handler which prints the A, B & C response headers, wrapped + // in a few middleware which set those headers. + var h http.Handler = MiddlewareStack( + setHeader("A", "mw1"), + MiddlewareStack( + setHeader("A", "mw2.1"), + setHeader("B", "mw2.2"), + setHeader("C", "mw2.3"), + setHeader("C", "mw2.4"), + ), + setHeader("B", "mw3"), + )(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Println("A", w.Header().Get("A")) + fmt.Println("B", w.Header().Get("B")) + fmt.Println("C", w.Header().Get("C")) + })) + + // Invoke the handler. + h.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest("", "/", nil)) + // Output: + // A mw2.1 + // B mw3 + // C mw2.4 +} diff --git a/types/lazy/lazy.go b/types/lazy/lazy.go index 755b5ca6f29f9..43325512d9cb0 100644 --- a/types/lazy/lazy.go +++ b/types/lazy/lazy.go @@ -154,3 +154,38 @@ func SyncFuncErr[T any](fill func() (T, error)) func() (T, error) { return v, err } } + +// TB is a subset of testing.TB that we use to set up test helpers. +// It's defined here to avoid pulling in the testing package. 
+type TB interface { + Helper() + Cleanup(func()) +} + +// SetForTest sets z's value and error. +// It's used in tests only and reverts z's state back when tb and all its +// subtests complete. +// It is not safe for concurrent use and must not be called concurrently with +// any SyncValue methods, including another call to itself. +func (z *SyncValue[T]) SetForTest(tb TB, val T, err error) { + tb.Helper() + + oldErr, oldVal := z.err.Load(), z.v + z.once.Do(func() {}) + + z.v = val + if err != nil { + z.err.Store(ptr.To(err)) + } else { + z.err.Store(nilErrPtr) + } + + tb.Cleanup(func() { + if oldErr == nil { + *z = SyncValue[T]{} + } else { + z.v = oldVal + z.err.Store(oldErr) + } + }) +} diff --git a/types/lazy/sync_test.go b/types/lazy/sync_test.go index ab3ed427d3ebc..5578eee0cfed9 100644 --- a/types/lazy/sync_test.go +++ b/types/lazy/sync_test.go @@ -8,6 +8,8 @@ import ( "fmt" "sync" "testing" + + "tailscale.com/types/opt" ) func TestSyncValue(t *testing.T) { @@ -147,6 +149,212 @@ func TestSyncValueConcurrent(t *testing.T) { wg.Wait() } +func TestSyncValueSetForTest(t *testing.T) { + testErr := errors.New("boom") + tests := []struct { + name string + initValue opt.Value[int] + initErr opt.Value[error] + setForTestValue int + setForTestErr error + getValue int + getErr opt.Value[error] + wantValue int + wantErr error + routines int + }{ + { + name: "GetOk", + setForTestValue: 42, + getValue: 8, + wantValue: 42, + }, + { + name: "GetOk/WithInit", + initValue: opt.ValueOf(4), + setForTestValue: 42, + getValue: 8, + wantValue: 42, + }, + { + name: "GetOk/WithInitErr", + initValue: opt.ValueOf(4), + initErr: opt.ValueOf(errors.New("blast")), + setForTestValue: 42, + getValue: 8, + wantValue: 42, + }, + { + name: "GetErr", + setForTestValue: 42, + setForTestErr: testErr, + getValue: 8, + getErr: opt.ValueOf(errors.New("ka-boom")), + wantValue: 42, + wantErr: testErr, + }, + { + name: "GetErr/NilError", + setForTestValue: 42, + setForTestErr: nil, + getValue: 8, + 
getErr: opt.ValueOf(errors.New("ka-boom")), + wantValue: 42, + wantErr: nil, + }, + { + name: "GetErr/WithInitErr", + initValue: opt.ValueOf(4), + initErr: opt.ValueOf(errors.New("blast")), + setForTestValue: 42, + setForTestErr: testErr, + getValue: 8, + getErr: opt.ValueOf(errors.New("ka-boom")), + wantValue: 42, + wantErr: testErr, + }, + { + name: "Concurrent/GetOk", + setForTestValue: 42, + getValue: 8, + wantValue: 42, + routines: 10000, + }, + { + name: "Concurrent/GetOk/WithInitErr", + initValue: opt.ValueOf(4), + initErr: opt.ValueOf(errors.New("blast")), + setForTestValue: 42, + getValue: 8, + wantValue: 42, + routines: 10000, + }, + { + name: "Concurrent/GetErr", + setForTestValue: 42, + setForTestErr: testErr, + getValue: 8, + getErr: opt.ValueOf(errors.New("ka-boom")), + wantValue: 42, + wantErr: testErr, + routines: 10000, + }, + { + name: "Concurrent/GetErr/WithInitErr", + initValue: opt.ValueOf(4), + initErr: opt.ValueOf(errors.New("blast")), + setForTestValue: 42, + setForTestErr: testErr, + getValue: 8, + getErr: opt.ValueOf(errors.New("ka-boom")), + wantValue: 42, + wantErr: testErr, + routines: 10000, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var v SyncValue[int] + + // Initialize the sync value with the specified value and/or error, + // if required by the test. 
+ if initValue, ok := tt.initValue.GetOk(); ok { + var wantInitErr, gotInitErr error + var wantInitValue, gotInitValue int + wantInitValue = initValue + if initErr, ok := tt.initErr.GetOk(); ok { + wantInitErr = initErr + gotInitValue, gotInitErr = v.GetErr(func() (int, error) { return initValue, initErr }) + } else { + gotInitValue = v.Get(func() int { return initValue }) + } + + if gotInitErr != wantInitErr { + t.Fatalf("InitErr: got %v; want %v", gotInitErr, wantInitErr) + } + if gotInitValue != wantInitValue { + t.Fatalf("InitValue: got %v; want %v", gotInitValue, wantInitValue) + } + + // Verify that SetForTest reverted the error and the value during the test cleanup. + t.Cleanup(func() { + wantCleanupValue, wantCleanupErr := wantInitValue, wantInitErr + gotCleanupValue, gotCleanupErr, ok := v.PeekErr() + if !ok { + t.Fatal("SyncValue is not set after cleanup") + } + if gotCleanupErr != wantCleanupErr { + t.Fatalf("CleanupErr: got %v; want %v", gotCleanupErr, wantCleanupErr) + } + if gotCleanupValue != wantCleanupValue { + t.Fatalf("CleanupValue: got %v; want %v", gotCleanupValue, wantCleanupValue) + } + }) + } else { + // Verify that if v wasn't set prior to SetForTest, it's + // reverted to a valid unset state during the test cleanup. + t.Cleanup(func() { + if _, _, ok := v.PeekErr(); ok { + t.Fatal("SyncValue is set after cleanup") + } + wantCleanupValue, wantCleanupErr := 42, errors.New("ka-boom") + gotCleanupValue, gotCleanupErr := v.GetErr(func() (int, error) { return wantCleanupValue, wantCleanupErr }) + if gotCleanupErr != wantCleanupErr { + t.Fatalf("CleanupErr: got %v; want %v", gotCleanupErr, wantCleanupErr) + } + if gotCleanupValue != wantCleanupValue { + t.Fatalf("CleanupValue: got %v; want %v", gotCleanupValue, wantCleanupValue) + } + }) + } + + // Set the test value and/or error. + v.SetForTest(t, tt.setForTestValue, tt.setForTestErr) + + // Verify that the value and/or error have been set. 
+ // This will run on either the current goroutine + // or concurrently depending on the tt.routines value. + checkSyncValue := func() { + var gotValue int + var gotErr error + if getErr, ok := tt.getErr.GetOk(); ok { + gotValue, gotErr = v.GetErr(func() (int, error) { return tt.getValue, getErr }) + } else { + gotValue = v.Get(func() int { return tt.getValue }) + } + + if gotErr != tt.wantErr { + t.Errorf("Err: got %v; want %v", gotErr, tt.wantErr) + } + if gotValue != tt.wantValue { + t.Errorf("Value: got %v; want %v", gotValue, tt.wantValue) + } + } + + switch tt.routines { + case 0: + checkSyncValue() + default: + var wg sync.WaitGroup + wg.Add(tt.routines) + start := make(chan struct{}) + for range tt.routines { + go func() { + defer wg.Done() + // Every goroutine waits for the go signal, so that more of them + // have a chance to race on the initial Get than with sequential + // goroutine starts. + <-start + checkSyncValue() + }() + } + close(start) + wg.Wait() + } + }) + } +} + func TestSyncFunc(t *testing.T) { f := SyncFunc(fortyTwo) diff --git a/types/opt/value.go b/types/opt/value.go index 1781731a41698..54fab7a538270 100644 --- a/types/opt/value.go +++ b/types/opt/value.go @@ -64,6 +64,14 @@ func (o Value[T]) Get() T { return o.value } +// GetOr returns the value of o or def if a value hasn't been set. +func (o Value[T]) GetOr(def T) T { + if o.set { + return o.value + } + return def +} + // Get returns the value and a flag indicating whether the value is set. func (o Value[T]) GetOk() (v T, ok bool) { return o.value, o.set diff --git a/types/views/views.go b/types/views/views.go index 42758966f4cef..4edd72688f7b4 100644 --- a/types/views/views.go +++ b/types/views/views.go @@ -11,6 +11,7 @@ import ( "errors" "fmt" "maps" + "reflect" "slices" "go4.org/mem" @@ -111,6 +112,13 @@ type StructView[T any] interface { AsStruct() T } +// Cloner is any type that has a Clone function returning a deep-clone of the receiver. 
+type Cloner[T any] interface { + // Clone returns a deep-clone of the receiver. + // It returns nil, when the receiver is nil. + Clone() T +} + // ViewCloner is any type that has had View and Clone funcs generated using // tailscale.com/cmd/viewer. type ViewCloner[T any, V StructView[T]] interface { @@ -555,3 +563,46 @@ func (m MapFn[K, T, V]) Range(f MapRangeFn[K, V]) { } } } + +// ContainsPointers reports whether T contains any pointers, +// either explicitly or implicitly. +// It has special handling for some types that contain pointers +// that we know are free from memory aliasing/mutation concerns. +func ContainsPointers[T any]() bool { + return containsPointers(reflect.TypeFor[T]()) +} + +func containsPointers(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Pointer, reflect.UnsafePointer: + return true + case reflect.Chan, reflect.Map, reflect.Slice: + return true + case reflect.Array: + return containsPointers(typ.Elem()) + case reflect.Interface, reflect.Func: + return true // err on the safe side. 
+ case reflect.Struct: + if isWellKnownImmutableStruct(typ) { + return false + } + for i := range typ.NumField() { + if containsPointers(typ.Field(i).Type) { + return true + } + } + } + return false +} + +func isWellKnownImmutableStruct(typ reflect.Type) bool { + switch typ.String() { + case "time.Time": + // time.Time contains a pointer that does not need copying + return true + case "netip.Addr", "netip.Prefix", "netip.AddrPort": + return true + default: + return false + } +} diff --git a/types/views/views_test.go b/types/views/views_test.go index 0173d3207b1c7..1a4f1f2d4405f 100644 --- a/types/views/views_test.go +++ b/types/views/views_test.go @@ -10,6 +10,7 @@ import ( "reflect" "strings" "testing" + "unsafe" qt "github.com/frankban/quicktest" ) @@ -22,6 +23,16 @@ type viewStruct struct { StringsPtr *Slice[string] `json:",omitempty"` } +type noPtrStruct struct { + Int int + Str string +} + +type withPtrStruct struct { + Int int + StrPtr *string +} + func BenchmarkSliceIteration(b *testing.B) { var data []viewStruct for i := range 10000 { @@ -189,3 +200,215 @@ func TestSliceMapKey(t *testing.T) { } } } + +func TestContainsPointers(t *testing.T) { + tests := []struct { + name string + typ reflect.Type + wantPtrs bool + }{ + { + name: "bool", + typ: reflect.TypeFor[bool](), + wantPtrs: false, + }, + { + name: "int", + typ: reflect.TypeFor[int](), + wantPtrs: false, + }, + { + name: "int8", + typ: reflect.TypeFor[int8](), + wantPtrs: false, + }, + { + name: "int16", + typ: reflect.TypeFor[int16](), + wantPtrs: false, + }, + { + name: "int32", + typ: reflect.TypeFor[int32](), + wantPtrs: false, + }, + { + name: "int64", + typ: reflect.TypeFor[int64](), + wantPtrs: false, + }, + { + name: "uint", + typ: reflect.TypeFor[uint](), + wantPtrs: false, + }, + { + name: "uint8", + typ: reflect.TypeFor[uint8](), + wantPtrs: false, + }, + { + name: "uint16", + typ: reflect.TypeFor[uint16](), + wantPtrs: false, + }, + { + name: "uint32", + typ: reflect.TypeFor[uint32](), + 
wantPtrs: false, + }, + { + name: "uint64", + typ: reflect.TypeFor[uint64](), + wantPtrs: false, + }, + { + name: "uintptr", + typ: reflect.TypeFor[uintptr](), + wantPtrs: false, + }, + { + name: "string", + typ: reflect.TypeFor[string](), + wantPtrs: false, + }, + { + name: "float32", + typ: reflect.TypeFor[float32](), + wantPtrs: false, + }, + { + name: "float64", + typ: reflect.TypeFor[float64](), + wantPtrs: false, + }, + { + name: "complex64", + typ: reflect.TypeFor[complex64](), + wantPtrs: false, + }, + { + name: "complex128", + typ: reflect.TypeFor[complex128](), + wantPtrs: false, + }, + { + name: "netip-Addr", + typ: reflect.TypeFor[netip.Addr](), + wantPtrs: false, + }, + { + name: "netip-Prefix", + typ: reflect.TypeFor[netip.Prefix](), + wantPtrs: false, + }, + { + name: "netip-AddrPort", + typ: reflect.TypeFor[netip.AddrPort](), + wantPtrs: false, + }, + { + name: "bool-ptr", + typ: reflect.TypeFor[*bool](), + wantPtrs: true, + }, + { + name: "string-ptr", + typ: reflect.TypeFor[*string](), + wantPtrs: true, + }, + { + name: "netip-Addr-ptr", + typ: reflect.TypeFor[*netip.Addr](), + wantPtrs: true, + }, + { + name: "unsafe-ptr", + typ: reflect.TypeFor[unsafe.Pointer](), + wantPtrs: true, + }, + { + name: "no-ptr-struct", + typ: reflect.TypeFor[noPtrStruct](), + wantPtrs: false, + }, + { + name: "ptr-struct", + typ: reflect.TypeFor[withPtrStruct](), + wantPtrs: true, + }, + { + name: "string-array", + typ: reflect.TypeFor[[5]string](), + wantPtrs: false, + }, + { + name: "int-ptr-array", + typ: reflect.TypeFor[[5]*int](), + wantPtrs: true, + }, + { + name: "no-ptr-struct-array", + typ: reflect.TypeFor[[5]noPtrStruct](), + wantPtrs: false, + }, + { + name: "with-ptr-struct-array", + typ: reflect.TypeFor[[5]withPtrStruct](), + wantPtrs: true, + }, + { + name: "string-slice", + typ: reflect.TypeFor[[]string](), + wantPtrs: true, + }, + { + name: "int-ptr-slice", + typ: reflect.TypeFor[[]int](), + wantPtrs: true, + }, + { + name: "no-ptr-struct-slice", + 
typ: reflect.TypeFor[[]noPtrStruct](), + wantPtrs: true, + }, + { + name: "string-map", + typ: reflect.TypeFor[map[string]string](), + wantPtrs: true, + }, + { + name: "int-map", + typ: reflect.TypeFor[map[int]int](), + wantPtrs: true, + }, + { + name: "no-ptr-struct-map", + typ: reflect.TypeFor[map[string]noPtrStruct](), + wantPtrs: true, + }, + { + name: "chan", + typ: reflect.TypeFor[chan int](), + wantPtrs: true, + }, + { + name: "func", + typ: reflect.TypeFor[func()](), + wantPtrs: true, + }, + { + name: "interface", + typ: reflect.TypeFor[any](), + wantPtrs: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if gotPtrs := containsPointers(tt.typ); gotPtrs != tt.wantPtrs { + t.Errorf("got %v; want %v", gotPtrs, tt.wantPtrs) + } + }) + } +} diff --git a/util/codegen/codegen.go b/util/codegen/codegen.go index 13dbc94a4043d..3ef4b9cc1230a 100644 --- a/util/codegen/codegen.go +++ b/util/codegen/codegen.go @@ -111,6 +111,14 @@ func (it *ImportTracker) QualifiedName(t types.Type) string { return types.TypeString(t, it.qualifier) } +// PackagePrefix returns the prefix to be used when referencing named objects from pkg. +func (it *ImportTracker) PackagePrefix(pkg *types.Package) string { + if s := it.qualifier(pkg); s != "" { + return s + "." + } + return "" +} + // Write prints all the tracked imports in a single import block to w. 
func (it *ImportTracker) Write(w io.Writer) { fmt.Fprintf(w, "import (\n") @@ -239,7 +247,7 @@ func AssertStructUnchanged(t *types.Struct, tname string, params *types.TypePara } } if st.Anonymous() { - w("\t%s %s", fname, tag) + w("\t%s %s", qname, tag) } else { w("\t%s %s %s", fname, qname, tag) } diff --git a/util/codegen/codegen_test.go b/util/codegen/codegen_test.go index 5f4a139793bb9..9c61da51d0ae2 100644 --- a/util/codegen/codegen_test.go +++ b/util/codegen/codegen_test.go @@ -4,8 +4,10 @@ package codegen import ( + "go/types" "log" "net/netip" + "strings" "testing" "unsafe" @@ -174,3 +176,79 @@ func TestGenericContainsPointers(t *testing.T) { }) } } + +func TestAssertStructUnchanged(t *testing.T) { + type args struct { + t *types.Struct + tname string + params *types.TypeParamList + ctx string + it *ImportTracker + } + + // package t1 with a struct T1 with two fields + p1 := types.NewPackage("t1", "t1") + t1 := types.NewNamed(types.NewTypeName(0, p1, "T1", nil), types.NewStruct([]*types.Var{ + types.NewField(0, nil, "P1", types.Typ[types.Int], false), + types.NewField(0, nil, "P2", types.Typ[types.String], false), + }, nil), nil) + p1.Scope().Insert(t1.Obj()) + + tests := []struct { + name string + args args + want []byte + }{ + { + name: "t1-internally_defined", + args: args{ + t: t1.Underlying().(*types.Struct), + tname: "prefix_", + params: nil, + ctx: "", + it: NewImportTracker(p1), + }, + want: []byte("var _prefix_NeedsRegeneration = prefix_(struct {\n\tP1 int \n\tP2 string \n}{})"), + }, + { + name: "t2-with_named_field", + args: args{ + t: types.NewStruct([]*types.Var{ + types.NewField(0, nil, "T1", t1, false), + types.NewField(0, nil, "P1", types.Typ[types.Int], false), + types.NewField(0, nil, "P2", types.Typ[types.String], false), + }, nil), + tname: "prefix_", + params: nil, + ctx: "", + it: NewImportTracker(types.NewPackage("t2", "t2")), + }, + // the struct should be regenerated with the named field + want: []byte("var _prefix_NeedsRegeneration 
= prefix_(struct {\n\tT1 t1.T1 \n\tP1 int \n\tP2 string \n}{})"), + }, + { + name: "t3-with_embedded_field", + args: args{ + t: types.NewStruct([]*types.Var{ + types.NewField(0, nil, "T1", t1, true), + types.NewField(0, nil, "P1", types.Typ[types.Int], false), + types.NewField(0, nil, "P2", types.Typ[types.String], false), + }, nil), + tname: "prefix_", + params: nil, + ctx: "", + it: NewImportTracker(types.NewPackage("t3", "t3")), + }, + // the struct should be regenerated with the embedded field + want: []byte("var _prefix_NeedsRegeneration = prefix_(struct {\n\tt1.T1 \n\tP1 int \n\tP2 string \n}{})"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := AssertStructUnchanged(tt.args.t, tt.args.tname, tt.args.params, tt.args.ctx, tt.args.it); !strings.Contains(string(got), string(tt.want)) { + t.Errorf("AssertStructUnchanged() = \n%s\nwant: \n%s", string(got), string(tt.want)) + } + }) + } +} diff --git a/util/linuxfw/nftables_runner.go b/util/linuxfw/nftables_runner.go index 1bcbffe340b67..ca5f65c2a4e0e 100644 --- a/util/linuxfw/nftables_runner.go +++ b/util/linuxfw/nftables_runner.go @@ -592,9 +592,23 @@ func New(logf logger.Logf, prefHint string) (NetfilterRunner, error) { mode := detectFirewallMode(logf, prefHint) switch mode { case FirewallModeIPTables: - return newIPTablesRunner(logf) + // Note that we don't simply return an newIPTablesRunner here because it + // would return a `nil` iptablesRunner which is different from returning + // a nil NetfilterRunner. + ipr, err := newIPTablesRunner(logf) + if err != nil { + return nil, err + } + return ipr, nil case FirewallModeNfTables: - return newNfTablesRunner(logf) + // Note that we don't simply return an newNfTablesRunner here because it + // would return a `nil` nftablesRunner which is different from returning + // a nil NetfilterRunner. 
+ nfr, err := newNfTablesRunner(logf) + if err != nil { + return nil, err + } + return nfr, nil default: return nil, fmt.Errorf("unknown firewall mode %v", mode) } diff --git a/util/osuser/user.go b/util/osuser/user.go index b10186c9757e7..2c7f2e24b9b11 100644 --- a/util/osuser/user.go +++ b/util/osuser/user.go @@ -51,8 +51,10 @@ func LookupByUsername(username string) (*user.User, error) { type lookupStd func(string) (*user.User, error) func lookup(usernameOrUID string, std lookupStd, wantShell bool) (*user.User, string, error) { - // TODO(awly): we should use genet on more platforms, like FreeBSD. - if runtime.GOOS != "linux" { + // Skip getent entirely on Non-Unix platforms that won't ever have it. + // (Using HasPrefix for "wasip1", anticipating that WASI support will + // move beyond "preview 1" some day.) + if runtime.GOOS == "windows" || runtime.GOOS == "js" || runtime.GOARCH == "wasm" { u, err := std(usernameOrUID) return u, "", err } @@ -129,6 +131,14 @@ func userLookupGetent(usernameOrUID string, std lookupStd) (*user.User, string, for len(f) < 7 { f = append(f, "") } + var mandatoryFields = []int{0, 2, 3, 5} + for _, v := range mandatoryFields { + if f[v] == "" { + log.Printf("getent for user %q returned invalid output: %q", usernameOrUID, out) + u, err := std(usernameOrUID) + return u, "", err + } + } return &user.User{ Username: f[0], Uid: f[2], diff --git a/util/slicesx/slicesx.go b/util/slicesx/slicesx.go index 5f6eb8d91feae..8abf2bd645856 100644 --- a/util/slicesx/slicesx.go +++ b/util/slicesx/slicesx.go @@ -4,7 +4,10 @@ // Package slicesx contains some helpful generic slice functions. package slicesx -import "math/rand/v2" +import ( + "math/rand/v2" + "slices" +) // Interleave combines two slices of the form [a, b, c] and [x, y, z] into a // slice with elements interleaved; i.e. [a, x, b, y, c, z]. 
@@ -101,3 +104,35 @@ func AppendMatching[T any](dst, ps []T, f func(T) bool) []T { } return dst } + +// HasPrefix reports whether the byte slice s begins with prefix. +func HasPrefix[E comparable](s, prefix []E) bool { + return len(s) >= len(prefix) && slices.Equal(s[0:len(prefix)], prefix) +} + +// HasSuffix reports whether the slice s ends with suffix. +func HasSuffix[E comparable](s, suffix []E) bool { + return len(s) >= len(suffix) && slices.Equal(s[len(s)-len(suffix):], suffix) +} + +// CutPrefix returns s without the provided leading prefix slice and reports +// whether it found the prefix. If s doesn't start with prefix, CutPrefix +// returns s, false. If prefix is the empty slice, CutPrefix returns s, true. +// CutPrefix returns slices of the original slice s, not copies. +func CutPrefix[E comparable](s, prefix []E) (after []E, found bool) { + if !HasPrefix(s, prefix) { + return s, false + } + return s[len(prefix):], true +} + +// CutSuffix returns s without the provided ending suffix slice and reports +// whether it found the suffix. If s doesn't end with suffix, CutSuffix returns +// s, false. If suffix is the empty slice, CutSuffix returns s, true. +// CutSuffix returns slices of the original slice s, not copies. 
+func CutSuffix[E comparable](s, suffix []E) (after []E, found bool) { + if !HasSuffix(s, suffix) { + return s, false + } + return s[:len(s)-len(suffix)], true +} diff --git a/util/slicesx/slicesx_test.go b/util/slicesx/slicesx_test.go index 854fe824d1130..be136d288f3e0 100644 --- a/util/slicesx/slicesx_test.go +++ b/util/slicesx/slicesx_test.go @@ -151,3 +151,49 @@ func TestAppendMatching(t *testing.T) { t.Errorf("got %v; want %v", v, wantOrigMem) } } + +func TestCutPrefix(t *testing.T) { + tests := []struct { + name string + s, prefix []int + after []int + found bool + }{ + {"has-prefix", []int{1, 2, 3}, []int{1}, []int{2, 3}, true}, + {"exact-prefix", []int{1, 2, 3}, []int{1, 2, 3}, []int{}, true}, + {"blank-prefix", []int{1, 2, 3}, []int{}, []int{1, 2, 3}, true}, + {"no-prefix", []int{1, 2, 3}, []int{42}, []int{1, 2, 3}, false}, + {"blank-slice", []int{}, []int{42}, []int{}, false}, + {"blank-all", []int{}, []int{}, []int{}, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if after, found := CutPrefix(tt.s, tt.prefix); !slices.Equal(after, tt.after) || found != tt.found { + t.Errorf("CutPrefix(%v, %v) = %v, %v; want %v, %v", tt.s, tt.prefix, after, found, tt.after, tt.found) + } + }) + } +} + +func TestCutSuffix(t *testing.T) { + tests := []struct { + name string + s, suffix []int + before []int + found bool + }{ + {"has-suffix", []int{1, 2, 3}, []int{3}, []int{1, 2}, true}, + {"exact-suffix", []int{1, 2, 3}, []int{1, 2, 3}, []int{}, true}, + {"blank-suffix", []int{1, 2, 3}, []int{}, []int{1, 2, 3}, true}, + {"no-suffix", []int{1, 2, 3}, []int{42}, []int{1, 2, 3}, false}, + {"blank-slice", []int{}, []int{42}, []int{}, false}, + {"blank-all", []int{}, []int{}, []int{}, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if before, found := CutSuffix(tt.s, tt.suffix); !slices.Equal(before, tt.before) || found != tt.found { + t.Errorf("CutSuffix(%v, %v) = %v, %v; want %v, %v", tt.s, tt.suffix, before, 
found, tt.before, tt.found) + } + }) + } +} diff --git a/util/syspolicy/internal/internal.go b/util/syspolicy/internal/internal.go new file mode 100644 index 0000000000000..4c3e28d3914bb --- /dev/null +++ b/util/syspolicy/internal/internal.go @@ -0,0 +1,63 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package internal contains miscellaneous functions and types +// that are internal to the syspolicy packages. +package internal + +import ( + "bytes" + + "github.com/go-json-experiment/json/jsontext" + "tailscale.com/types/lazy" + "tailscale.com/version" +) + +// OSForTesting is the operating system override used for testing. +// It follows the same naming convention as [version.OS]. +var OSForTesting lazy.SyncValue[string] + +// OS is like [version.OS], but supports a test hook. +func OS() string { + return OSForTesting.Get(version.OS) +} + +// TB is a subset of testing.TB that we use to set up test helpers. +// It's defined here to avoid pulling in the testing package. +type TB interface { + Helper() + Cleanup(func()) + Logf(format string, args ...any) + Error(args ...any) + Errorf(format string, args ...any) + Fatal(args ...any) + Fatalf(format string, args ...any) +} + +// EqualJSONForTest compares the JSON in j1 and j2 for semantic equality. +// It returns "", "", true if j1 and j2 are equal. Otherwise, it returns +// indented versions of j1 and j2 and false. +func EqualJSONForTest(tb TB, j1, j2 jsontext.Value) (s1, s2 string, equal bool) { + tb.Helper() + j1 = j1.Clone() + j2 = j2.Clone() + // Canonicalize JSON values for comparison. + if err := j1.Canonicalize(); err != nil { + tb.Error(err) + } + if err := j2.Canonicalize(); err != nil { + tb.Error(err) + } + // Check and return true if the two values are structurally equal. + if bytes.Equal(j1, j2) { + return "", "", true + } + // Otherwise, format the values for display and return false. 
+ if err := j1.Indent("", "\t"); err != nil { + tb.Fatal(err) + } + if err := j2.Indent("", "\t"); err != nil { + tb.Fatal(err) + } + return j1.String(), j2.String(), false +} diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index ef0cfed8fc87b..a88025205fa26 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -3,7 +3,9 @@ package syspolicy -type Key string +import "tailscale.com/util/syspolicy/setting" + +type Key = setting.Key const ( // Keys with a string value diff --git a/util/syspolicy/setting/errors.go b/util/syspolicy/setting/errors.go new file mode 100644 index 0000000000000..d7e14df83b8fe --- /dev/null +++ b/util/syspolicy/setting/errors.go @@ -0,0 +1,71 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package setting + +import ( + "errors" + + "tailscale.com/types/ptr" +) + +var ( + // ErrNotConfigured is returned when the requested policy setting is not configured. + ErrNotConfigured = errors.New("not configured") + // ErrTypeMismatch is returned when there's a type mismatch between the actual type + // of the setting value and the expected type. + ErrTypeMismatch = errors.New("type mismatch") + // ErrNoSuchKey is returned by [DefinitionOf] when no policy setting + // has been registered with the specified key. + // + // Until 2024-08-02, this error was also returned by a [Handler] when the specified + // key did not have a value set. While the package maintains compatibility with this + // usage of ErrNoSuchKey, it is recommended to return [ErrNotConfigured] from newer + // [source.Store] implementations. + ErrNoSuchKey = errors.New("no such key") +) + +// ErrorText represents an error that occurs when reading or parsing a policy setting. +// This includes errors due to permissions issues, value type and format mismatches, +// and other platform- or source-specific errors. 
It does not include +// [ErrNotConfigured] and [ErrNoSuchKey], as those correspond to unconfigured +// policy settings rather than settings that cannot be read or parsed +// due to an error. +// +// ErrorText is used to marshal errors when a policy setting is sent over the wire, +// allowing the error to be logged or displayed. It does not preserve the +// type information of the underlying error. +type ErrorText string + +// NewErrorText returns a [ErrorText] with the specified error message. +func NewErrorText(text string) *ErrorText { + return ptr.To(ErrorText(text)) +} + +// NewErrorTextFromError returns an [ErrorText] with the text of the specified error, +// or nil if err is nil, [ErrNotConfigured], or [ErrNoSuchKey]. +func NewErrorTextFromError(err error) *ErrorText { + if err == nil || errors.Is(err, ErrNotConfigured) || errors.Is(err, ErrNoSuchKey) { + return nil + } + if err, ok := err.(*ErrorText); ok { + return err + } + return ptr.To(ErrorText(err.Error())) +} + +// Error implements error. +func (e ErrorText) Error() string { + return string(e) +} + +// MarshalText implements [encoding.TextMarshaler]. +func (e ErrorText) MarshalText() (text []byte, err error) { + return []byte(e.Error()), nil +} + +// UnmarshalText implements [encoding.TextUnmarshaler]. +func (e *ErrorText) UnmarshalText(text []byte) error { + *e = ErrorText(text) + return nil +} diff --git a/util/syspolicy/setting/key.go b/util/syspolicy/setting/key.go new file mode 100644 index 0000000000000..406fde1321cc2 --- /dev/null +++ b/util/syspolicy/setting/key.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package setting + +// Key is a string that uniquely identifies a policy and must remain unchanged +// once established and documented for a given policy setting. It may contain +// alphanumeric characters and zero or more [KeyPathSeparator]s to group +// individual policy settings into categories. 
+type Key string + +// KeyPathSeparator allows logical grouping of policy settings into categories. +const KeyPathSeparator = "/" diff --git a/util/syspolicy/setting/origin.go b/util/syspolicy/setting/origin.go new file mode 100644 index 0000000000000..078ef758e9150 --- /dev/null +++ b/util/syspolicy/setting/origin.go @@ -0,0 +1,71 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package setting + +import ( + "fmt" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" +) + +// Origin describes where a policy or a policy setting is configured. +type Origin struct { + data settingOrigin +} + +// settingOrigin is the marshallable data of an [Origin]. +type settingOrigin struct { + Name string `json:",omitzero"` + Scope PolicyScope +} + +// NewOrigin returns a new [Origin] with the specified scope. +func NewOrigin(scope PolicyScope) *Origin { + return NewNamedOrigin("", scope) +} + +// NewNamedOrigin returns a new [Origin] with the specified scope and name. +func NewNamedOrigin(name string, scope PolicyScope) *Origin { + return &Origin{settingOrigin{name, scope}} +} + +// Scope reports the policy [PolicyScope] where the setting is configured. +func (s Origin) Scope() PolicyScope { + return s.data.Scope +} + +// Name returns the name of the policy source where the setting is configured, +// or "" if not available. +func (s Origin) Name() string { + return s.data.Name +} + +// String implements [fmt.Stringer]. +func (s Origin) String() string { + if s.Name() != "" { + return fmt.Sprintf("%s (%v)", s.Name(), s.Scope()) + } + return s.Scope().String() +} + +// MarshalJSONV2 implements [jsonv2.MarshalerV2]. +func (s Origin) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { + return jsonv2.MarshalEncode(out, &s.data, opts) +} + +// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. 
+func (s *Origin) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { + return jsonv2.UnmarshalDecode(in, &s.data, opts) +} + +// MarshalJSON implements [json.Marshaler]. +func (s Origin) MarshalJSON() ([]byte, error) { + return jsonv2.Marshal(s) // uses MarshalJSONV2 +} + +// UnmarshalJSON implements [json.Unmarshaler]. +func (s *Origin) UnmarshalJSON(b []byte) error { + return jsonv2.Unmarshal(b, s) // uses UnmarshalJSONV2 +} diff --git a/util/syspolicy/setting/policy_scope.go b/util/syspolicy/setting/policy_scope.go new file mode 100644 index 0000000000000..55fa339e7e813 --- /dev/null +++ b/util/syspolicy/setting/policy_scope.go @@ -0,0 +1,189 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package setting + +import ( + "fmt" + "strings" + + "tailscale.com/types/lazy" +) + +var ( + lazyDefaultScope lazy.SyncValue[PolicyScope] + + // DeviceScope indicates a scope containing device-global policies. + DeviceScope = PolicyScope{kind: DeviceSetting} + // CurrentProfileScope indicates a scope containing policies that apply to the + // currently active Tailscale profile. + CurrentProfileScope = PolicyScope{kind: ProfileSetting} + // CurrentUserScope indicates a scope containing policies that apply to the + // current user, for whatever that means on the current platform and + // in the current application context. + CurrentUserScope = PolicyScope{kind: UserSetting} +) + +// PolicyScope is a management scope. +type PolicyScope struct { + kind Scope + userID string + profileID string +} + +// DefaultScope returns the default [PolicyScope] to be used by a program +// when querying policy settings. +// It returns [DeviceScope], unless explicitly changed with [SetDefaultScope]. 
+func DefaultScope() PolicyScope { + return lazyDefaultScope.Get(func() PolicyScope { return DeviceScope }) +} + +// SetDefaultScope attempts to set the specified scope as the default scope +// to be used by a program when querying policy settings. +// It fails and returns false if called more than once, or if the [DefaultScope] +// has already been used. +func SetDefaultScope(scope PolicyScope) bool { + return lazyDefaultScope.Set(scope) +} + +// UserScopeOf returns a policy [PolicyScope] of the user with the specified id. +func UserScopeOf(uid string) PolicyScope { + return PolicyScope{kind: UserSetting, userID: uid} +} + +// Kind reports the scope kind of s. +func (s PolicyScope) Kind() Scope { + return s.kind +} + +// IsApplicableSetting reports whether the specified setting applies to +// and can be retrieved for this scope. Policy settings are applicable +// to their own scopes as well as more specific scopes. For example, +// device settings are applicable to device, profile and user scopes, +// but user settings are only applicable to user scopes. +// For instance, a menu visibility setting is inherently a user setting +// and only makes sense in the context of a specific user. +func (s PolicyScope) IsApplicableSetting(setting *Definition) bool { + return setting != nil && setting.Scope() <= s.Kind() +} + +// IsConfigurableSetting reports whether the specified setting can be configured +// by a policy at this scope. Policy settings are configurable at their own scopes +// as well as broader scopes. For example, [UserSetting]s are configurable in +// user, profile, and device scopes, but [DeviceSetting]s are only configurable +// in the [DeviceScope]. For instance, the InstallUpdates policy setting +// can only be configured in the device scope, as it controls whether updates +// will be installed automatically on the device, rather than for specific users. 
+func (s PolicyScope) IsConfigurableSetting(setting *Definition) bool { + return setting != nil && setting.Scope() >= s.Kind() +} + +// Contains reports whether policy settings that apply to s also apply to s2. +// For example, policy settings that apply to the [DeviceScope] also apply to +// the [CurrentUserScope]. +func (s PolicyScope) Contains(s2 PolicyScope) bool { + if s.Kind() > s2.Kind() { + return false + } + switch s.Kind() { + case DeviceSetting: + return true + case ProfileSetting: + return s.profileID == s2.profileID + case UserSetting: + return s.userID == s2.userID + default: + panic("unreachable") + } +} + +// StrictlyContains is like [PolicyScope.Contains], but returns false +// when s and s2 is the same scope. +func (s PolicyScope) StrictlyContains(s2 PolicyScope) bool { + return s != s2 && s.Contains(s2) +} + +// String implements [fmt.Stringer]. +func (s PolicyScope) String() string { + if s.profileID == "" && s.userID == "" { + return s.kind.String() + } + return s.stringSlow() +} + +// MarshalText implements [encoding.TextMarshaler]. +func (s PolicyScope) MarshalText() ([]byte, error) { + return []byte(s.String()), nil +} + +// MarshalText implements [encoding.TextUnmarshaler]. 
+func (s *PolicyScope) UnmarshalText(b []byte) error { + *s = PolicyScope{} + parts := strings.SplitN(string(b), "/", 2) + for i, part := range parts { + kind, id, err := parseScopeAndID(part) + if err != nil { + return err + } + if i > 0 && kind <= s.kind { + return fmt.Errorf("invalid scope hierarchy: %s", b) + } + s.kind = kind + switch kind { + case DeviceSetting: + if id != "" { + return fmt.Errorf("the device scope must not have an ID: %s", b) + } + case ProfileSetting: + s.profileID = id + case UserSetting: + s.userID = id + } + } + return nil +} + +func (s PolicyScope) stringSlow() string { + var sb strings.Builder + writeScopeWithID := func(s Scope, id string) { + sb.WriteString(s.String()) + if id != "" { + sb.WriteRune('(') + sb.WriteString(id) + sb.WriteRune(')') + } + } + if s.kind == ProfileSetting || s.profileID != "" { + writeScopeWithID(ProfileSetting, s.profileID) + if s.kind != ProfileSetting { + sb.WriteRune('/') + } + } + if s.kind == UserSetting { + writeScopeWithID(UserSetting, s.userID) + } + return sb.String() +} + +func parseScopeAndID(s string) (scope Scope, id string, err error) { + name, params, ok := extractScopeAndParams(s) + if !ok { + return 0, "", fmt.Errorf("%q is not a valid scope string", s) + } + if err := scope.UnmarshalText([]byte(name)); err != nil { + return 0, "", err + } + return scope, params, nil +} + +func extractScopeAndParams(s string) (name, params string, ok bool) { + paramsStart := strings.Index(s, "(") + if paramsStart == -1 { + return s, "", true + } + paramsEnd := strings.LastIndex(s, ")") + if paramsEnd < paramsStart { + return "", "", false + } + return s[0:paramsStart], s[paramsStart+1 : paramsEnd], true +} diff --git a/util/syspolicy/setting/policy_scope_test.go b/util/syspolicy/setting/policy_scope_test.go new file mode 100644 index 0000000000000..e1b6cf7ea0a78 --- /dev/null +++ b/util/syspolicy/setting/policy_scope_test.go @@ -0,0 +1,565 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// 
SPDX-License-Identifier: BSD-3-Clause + +package setting + +import ( + "reflect" + "testing" + + jsonv2 "github.com/go-json-experiment/json" +) + +func TestPolicyScopeIsApplicableSetting(t *testing.T) { + tests := []struct { + name string + scope PolicyScope + setting *Definition + wantApplicable bool + }{ + { + name: "DeviceScope/DeviceSetting", + scope: DeviceScope, + setting: NewDefinition("TestSetting", DeviceSetting, IntegerValue), + wantApplicable: true, + }, + { + name: "DeviceScope/ProfileSetting", + scope: DeviceScope, + setting: NewDefinition("TestSetting", ProfileSetting, IntegerValue), + wantApplicable: false, + }, + { + name: "DeviceScope/UserSetting", + scope: DeviceScope, + setting: NewDefinition("TestSetting", UserSetting, IntegerValue), + wantApplicable: false, + }, + { + name: "ProfileScope/DeviceSetting", + scope: CurrentProfileScope, + setting: NewDefinition("TestSetting", DeviceSetting, IntegerValue), + wantApplicable: true, + }, + { + name: "ProfileScope/ProfileSetting", + scope: CurrentProfileScope, + setting: NewDefinition("TestSetting", ProfileSetting, IntegerValue), + wantApplicable: true, + }, + { + name: "ProfileScope/UserSetting", + scope: CurrentProfileScope, + setting: NewDefinition("TestSetting", UserSetting, IntegerValue), + wantApplicable: false, + }, + { + name: "UserScope/DeviceSetting", + scope: CurrentUserScope, + setting: NewDefinition("TestSetting", DeviceSetting, IntegerValue), + wantApplicable: true, + }, + { + name: "UserScope/ProfileSetting", + scope: CurrentUserScope, + setting: NewDefinition("TestSetting", ProfileSetting, IntegerValue), + wantApplicable: true, + }, + { + name: "UserScope/UserSetting", + scope: CurrentUserScope, + setting: NewDefinition("TestSetting", UserSetting, IntegerValue), + wantApplicable: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotApplicable := tt.scope.IsApplicableSetting(tt.setting) + if gotApplicable != tt.wantApplicable { + t.Fatalf("got %v, want 
%v", gotApplicable, tt.wantApplicable) + } + }) + } +} + +func TestPolicyScopeIsConfigurableSetting(t *testing.T) { + tests := []struct { + name string + scope PolicyScope + setting *Definition + wantConfigurable bool + }{ + { + name: "DeviceScope/DeviceSetting", + scope: DeviceScope, + setting: NewDefinition("TestSetting", DeviceSetting, IntegerValue), + wantConfigurable: true, + }, + { + name: "DeviceScope/ProfileSetting", + scope: DeviceScope, + setting: NewDefinition("TestSetting", ProfileSetting, IntegerValue), + wantConfigurable: true, + }, + { + name: "DeviceScope/UserSetting", + scope: DeviceScope, + setting: NewDefinition("TestSetting", UserSetting, IntegerValue), + wantConfigurable: true, + }, + { + name: "ProfileScope/DeviceSetting", + scope: CurrentProfileScope, + setting: NewDefinition("TestSetting", DeviceSetting, IntegerValue), + wantConfigurable: false, + }, + { + name: "ProfileScope/ProfileSetting", + scope: CurrentProfileScope, + setting: NewDefinition("TestSetting", ProfileSetting, IntegerValue), + wantConfigurable: true, + }, + { + name: "ProfileScope/UserSetting", + scope: CurrentProfileScope, + setting: NewDefinition("TestSetting", UserSetting, IntegerValue), + wantConfigurable: true, + }, + { + name: "UserScope/DeviceSetting", + scope: CurrentUserScope, + setting: NewDefinition("TestSetting", DeviceSetting, IntegerValue), + wantConfigurable: false, + }, + { + name: "UserScope/ProfileSetting", + scope: CurrentUserScope, + setting: NewDefinition("TestSetting", ProfileSetting, IntegerValue), + wantConfigurable: false, + }, + { + name: "UserScope/UserSetting", + scope: CurrentUserScope, + setting: NewDefinition("TestSetting", UserSetting, IntegerValue), + wantConfigurable: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotConfigurable := tt.scope.IsConfigurableSetting(tt.setting) + if gotConfigurable != tt.wantConfigurable { + t.Fatalf("got %v, want %v", gotConfigurable, tt.wantConfigurable) + } + }) + } +} + 
+func TestPolicyScopeContains(t *testing.T) { + tests := []struct { + name string + scopeA PolicyScope + scopeB PolicyScope + wantAContainsB bool + wantAStrictlyContainsB bool + }{ + { + name: "DeviceScope/DeviceScope", + scopeA: DeviceScope, + scopeB: DeviceScope, + wantAContainsB: true, + wantAStrictlyContainsB: false, + }, + { + name: "DeviceScope/CurrentProfileScope", + scopeA: DeviceScope, + scopeB: CurrentProfileScope, + wantAContainsB: true, + wantAStrictlyContainsB: true, + }, + { + name: "DeviceScope/UserScope", + scopeA: DeviceScope, + scopeB: CurrentUserScope, + wantAContainsB: true, + wantAStrictlyContainsB: true, + }, + { + name: "ProfileScope/DeviceScope", + scopeA: CurrentProfileScope, + scopeB: DeviceScope, + wantAContainsB: false, + wantAStrictlyContainsB: false, + }, + { + name: "ProfileScope/ProfileScope", + scopeA: CurrentProfileScope, + scopeB: CurrentProfileScope, + wantAContainsB: true, + wantAStrictlyContainsB: false, + }, + { + name: "ProfileScope/UserScope", + scopeA: CurrentProfileScope, + scopeB: CurrentUserScope, + wantAContainsB: true, + wantAStrictlyContainsB: true, + }, + { + name: "UserScope/DeviceScope", + scopeA: CurrentUserScope, + scopeB: DeviceScope, + wantAContainsB: false, + wantAStrictlyContainsB: false, + }, + { + name: "UserScope/ProfileScope", + scopeA: CurrentUserScope, + scopeB: CurrentProfileScope, + wantAContainsB: false, + wantAStrictlyContainsB: false, + }, + { + name: "UserScope/UserScope", + scopeA: CurrentUserScope, + scopeB: CurrentUserScope, + wantAContainsB: true, + wantAStrictlyContainsB: false, + }, + { + name: "UserScope(1234)/UserScope(1234)", + scopeA: UserScopeOf("1234"), + scopeB: UserScopeOf("1234"), + wantAContainsB: true, + wantAStrictlyContainsB: false, + }, + { + name: "UserScope(1234)/UserScope(5678)", + scopeA: UserScopeOf("1234"), + scopeB: UserScopeOf("5678"), + wantAContainsB: false, + wantAStrictlyContainsB: false, + }, + { + name: "ProfileScope(A)/UserScope(A/1234)", + scopeA: 
PolicyScope{kind: ProfileSetting, profileID: "A"}, + scopeB: PolicyScope{kind: UserSetting, userID: "1234", profileID: "A"}, + wantAContainsB: true, + wantAStrictlyContainsB: true, + }, + { + name: "ProfileScope(A)/UserScope(B/1234)", + scopeA: PolicyScope{kind: ProfileSetting, profileID: "A"}, + scopeB: PolicyScope{kind: UserSetting, userID: "1234", profileID: "B"}, + wantAContainsB: false, + wantAStrictlyContainsB: false, + }, + { + name: "UserScope(1234)/UserScope(A/1234)", + scopeA: PolicyScope{kind: UserSetting, userID: "1234"}, + scopeB: PolicyScope{kind: UserSetting, userID: "1234", profileID: "A"}, + wantAContainsB: true, + wantAStrictlyContainsB: true, + }, + { + name: "UserScope(1234)/UserScope(A/5678)", + scopeA: PolicyScope{kind: UserSetting, userID: "1234"}, + scopeB: PolicyScope{kind: UserSetting, userID: "5678", profileID: "A"}, + wantAContainsB: false, + wantAStrictlyContainsB: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotContains := tt.scopeA.Contains(tt.scopeB) + if gotContains != tt.wantAContainsB { + t.Fatalf("WithinOf: got %v, want %v", gotContains, tt.wantAContainsB) + } + + gotStrictlyContains := tt.scopeA.StrictlyContains(tt.scopeB) + if gotStrictlyContains != tt.wantAStrictlyContainsB { + t.Fatalf("StrictlyWithinOf: got %v, want %v", gotStrictlyContains, tt.wantAStrictlyContainsB) + } + }) + } +} + +func TestPolicyScopeMarshalUnmarshal(t *testing.T) { + tests := []struct { + name string + in any + wantJSON string + wantError bool + }{ + { + name: "null-scope", + in: &struct { + Scope PolicyScope + }{}, + wantJSON: `{"Scope":"Device"}`, + }, + { + name: "null-scope-omit-zero", + in: &struct { + Scope PolicyScope `json:",omitzero"` + }{}, + wantJSON: `{}`, + }, + { + name: "device-scope", + in: &struct { + Scope PolicyScope + }{DeviceScope}, + wantJSON: `{"Scope":"Device"}`, + }, + { + name: "current-profile-scope", + in: &struct { + Scope PolicyScope + }{CurrentProfileScope}, + wantJSON: 
`{"Scope":"Profile"}`, + }, + { + name: "current-user-scope", + in: &struct { + Scope PolicyScope + }{CurrentUserScope}, + wantJSON: `{"Scope":"User"}`, + }, + { + name: "specific-user-scope", + in: &struct { + Scope PolicyScope + }{UserScopeOf("_")}, + wantJSON: `{"Scope":"User(_)"}`, + }, + { + name: "specific-user-scope", + in: &struct { + Scope PolicyScope + }{UserScopeOf("S-1-5-21-3698941153-1525015703-2649197413-1001")}, + wantJSON: `{"Scope":"User(S-1-5-21-3698941153-1525015703-2649197413-1001)"}`, + }, + { + name: "specific-profile-scope", + in: &struct { + Scope PolicyScope + }{PolicyScope{kind: ProfileSetting, profileID: "1234"}}, + wantJSON: `{"Scope":"Profile(1234)"}`, + }, + { + name: "specific-profile-and-user-scope", + in: &struct { + Scope PolicyScope + }{PolicyScope{ + kind: UserSetting, + profileID: "1234", + userID: "S-1-5-21-3698941153-1525015703-2649197413-1001", + }}, + wantJSON: `{"Scope":"Profile(1234)/User(S-1-5-21-3698941153-1525015703-2649197413-1001)"}`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotJSON, err := jsonv2.Marshal(tt.in) + if err != nil { + t.Fatalf("Marshal failed: %v", err) + } + if string(gotJSON) != tt.wantJSON { + t.Fatalf("Marshal got %s, want %s", gotJSON, tt.wantJSON) + } + wantBack := tt.in + gotBack := reflect.New(reflect.TypeOf(tt.in).Elem()).Interface() + err = jsonv2.Unmarshal(gotJSON, gotBack) + if err != nil { + t.Fatalf("Unmarshal failed: %v", err) + } + if !reflect.DeepEqual(gotBack, wantBack) { + t.Fatalf("Unmarshal got %+v, want %+v", gotBack, wantBack) + } + }) + } +} + +func TestPolicyScopeUnmarshalSpecial(t *testing.T) { + tests := []struct { + name string + json string + want any + wantError bool + }{ + { + name: "empty", + json: "{}", + want: &struct { + Scope PolicyScope + }{}, + }, + { + name: "too-many-scopes", + json: `{"Scope":"Device/Profile/User"}`, + wantError: true, + }, + { + name: "user/profile", // incorrect order + json: `{"Scope":"User/Profile"}`, + 
wantError: true, + }, + { + name: "profile-user-no-params", + json: `{"Scope":"Profile/User"}`, + want: &struct { + Scope PolicyScope + }{CurrentUserScope}, + }, + { + name: "unknown-scope", + json: `{"Scope":"Unknown"}`, + wantError: true, + }, + { + name: "unknown-scope/unknown-scope", + json: `{"Scope":"Unknown/Unknown"}`, + wantError: true, + }, + { + name: "device-scope/unknown-scope", + json: `{"Scope":"Device/Unknown"}`, + wantError: true, + }, + { + name: "unknown-scope/device-scope", + json: `{"Scope":"Unknown/Device"}`, + wantError: true, + }, + { + name: "slash", + json: `{"Scope":"/"}`, + wantError: true, + }, + { + name: "empty", + json: `{"Scope": ""`, + wantError: true, + }, + { + name: "no-closing-bracket", + json: `{"Scope": "user(1234"`, + wantError: true, + }, + { + name: "device-with-id", + json: `{"Scope": "device(123)"`, + wantError: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := &struct { + Scope PolicyScope + }{} + err := jsonv2.Unmarshal([]byte(tt.json), got) + if (err != nil) != tt.wantError { + t.Errorf("Marshal error: got %v, want %v", err, tt.wantError) + } + if err != nil { + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Fatalf("Unmarshal got %+v, want %+v", got, tt.want) + } + }) + } + +} + +func TestExtractScopeAndParams(t *testing.T) { + tests := []struct { + name string + s string + scope string + params string + wantOk bool + }{ + { + name: "empty", + s: "", + wantOk: true, + }, + { + name: "scope-only", + s: "device", + scope: "device", + wantOk: true, + }, + { + name: "scope-with-params", + s: "user(1234)", + scope: "user", + params: "1234", + wantOk: true, + }, + { + name: "params-empty-scope", + s: "(1234)", + scope: "", + params: "1234", + wantOk: true, + }, + { + name: "params-with-brackets", + s: "test()())))())", + scope: "test", + params: ")())))()", + wantOk: true, + }, + { + name: "no-closing-bracket", + s: "user(1234", + scope: "", + params: "", + wantOk: false, + }, 
+ { + name: "open-before-close", + s: ")user(1234", + scope: "", + params: "", + wantOk: false, + }, + { + name: "brackets-only", + s: ")(", + scope: "", + params: "", + wantOk: false, + }, + { + name: "closing-bracket", + s: ")", + scope: "", + params: "", + wantOk: false, + }, + { + name: "opening-bracket", + s: ")", + scope: "", + params: "", + wantOk: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scope, params, ok := extractScopeAndParams(tt.s) + if ok != tt.wantOk { + t.Logf("OK: got %v; want %v", ok, tt.wantOk) + } + if scope != tt.scope { + t.Logf("Scope: got %q; want %q", scope, tt.scope) + } + if params != tt.params { + t.Logf("Params: got %v; want %v", params, tt.params) + } + }) + } +} diff --git a/util/syspolicy/setting/raw_item.go b/util/syspolicy/setting/raw_item.go new file mode 100644 index 0000000000000..30480d8923f71 --- /dev/null +++ b/util/syspolicy/setting/raw_item.go @@ -0,0 +1,67 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package setting + +import ( + "fmt" + + "tailscale.com/types/structs" +) + +// RawItem contains a raw policy setting value as read from a policy store, or an +// error if the requested setting could not be read from the store. As a special +// case, it may also hold a value of the [Visibility], [PreferenceOption], +// or [time.Duration] types. While the policy store interface does not support +// these types natively, and the values of these types have to be unmarshalled +// or converted from strings, these setting types predate the typed policy +// hierarchies, and must be supported at this layer. +type RawItem struct { + _ structs.Incomparable + value any + err *ErrorText + origin *Origin // or nil +} + +// RawItemOf returns a [RawItem] with the specified value. +func RawItemOf(value any) RawItem { + return RawItemWith(value, nil, nil) +} + +// RawItemWith returns a [RawItem] with the specified value, error and origin. 
+func RawItemWith(value any, err *ErrorText, origin *Origin) RawItem { + return RawItem{value: value, err: err, origin: origin} +} + +// Value returns the value of the policy setting, or nil if the policy setting +// is not configured, or an error occurred while reading it. +func (i RawItem) Value() any { + return i.value +} + +// Error returns the error that occurred when reading the policy setting, +// or nil if no error occurred. +func (i RawItem) Error() error { + if i.err != nil { + return i.err + } + return nil +} + +// Origin returns an optional [Origin] indicating where the policy setting is +// configured. +func (i RawItem) Origin() *Origin { + return i.origin +} + +// String implements [fmt.Stringer]. +func (i RawItem) String() string { + var suffix string + if i.origin != nil { + suffix = fmt.Sprintf(" - {%v}", i.origin) + } + if i.err != nil { + return fmt.Sprintf("Error{%q}%s", i.err.Error(), suffix) + } + return fmt.Sprintf("%v%s", i.value, suffix) +} diff --git a/util/syspolicy/setting/setting.go b/util/syspolicy/setting/setting.go new file mode 100644 index 0000000000000..93be287b11e86 --- /dev/null +++ b/util/syspolicy/setting/setting.go @@ -0,0 +1,348 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package setting contains types for defining and representing policy settings. +// It facilitates the registration of setting definitions using [Register] and [RegisterDefinition], +// and the retrieval of registered setting definitions via [Definitions] and [DefinitionOf]. +// This package is intended for use primarily within the syspolicy package hierarchy. +package setting + +import ( + "fmt" + "slices" + "strings" + "sync" + "time" + + "tailscale.com/types/lazy" + "tailscale.com/util/syspolicy/internal" +) + +// Scope indicates the broadest scope at which a policy setting may apply, +// and the narrowest scope at which it may be configured. 
+type Scope int8 + +const ( + // DeviceSetting indicates a policy setting that applies to a device, regardless of + // which OS user or Tailscale profile is currently active, if any. + // It can only be configured at a [DeviceScope]. + DeviceSetting Scope = iota + // ProfileSetting indicates a policy setting that applies to a Tailscale profile. + // It can only be configured for a specific profile or at a [DeviceScope], + // in which case it applies to all profiles on the device. + ProfileSetting + // UserSetting indicates a policy setting that applies to users. + // It can be configured for a user, profile, or the entire device. + UserSetting + + // NumScopes is the number of possible [Scope] values. + NumScopes int = iota // must be the last value in the const block. +) + +// String implements [fmt.Stringer]. +func (s Scope) String() string { + switch s { + case DeviceSetting: + return "Device" + case ProfileSetting: + return "Profile" + case UserSetting: + return "User" + default: + panic("unreachable") + } +} + +// MarshalText implements [encoding.TextMarshaler]. +func (s Scope) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +// UnmarshalText implements [encoding.TextUnmarshaler]. +func (s *Scope) UnmarshalText(text []byte) error { + switch strings.ToLower(string(text)) { + case "device": + *s = DeviceSetting + case "profile": + *s = ProfileSetting + case "user": + *s = UserSetting + default: + return fmt.Errorf("%q is not a valid scope", string(text)) + } + return nil +} + +// Type is a policy setting value type. +// Except for [InvalidValue], which represents an invalid policy setting type, +// and [PreferenceOptionValue], [VisibilityValue], and [DurationValue], +// which have special handling due to their legacy status in the package, +// SettingTypes represent the raw value types readable from policy stores. +type Type int + +const ( + // InvalidValue indicates an invalid policy setting value type. 
+ InvalidValue Type = iota + // BooleanValue indicates a policy setting whose underlying type is a bool. + BooleanValue + // IntegerValue indicates a policy setting whose underlying type is a uint64. + IntegerValue + // StringValue indicates a policy setting whose underlying type is a string. + StringValue + // StringListValue indicates a policy setting whose underlying type is a []string. + StringListValue + // PreferenceOptionValue indicates a three-state policy setting whose + // underlying type is a string, but the actual value is a [PreferenceOption]. + PreferenceOptionValue + // VisibilityValue indicates a two-state boolean-like policy setting whose + // underlying type is a string, but the actual value is a [Visibility]. + VisibilityValue + // DurationValue indicates an interval/period/duration policy setting whose + // underlying type is a string, but the actual value is a [time.Duration]. + DurationValue +) + +// String returns a string representation of t. +func (t Type) String() string { + switch t { + case InvalidValue: + return "Invalid" + case BooleanValue: + return "Boolean" + case IntegerValue: + return "Integer" + case StringValue: + return "String" + case StringListValue: + return "StringList" + case PreferenceOptionValue: + return "PreferenceOption" + case VisibilityValue: + return "Visibility" + case DurationValue: + return "Duration" + default: + panic("unreachable") + } +} + +// ValueType is a constraint that allows Go types corresponding to [Type]. +type ValueType interface { + bool | uint64 | string | []string | Visibility | PreferenceOption | time.Duration +} + +// Definition defines policy key, scope and value type. +type Definition struct { + key Key + scope Scope + typ Type + platforms PlatformList +} + +// NewDefinition returns a new [Definition] with the specified +// key, scope, type and supported platforms (see [PlatformList]). 
+func NewDefinition(k Key, s Scope, t Type, platforms ...string) *Definition { + return &Definition{key: k, scope: s, typ: t, platforms: platforms} +} + +// Key returns a policy setting's identifier. +func (d *Definition) Key() Key { + if d == nil { + return "" + } + return d.key +} + +// Scope reports the broadest [Scope] the policy setting may apply to. +func (d *Definition) Scope() Scope { + if d == nil { + return 0 + } + return d.scope +} + +// Type reports the underlying value type of the policy setting. +func (d *Definition) Type() Type { + if d == nil { + return InvalidValue + } + return d.typ +} + +// IsSupported reports whether the policy setting is supported on the current OS. +func (d *Definition) IsSupported() bool { + if d == nil { + return false + } + return d.platforms.HasCurrent() +} + +// SupportedPlatforms reports platforms on which the policy setting is supported. +// An empty [PlatformList] indicates that s is available on all platforms. +func (d *Definition) SupportedPlatforms() PlatformList { + if d == nil { + return nil + } + return d.platforms +} + +// String implements [fmt.Stringer]. +func (d *Definition) String() string { + if d == nil { + return "(nil)" + } + return fmt.Sprintf("%v(%q, %v)", d.scope, d.key, d.typ) +} + +// Equal reports whether d and d2 have the same key, type and scope. +// It does not check whether both s and s2 are supported on the same platforms. +func (d *Definition) Equal(d2 *Definition) bool { + if d == d2 { + return true + } + if d == nil || d2 == nil { + return false + } + return d.key == d2.key && d.typ == d2.typ && d.scope == d2.scope +} + +// DefinitionMap is a map of setting [Definition] by [Key]. +type DefinitionMap map[Key]*Definition + +var ( + definitions lazy.SyncValue[DefinitionMap] + + definitionsMu sync.Mutex + definitionsList []*Definition + definitionsUsed bool +) + +// Register registers a policy setting with the specified key, scope, value type, +// and an optional list of supported platforms. 
All policy settings must be +// registered before any of them can be used. Register panics if called after +// invoking any functions that use the registered policy definitions. This +// includes calling [Definitions] or [DefinitionOf] directly, or reading any +// policy settings via syspolicy. +func Register(k Key, s Scope, t Type, platforms ...string) { + RegisterDefinition(NewDefinition(k, s, t, platforms...)) +} + +// RegisterDefinition is like [Register], but accepts a [Definition]. +func RegisterDefinition(d *Definition) { + definitionsMu.Lock() + defer definitionsMu.Unlock() + registerLocked(d) +} + +func registerLocked(d *Definition) { + if definitionsUsed { + panic("policy definitions are already in use") + } + definitionsList = append(definitionsList, d) +} + +func settingDefinitions() (DefinitionMap, error) { + return definitions.GetErr(func() (DefinitionMap, error) { + definitionsMu.Lock() + defer definitionsMu.Unlock() + definitionsUsed = true + return DefinitionMapOf(definitionsList) + }) +} + +// DefinitionMapOf returns a [DefinitionMap] with the specified settings, +// or an error if any settings have the same key but different type or scope. +func DefinitionMapOf(settings []*Definition) (DefinitionMap, error) { + m := make(DefinitionMap, len(settings)) + for _, s := range settings { + if existing, exists := m[s.key]; exists { + if existing.Equal(s) { + // Ignore duplicate setting definitions if they match. It is acceptable + // if the same policy setting was registered more than once + // (e.g. by the syspolicy package itself and by iOS/Android code). + existing.platforms.mergeFrom(s.platforms) + continue + } + return nil, fmt.Errorf("duplicate policy definition: %q", s.key) + } + m[s.key] = s + } + return m, nil +} + +// SetDefinitionsForTest allows to register the specified setting definitions +// for the test duration. It is not concurrency-safe, but unlike [Register], +// it does not panic and can be called anytime. 
+// It returns an error if ds contains two different settings with the same [Key]. +func SetDefinitionsForTest(tb lazy.TB, ds ...*Definition) error { + m, err := DefinitionMapOf(ds) + if err != nil { + return err + } + definitions.SetForTest(tb, m, err) + return nil +} + +// DefinitionOf returns a setting definition by key, +// or [ErrNoSuchKey] if the specified key does not exist, +// or an error if there are conflicting policy definitions. +func DefinitionOf(k Key) (*Definition, error) { + ds, err := settingDefinitions() + if err != nil { + return nil, err + } + if d, ok := ds[k]; ok { + return d, nil + } + return nil, ErrNoSuchKey +} + +// Definitions returns all registered setting definitions, +// or an error if different policies were registered under the same name. +func Definitions() ([]*Definition, error) { + ds, err := settingDefinitions() + if err != nil { + return nil, err + } + res := make([]*Definition, 0, len(ds)) + for _, d := range ds { + res = append(res, d) + } + return res, nil +} + +// PlatformList is a list of OSes. +// An empty list indicates that all possible platforms are supported. +type PlatformList []string + +// Has reports whether l contains the target platform. +func (l PlatformList) Has(target string) bool { + if len(l) == 0 { + return true + } + return slices.ContainsFunc(l, func(os string) bool { + return strings.EqualFold(os, target) + }) +} + +// HasCurrent is like Has, but for the current platform. +func (l PlatformList) HasCurrent() bool { + return l.Has(internal.OS()) +} + +// mergeFrom merges l2 into l. Since an empty list indicates no platform restrictions, +// if either l or l2 is empty, the merged result in l will also be empty. +func (l *PlatformList) mergeFrom(l2 PlatformList) { + switch { + case len(*l) == 0: + // No-op. An empty list indicates no platform restrictions. + case len(l2) == 0: + // Merging with an empty list results in an empty list. + *l = l2 + default: + // Append, sort and dedup. + *l = append(*l, l2...) 
+ slices.Sort(*l) + *l = slices.Compact(*l) + } +} diff --git a/util/syspolicy/setting/setting_test.go b/util/syspolicy/setting/setting_test.go new file mode 100644 index 0000000000000..3cc08e7da3d8d --- /dev/null +++ b/util/syspolicy/setting/setting_test.go @@ -0,0 +1,344 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package setting + +import ( + "slices" + "strings" + "testing" + + "tailscale.com/types/lazy" + "tailscale.com/types/ptr" + "tailscale.com/util/syspolicy/internal" +) + +func TestSettingDefinition(t *testing.T) { + tests := []struct { + name string + setting *Definition + osOverride string + wantKey Key + wantScope Scope + wantType Type + wantIsSupported bool + wantSupportedPlatforms PlatformList + wantString string + }{ + { + name: "Nil", + setting: nil, + wantKey: "", + wantScope: 0, + wantType: InvalidValue, + wantIsSupported: false, + wantString: "(nil)", + }, + { + name: "Device/Invalid", + setting: NewDefinition("TestDevicePolicySetting", DeviceSetting, InvalidValue), + wantKey: "TestDevicePolicySetting", + wantScope: DeviceSetting, + wantType: InvalidValue, + wantIsSupported: true, + wantString: `Device("TestDevicePolicySetting", Invalid)`, + }, + { + name: "Device/Integer", + setting: NewDefinition("TestDevicePolicySetting", DeviceSetting, IntegerValue), + wantKey: "TestDevicePolicySetting", + wantScope: DeviceSetting, + wantType: IntegerValue, + wantIsSupported: true, + wantString: `Device("TestDevicePolicySetting", Integer)`, + }, + { + name: "Profile/String", + setting: NewDefinition("TestProfilePolicySetting", ProfileSetting, StringValue), + wantKey: "TestProfilePolicySetting", + wantScope: ProfileSetting, + wantType: StringValue, + wantIsSupported: true, + wantString: `Profile("TestProfilePolicySetting", String)`, + }, + { + name: "Device/StringList", + setting: NewDefinition("AllowedSuggestedExitNodes", DeviceSetting, StringListValue), + wantKey: "AllowedSuggestedExitNodes", + wantScope: 
DeviceSetting, + wantType: StringListValue, + wantIsSupported: true, + wantString: `Device("AllowedSuggestedExitNodes", StringList)`, + }, + { + name: "Device/PreferenceOption", + setting: NewDefinition("AdvertiseExitNode", DeviceSetting, PreferenceOptionValue), + wantKey: "AdvertiseExitNode", + wantScope: DeviceSetting, + wantType: PreferenceOptionValue, + wantIsSupported: true, + wantString: `Device("AdvertiseExitNode", PreferenceOption)`, + }, + { + name: "User/Boolean", + setting: NewDefinition("TestUserPolicySetting", UserSetting, BooleanValue), + wantKey: "TestUserPolicySetting", + wantScope: UserSetting, + wantType: BooleanValue, + wantIsSupported: true, + wantString: `User("TestUserPolicySetting", Boolean)`, + }, + { + name: "User/Visibility", + setting: NewDefinition("AdminConsole", UserSetting, VisibilityValue), + wantKey: "AdminConsole", + wantScope: UserSetting, + wantType: VisibilityValue, + wantIsSupported: true, + wantString: `User("AdminConsole", Visibility)`, + }, + { + name: "User/Duration", + setting: NewDefinition("KeyExpirationNotice", UserSetting, DurationValue), + wantKey: "KeyExpirationNotice", + wantScope: UserSetting, + wantType: DurationValue, + wantIsSupported: true, + wantString: `User("KeyExpirationNotice", Duration)`, + }, + { + name: "SupportedSetting", + setting: NewDefinition("DesktopPolicySetting", DeviceSetting, StringValue, "macos", "windows"), + osOverride: "windows", + wantKey: "DesktopPolicySetting", + wantScope: DeviceSetting, + wantType: StringValue, + wantIsSupported: true, + wantSupportedPlatforms: PlatformList{"macos", "windows"}, + wantString: `Device("DesktopPolicySetting", String)`, + }, + { + name: "UnsupportedSetting", + setting: NewDefinition("AndroidPolicySetting", DeviceSetting, StringValue, "android"), + osOverride: "macos", + wantKey: "AndroidPolicySetting", + wantScope: DeviceSetting, + wantType: StringValue, + wantIsSupported: false, + wantSupportedPlatforms: PlatformList{"android"}, + wantString: 
`Device("AndroidPolicySetting", String)`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.osOverride != "" { + internal.OSForTesting.SetForTest(t, tt.osOverride, nil) + } + if !tt.setting.Equal(tt.setting) { + t.Errorf("the setting should be equal to itself") + } + if tt.setting != nil && !tt.setting.Equal(ptr.To(*tt.setting)) { + t.Errorf("the setting should be equal to its shallow copy") + } + if gotKey := tt.setting.Key(); gotKey != tt.wantKey { + t.Errorf("Key: got %q, want %q", gotKey, tt.wantKey) + } + if gotScope := tt.setting.Scope(); gotScope != tt.wantScope { + t.Errorf("Scope: got %v, want %v", gotScope, tt.wantScope) + } + if gotType := tt.setting.Type(); gotType != tt.wantType { + t.Errorf("Type: got %v, want %v", gotType, tt.wantType) + } + if gotIsSupported := tt.setting.IsSupported(); gotIsSupported != tt.wantIsSupported { + t.Errorf("IsSupported: got %v, want %v", gotIsSupported, tt.wantIsSupported) + } + if gotSupportedPlatforms := tt.setting.SupportedPlatforms(); !slices.Equal(gotSupportedPlatforms, tt.wantSupportedPlatforms) { + t.Errorf("SupportedPlatforms: got %v, want %v", gotSupportedPlatforms, tt.wantSupportedPlatforms) + } + if gotString := tt.setting.String(); gotString != tt.wantString { + t.Errorf("String: got %v, want %v", gotString, tt.wantString) + } + }) + } +} + +func TestRegisterSettingDefinition(t *testing.T) { + const testPolicySettingKey Key = "TestPolicySetting" + tests := []struct { + name string + key Key + wantEq *Definition + wantErr error + }{ + { + name: "GetRegistered", + key: "TestPolicySetting", + wantEq: NewDefinition(testPolicySettingKey, DeviceSetting, StringValue), + }, + { + name: "GetNonRegistered", + key: "OtherPolicySetting", + wantEq: nil, + wantErr: ErrNoSuchKey, + }, + } + + resetSettingDefinitions(t) + Register(testPolicySettingKey, DeviceSetting, StringValue) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, gotErr := DefinitionOf(tt.key) + if 
gotErr != tt.wantErr { + t.Errorf("gotErr %v, wantErr %v", gotErr, tt.wantErr) + } + if !got.Equal(tt.wantEq) { + t.Errorf("got %v, want %v", got, tt.wantEq) + } + }) + } +} + +func TestRegisterAfterUsePanics(t *testing.T) { + resetSettingDefinitions(t) + + Register("TestPolicySetting", DeviceSetting, StringValue) + DefinitionOf("TestPolicySetting") + + func() { + defer func() { + if gotPanic, wantPanic := recover(), "policy definitions are already in use"; gotPanic != wantPanic { + t.Errorf("gotPanic: %q, wantPanic: %q", gotPanic, wantPanic) + } + }() + + Register("TestPolicySetting", DeviceSetting, StringValue) + }() +} + +func TestRegisterDuplicateSettings(t *testing.T) { + + tests := []struct { + name string + settings []*Definition + wantEq *Definition + wantErrStr string + }{ + { + name: "NoConflict/Exact", + settings: []*Definition{ + NewDefinition("TestPolicySetting", DeviceSetting, StringValue), + NewDefinition("TestPolicySetting", DeviceSetting, StringValue), + }, + wantEq: NewDefinition("TestPolicySetting", DeviceSetting, StringValue), + }, + { + name: "NoConflict/MergeOS-First", + settings: []*Definition{ + NewDefinition("TestPolicySetting", DeviceSetting, StringValue, "android", "macos"), + NewDefinition("TestPolicySetting", DeviceSetting, StringValue), // all platforms + }, + wantEq: NewDefinition("TestPolicySetting", DeviceSetting, StringValue), // all platforms + }, + { + name: "NoConflict/MergeOS-Second", + settings: []*Definition{ + NewDefinition("TestPolicySetting", DeviceSetting, StringValue), // all platforms + NewDefinition("TestPolicySetting", DeviceSetting, StringValue, "android", "macos"), + }, + wantEq: NewDefinition("TestPolicySetting", DeviceSetting, StringValue), // all platforms + }, + { + name: "NoConflict/MergeOS-Both", + settings: []*Definition{ + NewDefinition("TestPolicySetting", DeviceSetting, StringValue, "macos"), + NewDefinition("TestPolicySetting", DeviceSetting, StringValue, "windows"), + }, + wantEq: 
NewDefinition("TestPolicySetting", DeviceSetting, StringValue, "macos", "windows"), + }, + { + name: "Conflict/Scope", + settings: []*Definition{ + NewDefinition("TestPolicySetting", DeviceSetting, StringValue), + NewDefinition("TestPolicySetting", UserSetting, StringValue), + }, + wantEq: nil, + wantErrStr: `duplicate policy definition: "TestPolicySetting"`, + }, + { + name: "Conflict/Type", + settings: []*Definition{ + NewDefinition("TestPolicySetting", UserSetting, StringValue), + NewDefinition("TestPolicySetting", UserSetting, IntegerValue), + }, + wantEq: nil, + wantErrStr: `duplicate policy definition: "TestPolicySetting"`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resetSettingDefinitions(t) + for _, s := range tt.settings { + Register(s.Key(), s.Scope(), s.Type(), s.SupportedPlatforms()...) + } + got, err := DefinitionOf("TestPolicySetting") + var gotErrStr string + if err != nil { + gotErrStr = err.Error() + } + if gotErrStr != tt.wantErrStr { + t.Fatalf("ErrStr: got %q, want %q", gotErrStr, tt.wantErrStr) + } + if !got.Equal(tt.wantEq) { + t.Errorf("Definition got %v, want %v", got, tt.wantEq) + } + if !slices.Equal(got.SupportedPlatforms(), tt.wantEq.SupportedPlatforms()) { + t.Errorf("SupportedPlatforms got %v, want %v", got.SupportedPlatforms(), tt.wantEq.SupportedPlatforms()) + } + }) + } +} + +func TestListSettingDefinitions(t *testing.T) { + definitions := []*Definition{ + NewDefinition("TestDevicePolicySetting", DeviceSetting, IntegerValue), + NewDefinition("TestProfilePolicySetting", ProfileSetting, StringValue), + NewDefinition("TestUserPolicySetting", UserSetting, BooleanValue), + NewDefinition("TestStringListPolicySetting", DeviceSetting, StringListValue), + } + if err := SetDefinitionsForTest(t, definitions...); err != nil { + t.Fatalf("SetDefinitionsForTest failed: %v", err) + } + + cmp := func(l, r *Definition) int { + return strings.Compare(string(l.Key()), string(r.Key())) + } + want := 
append([]*Definition{}, definitions...) + slices.SortFunc(want, cmp) + + got, err := Definitions() + if err != nil { + t.Fatalf("Definitions failed: %v", err) + } + slices.SortFunc(got, cmp) + + if !slices.Equal(got, want) { + t.Errorf("got %v, want %v", got, want) + } +} + +func resetSettingDefinitions(t *testing.T) { + t.Cleanup(func() { + definitionsMu.Lock() + definitionsList = nil + definitions = lazy.SyncValue[DefinitionMap]{} + definitionsUsed = false + definitionsMu.Unlock() + }) + + definitionsMu.Lock() + definitionsList = nil + definitions = lazy.SyncValue[DefinitionMap]{} + definitionsUsed = false + definitionsMu.Unlock() +} diff --git a/util/syspolicy/setting/snapshot.go b/util/syspolicy/setting/snapshot.go new file mode 100644 index 0000000000000..306bf759ea086 --- /dev/null +++ b/util/syspolicy/setting/snapshot.go @@ -0,0 +1,173 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package setting + +import ( + "slices" + "strings" + + xmaps "golang.org/x/exp/maps" + "tailscale.com/util/deephash" +) + +// Snapshot is an immutable collection of ([Key], [RawItem]) pairs, representing +// a set of policy settings applied at a specific moment in time. +// A nil pointer to [Snapshot] is valid. +type Snapshot struct { + m map[Key]RawItem + sig deephash.Sum // of m + summary Summary +} + +// NewSnapshot returns a new [Snapshot] with the specified items and options. +func NewSnapshot(items map[Key]RawItem, opts ...SummaryOption) *Snapshot { + return &Snapshot{m: xmaps.Clone(items), sig: deephash.Hash(&items), summary: SummaryWith(opts...)} +} + +// All returns a map of all policy settings in s. +// The returned map must not be modified. +func (s *Snapshot) All() map[Key]RawItem { + if s == nil { + return nil + } + // TODO(nickkhyl): return iter.Seq2[[Key], [RawItem]] in Go 1.23, + // and remove [keyItemPair]. 
+ return s.m +} + +// Get returns the value of the policy setting with the specified key +// or nil if it is not configured or has an error. +func (s *Snapshot) Get(k Key) any { + v, _ := s.GetErr(k) + return v +} + +// GetErr returns the value of the policy setting with the specified key, +// [ErrNotConfigured] if it is not configured, or an error returned by +// the policy Store if the policy setting could not be read. +func (s *Snapshot) GetErr(k Key) (any, error) { + if s != nil { + if s, ok := s.m[k]; ok { + return s.Value(), s.Error() + } + } + return nil, ErrNotConfigured +} + +// GetSetting returns the untyped policy setting with the specified key and true +// if a policy setting with such key has been configured; +// otherwise, it returns zero, false. +func (s *Snapshot) GetSetting(k Key) (setting RawItem, ok bool) { + setting, ok = s.m[k] + return setting, ok +} + +// Equal reports whether s and s2 are equal. +func (s *Snapshot) Equal(s2 *Snapshot) bool { + if !s.EqualItems(s2) { + return false + } + return s.Summary() == s2.Summary() +} + +// EqualItems reports whether items in s and s2 are equal. +func (s *Snapshot) EqualItems(s2 *Snapshot) bool { + if s == s2 { + return true + } + if s.Len() != s2.Len() { + return false + } + if s.Len() == 0 { + return true + } + return s.sig == s2.sig +} + +// Keys return an iterator over keys in s. The iteration order is not specified +// and is not guaranteed to be the same from one call to the next. +func (s *Snapshot) Keys() []Key { + if s.m == nil { + return nil + } + // TODO(nickkhyl): return iter.Seq[Key] in Go 1.23. + return xmaps.Keys(s.m) +} + +// Len reports the number of [RawItem]s in s. +func (s *Snapshot) Len() int { + if s == nil { + return 0 + } + return len(s.m) +} + +// Summary returns information about s as a whole rather than about specific [RawItem]s in it. 
+func (s *Snapshot) Summary() Summary { + if s == nil { + return Summary{} + } + return s.summary +} + +// String implements [fmt.Stringer] +func (s *Snapshot) String() string { + if s.Len() == 0 && s.Summary().IsEmpty() { + return "{Empty}" + } + keys := s.Keys() + slices.Sort(keys) + var sb strings.Builder + if !s.summary.IsEmpty() { + sb.WriteRune('{') + if s.Len() == 0 { + sb.WriteString("Empty, ") + } + sb.WriteString(s.summary.String()) + sb.WriteRune('}') + } + for _, k := range keys { + if sb.Len() != 0 { + sb.WriteRune('\n') + } + sb.WriteString(string(k)) + sb.WriteString(" = ") + sb.WriteString(s.m[k].String()) + } + return sb.String() +} + +// MergeSnapshots returns a [Snapshot] that contains all [RawItem]s +// from snapshot1 and snapshot2 and the [Summary] with the narrower [PolicyScope]. +// If there's a conflict between policy settings in the two snapshots, +// the policy settings from the snapshot with the broader scope take precedence. +// In other words, policy settings configured for the [DeviceScope] win +// over policy settings configured for a user scope. +func MergeSnapshots(snapshot1, snapshot2 *Snapshot) *Snapshot { + scope1, ok1 := snapshot1.Summary().Scope().GetOk() + scope2, ok2 := snapshot2.Summary().Scope().GetOk() + if ok1 && ok2 && scope1.StrictlyContains(scope2) { + // Swap snapshots if snapshot1 has higher precedence than snapshot2. + snapshot1, snapshot2 = snapshot2, snapshot1 + } + if snapshot2.Len() == 0 { + return snapshot1 + } + summaryOpts := make([]SummaryOption, 0, 2) + if scope, ok := snapshot1.Summary().Scope().GetOk(); ok { + // Use the scope from snapshot1, if present, which is the more specific snapshot. + summaryOpts = append(summaryOpts, scope) + } + if snapshot1.Len() == 0 { + if origin, ok := snapshot2.Summary().Origin().GetOk(); ok { + // Use the origin from snapshot2 if snapshot1 is empty. 
+ summaryOpts = append(summaryOpts, origin) + } + return &Snapshot{snapshot2.m, snapshot2.sig, SummaryWith(summaryOpts...)} + } + m := make(map[Key]RawItem, snapshot1.Len()+snapshot2.Len()) + xmaps.Copy(m, snapshot1.m) + xmaps.Copy(m, snapshot2.m) // snapshot2 has higher precedence + return &Snapshot{m, deephash.Hash(&m), SummaryWith(summaryOpts...)} +} diff --git a/util/syspolicy/setting/snapshot_test.go b/util/syspolicy/setting/snapshot_test.go new file mode 100644 index 0000000000000..e198d4a58bfdb --- /dev/null +++ b/util/syspolicy/setting/snapshot_test.go @@ -0,0 +1,435 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package setting + +import ( + "testing" + "time" +) + +func TestMergeSnapshots(t *testing.T) { + tests := []struct { + name string + s1, s2 *Snapshot + want *Snapshot + }{ + { + name: "both-nil", + s1: nil, + s2: nil, + want: NewSnapshot(map[Key]RawItem{}), + }, + { + name: "both-empty", + s1: NewSnapshot(map[Key]RawItem{}), + s2: NewSnapshot(map[Key]RawItem{}), + want: NewSnapshot(map[Key]RawItem{}), + }, + { + name: "first-nil", + s1: nil, + s2: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: true}, + }), + want: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: true}, + }), + }, + { + name: "first-empty", + s1: NewSnapshot(map[Key]RawItem{}), + s2: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: false}, + }), + want: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: false}, + }), + }, + { + name: "second-nil", + s1: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: true}, + }), + s2: nil, + want: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + 
"Setting3": {value: true}, + }), + }, + { + name: "second-empty", + s1: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: false}, + }), + s2: NewSnapshot(map[Key]RawItem{}), + want: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: false}, + }), + }, + { + name: "no-conflicts", + s1: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: false}, + }), + s2: NewSnapshot(map[Key]RawItem{ + "Setting4": {value: 2 * time.Hour}, + "Setting5": {value: VisibleByPolicy}, + "Setting6": {value: ShowChoiceByPolicy}, + }), + want: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: false}, + "Setting4": {value: 2 * time.Hour}, + "Setting5": {value: VisibleByPolicy}, + "Setting6": {value: ShowChoiceByPolicy}, + }), + }, + { + name: "with-conflicts", + s1: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: true}, + }), + s2: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 456}, + "Setting3": {value: false}, + "Setting4": {value: 2 * time.Hour}, + }), + want: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 456}, + "Setting2": {value: "String"}, + "Setting3": {value: false}, + "Setting4": {value: 2 * time.Hour}, + }), + }, + { + name: "with-scope-first-wins", + s1: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: true}, + }, DeviceScope), + s2: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 456}, + "Setting3": {value: false}, + "Setting4": {value: 2 * time.Hour}, + }, CurrentUserScope), + want: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: true}, + "Setting4": {value: 2 * time.Hour}, + }, CurrentUserScope), + }, + { + name: "with-scope-second-wins", 
+ s1: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: true}, + }, CurrentUserScope), + s2: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 456}, + "Setting3": {value: false}, + "Setting4": {value: 2 * time.Hour}, + }, DeviceScope), + want: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 456}, + "Setting2": {value: "String"}, + "Setting3": {value: false}, + "Setting4": {value: 2 * time.Hour}, + }, CurrentUserScope), + }, + { + name: "with-scope-both-empty", + s1: NewSnapshot(map[Key]RawItem{}, CurrentUserScope), + s2: NewSnapshot(map[Key]RawItem{}, DeviceScope), + want: NewSnapshot(map[Key]RawItem{}, CurrentUserScope), + }, + { + name: "with-scope-first-empty", + s1: NewSnapshot(map[Key]RawItem{}, CurrentUserScope), + s2: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: true}}, + DeviceScope, NewNamedOrigin("TestPolicy", DeviceScope)), + want: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: true}, + }, CurrentUserScope, NewNamedOrigin("TestPolicy", DeviceScope)), + }, + { + name: "with-scope-second-empty", + s1: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: true}, + }, CurrentUserScope), + s2: NewSnapshot(map[Key]RawItem{}), + want: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: true}, + }, CurrentUserScope), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := MergeSnapshots(tt.s1, tt.s2) + if !got.Equal(tt.want) { + t.Errorf("got %v, want %v", got, tt.want) + } + }) + } +} + +func TestSnapshotEqual(t *testing.T) { + tests := []struct { + name string + s1, s2 *Snapshot + wantEqual bool + wantEqualItems bool + }{ + { + name: "nil-nil", + s1: nil, + s2: nil, + wantEqual: true, + wantEqualItems: true, + }, + { + 
name: "nil-empty", + s1: nil, + s2: NewSnapshot(map[Key]RawItem{}), + wantEqual: true, + wantEqualItems: true, + }, + { + name: "empty-nil", + s1: NewSnapshot(map[Key]RawItem{}), + s2: nil, + wantEqual: true, + wantEqualItems: true, + }, + { + name: "empty-empty", + s1: NewSnapshot(map[Key]RawItem{}), + s2: NewSnapshot(map[Key]RawItem{}), + wantEqual: true, + wantEqualItems: true, + }, + { + name: "first-nil", + s1: nil, + s2: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: false}, + }), + wantEqual: false, + wantEqualItems: false, + }, + { + name: "first-empty", + s1: NewSnapshot(map[Key]RawItem{}), + s2: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: false}, + }), + wantEqual: false, + wantEqualItems: false, + }, + { + name: "second-nil", + s1: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: true}, + }), + s2: nil, + wantEqual: false, + wantEqualItems: false, + }, + { + name: "second-empty", + s1: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: false}, + }), + s2: NewSnapshot(map[Key]RawItem{}), + wantEqual: false, + wantEqualItems: false, + }, + { + name: "same-items-same-order-no-scope", + s1: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: false}, + }), + s2: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: false}, + }), + wantEqual: true, + wantEqualItems: true, + }, + { + name: "same-items-same-order-same-scope", + s1: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: false}, + }, DeviceScope), + s2: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: false}, + 
}, DeviceScope), + wantEqual: true, + wantEqualItems: true, + }, + { + name: "same-items-different-order-same-scope", + s1: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: false}, + }, DeviceScope), + s2: NewSnapshot(map[Key]RawItem{ + "Setting3": {value: false}, + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + }, DeviceScope), + wantEqual: true, + wantEqualItems: true, + }, + { + name: "same-items-same-order-different-scope", + s1: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: false}, + }, DeviceScope), + s2: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: false}, + }, CurrentUserScope), + wantEqual: false, + wantEqualItems: true, + }, + { + name: "different-items-same-scope", + s1: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 123}, + "Setting2": {value: "String"}, + "Setting3": {value: false}, + }, DeviceScope), + s2: NewSnapshot(map[Key]RawItem{ + "Setting4": {value: 2 * time.Hour}, + "Setting5": {value: VisibleByPolicy}, + "Setting6": {value: ShowChoiceByPolicy}, + }, DeviceScope), + wantEqual: false, + wantEqualItems: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if gotEqual := tt.s1.Equal(tt.s2); gotEqual != tt.wantEqual { + t.Errorf("WantEqual: got %v, want %v", gotEqual, tt.wantEqual) + } + if gotEqualItems := tt.s1.EqualItems(tt.s2); gotEqualItems != tt.wantEqualItems { + t.Errorf("WantEqualItems: got %v, want %v", gotEqualItems, tt.wantEqualItems) + } + }) + } +} + +func TestSnapshotString(t *testing.T) { + tests := []struct { + name string + snapshot *Snapshot + wantString string + }{ + { + name: "nil", + snapshot: nil, + wantString: "{Empty}", + }, + { + name: "empty", + snapshot: NewSnapshot(nil), + wantString: "{Empty}", + }, + { + name: "empty-with-scope", + snapshot: NewSnapshot(nil, DeviceScope), + 
wantString: "{Empty, Device}", + }, + { + name: "empty-with-origin", + snapshot: NewSnapshot(nil, NewNamedOrigin("Test Policy", DeviceScope)), + wantString: "{Empty, Test Policy (Device)}", + }, + { + name: "non-empty", + snapshot: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 2 * time.Hour}, + "Setting2": {value: VisibleByPolicy}, + "Setting3": {value: ShowChoiceByPolicy}, + }, NewNamedOrigin("Test Policy", DeviceScope)), + wantString: `{Test Policy (Device)} +Setting1 = 2h0m0s +Setting2 = show +Setting3 = user-decides`, + }, + { + name: "non-empty-with-item-origin", + snapshot: NewSnapshot(map[Key]RawItem{ + "Setting1": {value: 42, origin: NewNamedOrigin("Test Policy", DeviceScope)}, + }), + wantString: `Setting1 = 42 - {Test Policy (Device)}`, + }, + { + name: "non-empty-with-item-error", + snapshot: NewSnapshot(map[Key]RawItem{ + "Setting1": {err: NewErrorText("bang!")}, + }), + wantString: `Setting1 = Error{"bang!"}`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if gotString := tt.snapshot.String(); gotString != tt.wantString { + t.Errorf("got %v\nwant %v", gotString, tt.wantString) + } + }) + } +} diff --git a/util/syspolicy/setting/summary.go b/util/syspolicy/setting/summary.go new file mode 100644 index 0000000000000..5ff20e0aa2752 --- /dev/null +++ b/util/syspolicy/setting/summary.go @@ -0,0 +1,100 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package setting + +import ( + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "tailscale.com/types/opt" +) + +// Summary is an immutable [PolicyScope] and [Origin]. +type Summary struct { + data summary +} + +type summary struct { + Scope opt.Value[PolicyScope] `json:",omitzero"` + Origin opt.Value[Origin] `json:",omitzero"` +} + +// SummaryWith returns a [Summary] with the specified options. 
+func SummaryWith(opts ...SummaryOption) Summary { + var summary Summary + for _, o := range opts { + o.applySummaryOption(&summary) + } + return summary +} + +// IsEmpty reports whether s is empty. +func (s Summary) IsEmpty() bool { + return s == Summary{} +} + +// Scope reports the [PolicyScope] in s. +func (s Summary) Scope() opt.Value[PolicyScope] { + return s.data.Scope +} + +// Origin reports the [Origin] in s. +func (s Summary) Origin() opt.Value[Origin] { + return s.data.Origin +} + +// String implements [fmt.Stringer]. +func (s Summary) String() string { + if s.IsEmpty() { + return "{Empty}" + } + if origin, ok := s.data.Origin.GetOk(); ok { + return origin.String() + } + return s.data.Scope.String() +} + +// MarshalJSONV2 implements [jsonv2.MarshalerV2]. +func (s Summary) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { + return jsonv2.MarshalEncode(out, &s.data, opts) +} + +// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. +func (s *Summary) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { + return jsonv2.UnmarshalDecode(in, &s.data, opts) +} + +// MarshalJSON implements [json.Marshaler]. +func (s Summary) MarshalJSON() ([]byte, error) { + return jsonv2.Marshal(s) // uses MarshalJSONV2 +} + +// UnmarshalJSON implements [json.Unmarshaler]. 
+func (s *Summary) UnmarshalJSON(b []byte) error { + return jsonv2.Unmarshal(b, s) // uses UnmarshalJSONV2 +} + +// SummaryOption is an option that configures [Summary] +// The following are allowed options: +// +// - [Summary] +// - [PolicyScope] +// - [Origin] +type SummaryOption interface { + applySummaryOption(summary *Summary) +} + +func (s PolicyScope) applySummaryOption(summary *Summary) { + summary.data.Scope.Set(s) +} + +func (o Origin) applySummaryOption(summary *Summary) { + summary.data.Origin.Set(o) + if !summary.data.Scope.IsSet() { + summary.data.Scope.Set(o.Scope()) + } +} + +func (s Summary) applySummaryOption(summary *Summary) { + *summary = s +} diff --git a/util/syspolicy/setting/types.go b/util/syspolicy/setting/types.go new file mode 100644 index 0000000000000..9f110ab034c83 --- /dev/null +++ b/util/syspolicy/setting/types.go @@ -0,0 +1,136 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package setting + +import ( + "encoding" +) + +// PreferenceOption is a policy that governs whether a boolean variable +// is forcibly assigned an administrator-defined value, or allowed to receive +// a user-defined value. +type PreferenceOption byte + +const ( + ShowChoiceByPolicy PreferenceOption = iota + NeverByPolicy + AlwaysByPolicy +) + +// Show returns if the UI option that controls the choice administered by this +// policy should be shown. Currently this is true if and only if the policy is +// [ShowChoiceByPolicy]. +func (p PreferenceOption) Show() bool { + return p == ShowChoiceByPolicy +} + +// ShouldEnable checks if the choice administered by this policy should be +// enabled. If the administrator has chosen a setting, the administrator's +// setting is returned, otherwise userChoice is returned. 
+func (p PreferenceOption) ShouldEnable(userChoice bool) bool {
+	switch p {
+	case NeverByPolicy:
+		return false
+	case AlwaysByPolicy:
+		return true
+	default:
+		return userChoice
+	}
+}
+
+// IsAlways reports whether the preference should always be enabled.
+func (p PreferenceOption) IsAlways() bool {
+	return p == AlwaysByPolicy
+}
+
+// IsNever reports whether the preference should always be disabled.
+func (p PreferenceOption) IsNever() bool {
+	return p == NeverByPolicy
+}
+
+// WillOverride checks if the choice administered by the policy is different
+// from the user's choice.
+func (p PreferenceOption) WillOverride(userChoice bool) bool {
+	return p.ShouldEnable(userChoice) != userChoice
+}
+
+// String returns a string representation of p.
+func (p PreferenceOption) String() string {
+	switch p {
+	case AlwaysByPolicy:
+		return "always"
+	case NeverByPolicy:
+		return "never"
+	default:
+		return "user-decides"
+	}
+}
+
+// MarshalText implements [encoding.TextMarshaler].
+func (p *PreferenceOption) MarshalText() (text []byte, err error) {
+	return []byte(p.String()), nil
+}
+
+// UnmarshalText implements [encoding.TextUnmarshaler].
+// It never fails and sets p to [ShowChoiceByPolicy] if the specified text
+// does not represent a valid [PreferenceOption].
+func (p *PreferenceOption) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "always":
+		*p = AlwaysByPolicy
+	case "never":
+		*p = NeverByPolicy
+	default:
+		*p = ShowChoiceByPolicy
+	}
+	return nil
+}
+
+// Visibility is a policy that controls whether or not a particular
+// component of a user interface is to be shown.
+type Visibility byte
+
+var (
+	_ encoding.TextMarshaler   = (*Visibility)(nil)
+	_ encoding.TextUnmarshaler = (*Visibility)(nil)
+)
+
+const (
+	VisibleByPolicy Visibility = 'v'
+	HiddenByPolicy  Visibility = 'h'
+)
+
+// Show reports whether the UI option administered by this policy should be shown.
+// Currently this is true if the policy is not [HiddenByPolicy].
+func (v Visibility) Show() bool { + return v != HiddenByPolicy +} + +// String returns a string representation of v. +func (v Visibility) String() string { + switch v { + case 'h': + return "hide" + default: + return "show" + } +} + +// MarshalText implements [encoding.TextMarshaler]. +func (v Visibility) MarshalText() (text []byte, err error) { + return []byte(v.String()), nil +} + +// UnmarshalText implements [encoding.TextUnmarshaler]. +// It never fails and sets v to [VisibleByPolicy] if the specified text +// does not represent a valid [Visibility]. +func (v *Visibility) UnmarshalText(text []byte) error { + switch string(text) { + case "hide": + *v = HiddenByPolicy + default: + *v = VisibleByPolicy + } + return nil +} diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go index 76e11e2b6e36a..ccfd83347ddca 100644 --- a/util/syspolicy/syspolicy.go +++ b/util/syspolicy/syspolicy.go @@ -7,6 +7,8 @@ package syspolicy import ( "errors" "time" + + "tailscale.com/util/syspolicy/setting" ) func GetString(key Key, defaultValue string) (string, error) { @@ -45,78 +47,20 @@ func GetStringArray(key Key, defaultValue []string) ([]string, error) { return v, err } -// PreferenceOption is a policy that governs whether a boolean variable -// is forcibly assigned an administrator-defined value, or allowed to receive -// a user-defined value. -type PreferenceOption int - -const ( - showChoiceByPolicy PreferenceOption = iota - neverByPolicy - alwaysByPolicy -) - -// Show returns if the UI option that controls the choice administered by this -// policy should be shown. Currently this is true if and only if the policy is -// showChoiceByPolicy. -func (p PreferenceOption) Show() bool { - return p == showChoiceByPolicy -} - -// ShouldEnable checks if the choice administered by this policy should be -// enabled. If the administrator has chosen a setting, the administrator's -// setting is returned, otherwise userChoice is returned. 
-func (p PreferenceOption) ShouldEnable(userChoice bool) bool { - switch p { - case neverByPolicy: - return false - case alwaysByPolicy: - return true - default: - return userChoice - } -} - -// WillOverride checks if the choice administered by the policy is different -// from the user's choice. -func (p PreferenceOption) WillOverride(userChoice bool) bool { - return p.ShouldEnable(userChoice) != userChoice -} - // GetPreferenceOption loads a policy from the registry that can be // managed by an enterprise policy management system and allows administrative // overrides of users' choices in a way that we do not want tailcontrol to have // the authority to set. It describes user-decides/always/never options, where // "always" and "never" remove the user's ability to make a selection. If not // present or set to a different value, "user-decides" is the default. -func GetPreferenceOption(name Key) (PreferenceOption, error) { - opt, err := GetString(name, "user-decides") +func GetPreferenceOption(name Key) (setting.PreferenceOption, error) { + s, err := GetString(name, "user-decides") if err != nil { - return showChoiceByPolicy, err - } - switch opt { - case "always": - return alwaysByPolicy, nil - case "never": - return neverByPolicy, nil - default: - return showChoiceByPolicy, nil + return setting.ShowChoiceByPolicy, err } -} - -// Visibility is a policy that controls whether or not a particular -// component of a user interface is to be shown. -type Visibility byte - -const ( - visibleByPolicy Visibility = 'v' - hiddenByPolicy Visibility = 'h' -) - -// Show reports whether the UI option administered by this policy should be shown. -// Currently this is true if and only if the policy is visibleByPolicy. 
-func (p Visibility) Show() bool { - return p == visibleByPolicy + var opt setting.PreferenceOption + err = opt.UnmarshalText([]byte(s)) + return opt, err } // GetVisibility loads a policy from the registry that can be managed @@ -124,17 +68,14 @@ func (p Visibility) Show() bool { // for UI elements. The registry value should be a string set to "show" (return // true) or "hide" (return true). If not present or set to a different value, // "show" (return false) is the default. -func GetVisibility(name Key) (Visibility, error) { - opt, err := GetString(name, "show") +func GetVisibility(name Key) (setting.Visibility, error) { + s, err := GetString(name, "show") if err != nil { - return visibleByPolicy, err - } - switch opt { - case "hide": - return hiddenByPolicy, nil - default: - return visibleByPolicy, nil + return setting.VisibleByPolicy, err } + var visibility setting.Visibility + visibility.UnmarshalText([]byte(s)) + return visibility, nil } // GetDuration loads a policy from the registry that can be managed diff --git a/util/syspolicy/syspolicy_test.go b/util/syspolicy/syspolicy_test.go index c2810ebbbbae0..8280aa1dfbdac 100644 --- a/util/syspolicy/syspolicy_test.go +++ b/util/syspolicy/syspolicy_test.go @@ -8,6 +8,8 @@ import ( "slices" "testing" "time" + + "tailscale.com/util/syspolicy/setting" ) // testHandler encompasses all data types returned when testing any of the syspolicy @@ -230,38 +232,38 @@ func TestGetPreferenceOption(t *testing.T) { key Key handlerValue string handlerError error - wantValue PreferenceOption + wantValue setting.PreferenceOption wantError error }{ { name: "always by policy", key: EnableIncomingConnections, handlerValue: "always", - wantValue: alwaysByPolicy, + wantValue: setting.AlwaysByPolicy, }, { name: "never by policy", key: EnableIncomingConnections, handlerValue: "never", - wantValue: neverByPolicy, + wantValue: setting.NeverByPolicy, }, { name: "use default", key: EnableIncomingConnections, handlerValue: "", - wantValue: 
showChoiceByPolicy, + wantValue: setting.ShowChoiceByPolicy, }, { name: "read non-existing value", key: EnableIncomingConnections, handlerError: ErrNoSuchKey, - wantValue: showChoiceByPolicy, + wantValue: setting.ShowChoiceByPolicy, }, { name: "other error is returned", key: EnableIncomingConnections, handlerError: someOtherError, - wantValue: showChoiceByPolicy, + wantValue: setting.ShowChoiceByPolicy, wantError: someOtherError, }, } @@ -291,34 +293,34 @@ func TestGetVisibility(t *testing.T) { key Key handlerValue string handlerError error - wantValue Visibility + wantValue setting.Visibility wantError error }{ { name: "hidden by policy", key: AdminConsoleVisibility, handlerValue: "hide", - wantValue: hiddenByPolicy, + wantValue: setting.HiddenByPolicy, }, { name: "visibility default", key: AdminConsoleVisibility, handlerValue: "show", - wantValue: visibleByPolicy, + wantValue: setting.VisibleByPolicy, }, { name: "read non-existing value", key: AdminConsoleVisibility, handlerValue: "show", handlerError: ErrNoSuchKey, - wantValue: visibleByPolicy, + wantValue: setting.VisibleByPolicy, }, { name: "other error is returned", key: AdminConsoleVisibility, handlerValue: "show", handlerError: someOtherError, - wantValue: visibleByPolicy, + wantValue: setting.VisibleByPolicy, wantError: someOtherError, }, } diff --git a/util/winutil/gp/policylock_windows.go b/util/winutil/gp/policylock_windows.go index f92c534bbc374..95453aa16b110 100644 --- a/util/winutil/gp/policylock_windows.go +++ b/util/winutil/gp/policylock_windows.go @@ -189,6 +189,7 @@ func (l *PolicyLock) lockSlow() (err error) { select { case resultCh <- policyLockResult{handle, err}: // lockSlow has received the result. 
+ break send_result default: select { case <-closing: diff --git a/wgengine/capture/ts-dissector.lua b/wgengine/capture/ts-dissector.lua index f7c09465111f0..ad553d7674193 100644 --- a/wgengine/capture/ts-dissector.lua +++ b/wgengine/capture/ts-dissector.lua @@ -5,9 +5,9 @@ end tsdebug_ll = Proto("tsdebug", "Tailscale debug") PATH = ProtoField.string("tsdebug.PATH","PATH", base.ASCII) SNAT_IP_4 = ProtoField.ipv4("tsdebug.SNAT_IP_4", "Pre-NAT Source IPv4 address") -SNAT_IP_6 = ProtoField.ipv4("tsdebug.SNAT_IP_6", "Pre-NAT Source IPv6 address") +SNAT_IP_6 = ProtoField.ipv6("tsdebug.SNAT_IP_6", "Pre-NAT Source IPv6 address") DNAT_IP_4 = ProtoField.ipv4("tsdebug.DNAT_IP_4", "Pre-NAT Dest IPv4 address") -DNAT_IP_6 = ProtoField.ipv4("tsdebug.DNAT_IP_6", "Pre-NAT Dest IPv6 address") +DNAT_IP_6 = ProtoField.ipv6("tsdebug.DNAT_IP_6", "Pre-NAT Dest IPv6 address") tsdebug_ll.fields = {PATH, SNAT_IP_4, SNAT_IP_6, DNAT_IP_4, DNAT_IP_6} function tsdebug_ll.dissector(buffer, pinfo, tree) @@ -63,7 +63,7 @@ local ts_dissectors = DissectorTable.new("ts.proto", "Tailscale-specific dissect tsdisco_meta = Proto("tsdisco", "Tailscale DISCO metadata") DISCO_IS_DERP = ProtoField.bool("tsdisco.IS_DERP","From DERP") DISCO_SRC_IP_4 = ProtoField.ipv4("tsdisco.SRC_IP_4", "Source IPv4 address") -DISCO_SRC_IP_6 = ProtoField.ipv4("tsdisco.SRC_IP_6", "Source IPv6 address") +DISCO_SRC_IP_6 = ProtoField.ipv6("tsdisco.SRC_IP_6", "Source IPv6 address") DISCO_SRC_PORT = ProtoField.uint16("tsdisco.SRC_PORT","Source port", base.DEC) DISCO_DERP_PUB = ProtoField.bytes("tsdisco.DERP_PUB", "DERP public key", base.SPACE) tsdisco_meta.fields = {DISCO_IS_DERP, DISCO_SRC_PORT, DISCO_DERP_PUB, DISCO_SRC_IP_4, DISCO_SRC_IP_6} diff --git a/wgengine/magicsock/batching_conn.go b/wgengine/magicsock/batching_conn.go index 242f31c372d35..5320d1cafa59a 100644 --- a/wgengine/magicsock/batching_conn.go +++ b/wgengine/magicsock/batching_conn.go @@ -4,200 +4,22 @@ package magicsock import ( - "errors" - "net" "net/netip" - 
"sync" - "sync/atomic" - "syscall" - "time" + "golang.org/x/net/ipv4" "golang.org/x/net/ipv6" - "tailscale.com/net/neterror" "tailscale.com/types/nettype" ) -// xnetBatchReaderWriter defines the batching i/o methods of -// golang.org/x/net/ipv4.PacketConn (and ipv6.PacketConn). -// TODO(jwhited): This should eventually be replaced with the standard library -// implementation of https://github.com/golang/go/issues/45886 -type xnetBatchReaderWriter interface { - xnetBatchReader - xnetBatchWriter -} - -type xnetBatchReader interface { - ReadBatch([]ipv6.Message, int) (int, error) -} - -type xnetBatchWriter interface { - WriteBatch([]ipv6.Message, int) (int, error) -} - -// batchingUDPConn is a UDP socket that provides batched i/o. -type batchingUDPConn struct { - pc nettype.PacketConn - xpc xnetBatchReaderWriter - rxOffload bool // supports UDP GRO or similar - txOffload atomic.Bool // supports UDP GSO or similar - setGSOSizeInControl func(control *[]byte, gsoSize uint16) // typically setGSOSizeInControl(); swappable for testing - getGSOSizeFromControl func(control []byte) (int, error) // typically getGSOSizeFromControl(); swappable for testing - sendBatchPool sync.Pool -} - -func (c *batchingUDPConn) ReadFromUDPAddrPort(p []byte) (n int, addr netip.AddrPort, err error) { - if c.rxOffload { - // UDP_GRO is opt-in on Linux via setsockopt(). Once enabled you may - // receive a "monster datagram" from any read call. The ReadFrom() API - // does not support passing the GSO size and is unsafe to use in such a - // case. Other platforms may vary in behavior, but we go with the most - // conservative approach to prevent this from becoming a footgun in the - // future. 
- return 0, netip.AddrPort{}, errors.New("rx UDP offload is enabled on this socket, single packet reads are unavailable") - } - return c.pc.ReadFromUDPAddrPort(p) -} - -func (c *batchingUDPConn) SetDeadline(t time.Time) error { - return c.pc.SetDeadline(t) -} - -func (c *batchingUDPConn) SetReadDeadline(t time.Time) error { - return c.pc.SetReadDeadline(t) -} - -func (c *batchingUDPConn) SetWriteDeadline(t time.Time) error { - return c.pc.SetWriteDeadline(t) -} - -const ( - // This was initially established for Linux, but may split out to - // GOOS-specific values later. It originates as UDP_MAX_SEGMENTS in the - // kernel's TX path, and UDP_GRO_CNT_MAX for RX. - udpSegmentMaxDatagrams = 64 -) - -const ( - // Exceeding these values results in EMSGSIZE. - maxIPv4PayloadLen = 1<<16 - 1 - 20 - 8 - maxIPv6PayloadLen = 1<<16 - 1 - 8 +var ( + // This acts as a compile-time check for our usage of ipv6.Message in + // batchingConn for both IPv6 and IPv4 operations. + _ ipv6.Message = ipv4.Message{} ) -// coalesceMessages iterates msgs, coalescing them where possible while -// maintaining datagram order. All msgs have their Addr field set to addr. 
-func (c *batchingUDPConn) coalesceMessages(addr *net.UDPAddr, buffs [][]byte, msgs []ipv6.Message) int { - var ( - base = -1 // index of msg we are currently coalescing into - gsoSize int // segmentation size of msgs[base] - dgramCnt int // number of dgrams coalesced into msgs[base] - endBatch bool // tracking flag to start a new batch on next iteration of buffs - ) - maxPayloadLen := maxIPv4PayloadLen - if addr.IP.To4() == nil { - maxPayloadLen = maxIPv6PayloadLen - } - for i, buff := range buffs { - if i > 0 { - msgLen := len(buff) - baseLenBefore := len(msgs[base].Buffers[0]) - freeBaseCap := cap(msgs[base].Buffers[0]) - baseLenBefore - if msgLen+baseLenBefore <= maxPayloadLen && - msgLen <= gsoSize && - msgLen <= freeBaseCap && - dgramCnt < udpSegmentMaxDatagrams && - !endBatch { - msgs[base].Buffers[0] = append(msgs[base].Buffers[0], make([]byte, msgLen)...) - copy(msgs[base].Buffers[0][baseLenBefore:], buff) - if i == len(buffs)-1 { - c.setGSOSizeInControl(&msgs[base].OOB, uint16(gsoSize)) - } - dgramCnt++ - if msgLen < gsoSize { - // A smaller than gsoSize packet on the tail is legal, but - // it must end the batch. - endBatch = true - } - continue - } - } - if dgramCnt > 1 { - c.setGSOSizeInControl(&msgs[base].OOB, uint16(gsoSize)) - } - // Reset prior to incrementing base since we are preparing to start a - // new potential batch. 
- endBatch = false - base++ - gsoSize = len(buff) - msgs[base].OOB = msgs[base].OOB[:0] - msgs[base].Buffers[0] = buff - msgs[base].Addr = addr - dgramCnt = 1 - } - return base + 1 -} - -type sendBatch struct { - msgs []ipv6.Message - ua *net.UDPAddr -} - -func (c *batchingUDPConn) getSendBatch() *sendBatch { - batch := c.sendBatchPool.Get().(*sendBatch) - return batch -} - -func (c *batchingUDPConn) putSendBatch(batch *sendBatch) { - for i := range batch.msgs { - batch.msgs[i] = ipv6.Message{Buffers: batch.msgs[i].Buffers, OOB: batch.msgs[i].OOB} - } - c.sendBatchPool.Put(batch) -} - -func (c *batchingUDPConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort) error { - batch := c.getSendBatch() - defer c.putSendBatch(batch) - if addr.Addr().Is6() { - as16 := addr.Addr().As16() - copy(batch.ua.IP, as16[:]) - batch.ua.IP = batch.ua.IP[:16] - } else { - as4 := addr.Addr().As4() - copy(batch.ua.IP, as4[:]) - batch.ua.IP = batch.ua.IP[:4] - } - batch.ua.Port = int(addr.Port()) - var ( - n int - retried bool - ) -retry: - if c.txOffload.Load() { - n = c.coalesceMessages(batch.ua, buffs, batch.msgs) - } else { - for i := range buffs { - batch.msgs[i].Buffers[0] = buffs[i] - batch.msgs[i].Addr = batch.ua - batch.msgs[i].OOB = batch.msgs[i].OOB[:0] - } - n = len(buffs) - } - - err := c.writeBatch(batch.msgs[:n]) - if err != nil && c.txOffload.Load() && neterror.ShouldDisableUDPGSO(err) { - c.txOffload.Store(false) - retried = true - goto retry - } - if retried { - return neterror.ErrUDPGSODisabled{OnLaddr: c.pc.LocalAddr().String(), RetryErr: err} - } - return err -} - -func (c *batchingUDPConn) SyscallConn() (syscall.RawConn, error) { - sc, ok := c.pc.(syscall.Conn) - if !ok { - return nil, errUnsupportedConnType - } - return sc.SyscallConn() +// batchingConn is a nettype.PacketConn that provides batched i/o. 
+type batchingConn interface { + nettype.PacketConn + ReadBatch(msgs []ipv6.Message, flags int) (n int, err error) + WriteBatchTo(buffs [][]byte, addr netip.AddrPort) error } diff --git a/wgengine/magicsock/batching_conn_default.go b/wgengine/magicsock/batching_conn_default.go new file mode 100644 index 0000000000000..519cf8082d5ac --- /dev/null +++ b/wgengine/magicsock/batching_conn_default.go @@ -0,0 +1,14 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !linux + +package magicsock + +import ( + "tailscale.com/types/nettype" +) + +func tryUpgradeToBatchingConn(pconn nettype.PacketConn, _ string, _ int) nettype.PacketConn { + return pconn +} diff --git a/wgengine/magicsock/batching_conn_linux.go b/wgengine/magicsock/batching_conn_linux.go new file mode 100644 index 0000000000000..25bf974b022ba --- /dev/null +++ b/wgengine/magicsock/batching_conn_linux.go @@ -0,0 +1,424 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package magicsock + +import ( + "encoding/binary" + "errors" + "fmt" + "net" + "net/netip" + "runtime" + "strings" + "sync" + "sync/atomic" + "syscall" + "time" + "unsafe" + + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" + "golang.org/x/sys/unix" + "tailscale.com/hostinfo" + "tailscale.com/net/neterror" + "tailscale.com/types/nettype" +) + +// xnetBatchReaderWriter defines the batching i/o methods of +// golang.org/x/net/ipv4.PacketConn (and ipv6.PacketConn). +// TODO(jwhited): This should eventually be replaced with the standard library +// implementation of https://github.com/golang/go/issues/45886 +type xnetBatchReaderWriter interface { + xnetBatchReader + xnetBatchWriter +} + +type xnetBatchReader interface { + ReadBatch([]ipv6.Message, int) (int, error) +} + +type xnetBatchWriter interface { + WriteBatch([]ipv6.Message, int) (int, error) +} + +// linuxBatchingConn is a UDP socket that provides batched i/o. It implements +// batchingConn. 
+type linuxBatchingConn struct { + pc nettype.PacketConn + xpc xnetBatchReaderWriter + rxOffload bool // supports UDP GRO or similar + txOffload atomic.Bool // supports UDP GSO or similar + setGSOSizeInControl func(control *[]byte, gsoSize uint16) // typically setGSOSizeInControl(); swappable for testing + getGSOSizeFromControl func(control []byte) (int, error) // typically getGSOSizeFromControl(); swappable for testing + sendBatchPool sync.Pool +} + +func (c *linuxBatchingConn) ReadFromUDPAddrPort(p []byte) (n int, addr netip.AddrPort, err error) { + if c.rxOffload { + // UDP_GRO is opt-in on Linux via setsockopt(). Once enabled you may + // receive a "monster datagram" from any read call. The ReadFrom() API + // does not support passing the GSO size and is unsafe to use in such a + // case. Other platforms may vary in behavior, but we go with the most + // conservative approach to prevent this from becoming a footgun in the + // future. + return 0, netip.AddrPort{}, errors.New("rx UDP offload is enabled on this socket, single packet reads are unavailable") + } + return c.pc.ReadFromUDPAddrPort(p) +} + +func (c *linuxBatchingConn) SetDeadline(t time.Time) error { + return c.pc.SetDeadline(t) +} + +func (c *linuxBatchingConn) SetReadDeadline(t time.Time) error { + return c.pc.SetReadDeadline(t) +} + +func (c *linuxBatchingConn) SetWriteDeadline(t time.Time) error { + return c.pc.SetWriteDeadline(t) +} + +const ( + // This was initially established for Linux, but may split out to + // GOOS-specific values later. It originates as UDP_MAX_SEGMENTS in the + // kernel's TX path, and UDP_GRO_CNT_MAX for RX. + udpSegmentMaxDatagrams = 64 +) + +const ( + // Exceeding these values results in EMSGSIZE. + maxIPv4PayloadLen = 1<<16 - 1 - 20 - 8 + maxIPv6PayloadLen = 1<<16 - 1 - 8 +) + +// coalesceMessages iterates msgs, coalescing them where possible while +// maintaining datagram order. All msgs have their Addr field set to addr. 
+func (c *linuxBatchingConn) coalesceMessages(addr *net.UDPAddr, buffs [][]byte, msgs []ipv6.Message) int { + var ( + base = -1 // index of msg we are currently coalescing into + gsoSize int // segmentation size of msgs[base] + dgramCnt int // number of dgrams coalesced into msgs[base] + endBatch bool // tracking flag to start a new batch on next iteration of buffs + ) + maxPayloadLen := maxIPv4PayloadLen + if addr.IP.To4() == nil { + maxPayloadLen = maxIPv6PayloadLen + } + for i, buff := range buffs { + if i > 0 { + msgLen := len(buff) + baseLenBefore := len(msgs[base].Buffers[0]) + freeBaseCap := cap(msgs[base].Buffers[0]) - baseLenBefore + if msgLen+baseLenBefore <= maxPayloadLen && + msgLen <= gsoSize && + msgLen <= freeBaseCap && + dgramCnt < udpSegmentMaxDatagrams && + !endBatch { + msgs[base].Buffers[0] = append(msgs[base].Buffers[0], make([]byte, msgLen)...) + copy(msgs[base].Buffers[0][baseLenBefore:], buff) + if i == len(buffs)-1 { + c.setGSOSizeInControl(&msgs[base].OOB, uint16(gsoSize)) + } + dgramCnt++ + if msgLen < gsoSize { + // A smaller than gsoSize packet on the tail is legal, but + // it must end the batch. + endBatch = true + } + continue + } + } + if dgramCnt > 1 { + c.setGSOSizeInControl(&msgs[base].OOB, uint16(gsoSize)) + } + // Reset prior to incrementing base since we are preparing to start a + // new potential batch. 
+ endBatch = false + base++ + gsoSize = len(buff) + msgs[base].OOB = msgs[base].OOB[:0] + msgs[base].Buffers[0] = buff + msgs[base].Addr = addr + dgramCnt = 1 + } + return base + 1 +} + +type sendBatch struct { + msgs []ipv6.Message + ua *net.UDPAddr +} + +func (c *linuxBatchingConn) getSendBatch() *sendBatch { + batch := c.sendBatchPool.Get().(*sendBatch) + return batch +} + +func (c *linuxBatchingConn) putSendBatch(batch *sendBatch) { + for i := range batch.msgs { + batch.msgs[i] = ipv6.Message{Buffers: batch.msgs[i].Buffers, OOB: batch.msgs[i].OOB} + } + c.sendBatchPool.Put(batch) +} + +func (c *linuxBatchingConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort) error { + batch := c.getSendBatch() + defer c.putSendBatch(batch) + if addr.Addr().Is6() { + as16 := addr.Addr().As16() + copy(batch.ua.IP, as16[:]) + batch.ua.IP = batch.ua.IP[:16] + } else { + as4 := addr.Addr().As4() + copy(batch.ua.IP, as4[:]) + batch.ua.IP = batch.ua.IP[:4] + } + batch.ua.Port = int(addr.Port()) + var ( + n int + retried bool + ) +retry: + if c.txOffload.Load() { + n = c.coalesceMessages(batch.ua, buffs, batch.msgs) + } else { + for i := range buffs { + batch.msgs[i].Buffers[0] = buffs[i] + batch.msgs[i].Addr = batch.ua + batch.msgs[i].OOB = batch.msgs[i].OOB[:0] + } + n = len(buffs) + } + + err := c.writeBatch(batch.msgs[:n]) + if err != nil && c.txOffload.Load() && neterror.ShouldDisableUDPGSO(err) { + c.txOffload.Store(false) + retried = true + goto retry + } + if retried { + return neterror.ErrUDPGSODisabled{OnLaddr: c.pc.LocalAddr().String(), RetryErr: err} + } + return err +} + +func (c *linuxBatchingConn) SyscallConn() (syscall.RawConn, error) { + sc, ok := c.pc.(syscall.Conn) + if !ok { + return nil, errUnsupportedConnType + } + return sc.SyscallConn() +} + +func (c *linuxBatchingConn) writeBatch(msgs []ipv6.Message) error { + var head int + for { + n, err := c.xpc.WriteBatch(msgs[head:], 0) + if err != nil || n == len(msgs[head:]) { + // Returning the number of packets 
written would require + // unraveling individual msg len and gso size during a coalesced + // write. The top of the call stack disregards partial success, + // so keep this simple for now. + return err + } + head += n + } +} + +// splitCoalescedMessages splits coalesced messages from the tail of dst +// beginning at index 'firstMsgAt' into the head of the same slice. It reports +// the number of elements to evaluate in msgs for nonzero len (msgs[i].N). An +// error is returned if a socket control message cannot be parsed or a split +// operation would overflow msgs. +func (c *linuxBatchingConn) splitCoalescedMessages(msgs []ipv6.Message, firstMsgAt int) (n int, err error) { + for i := firstMsgAt; i < len(msgs); i++ { + msg := &msgs[i] + if msg.N == 0 { + return n, err + } + var ( + gsoSize int + start int + end = msg.N + numToSplit = 1 + ) + gsoSize, err = c.getGSOSizeFromControl(msg.OOB[:msg.NN]) + if err != nil { + return n, err + } + if gsoSize > 0 { + numToSplit = (msg.N + gsoSize - 1) / gsoSize + end = gsoSize + } + for j := 0; j < numToSplit; j++ { + if n > i { + return n, errors.New("splitting coalesced packet resulted in overflow") + } + copied := copy(msgs[n].Buffers[0], msg.Buffers[0][start:end]) + msgs[n].N = copied + msgs[n].Addr = msg.Addr + start = end + end += gsoSize + if end > msg.N { + end = msg.N + } + n++ + } + if i != n-1 { + // It is legal for bytes to move within msg.Buffers[0] as a result + // of splitting, so we only zero the source msg len when it is not + // the destination of the last split operation above. + msg.N = 0 + } + } + return n, nil +} + +func (c *linuxBatchingConn) ReadBatch(msgs []ipv6.Message, flags int) (n int, err error) { + if !c.rxOffload || len(msgs) < 2 { + return c.xpc.ReadBatch(msgs, flags) + } + // Read into the tail of msgs, split into the head. 
+ readAt := len(msgs) - 2 + numRead, err := c.xpc.ReadBatch(msgs[readAt:], 0) + if err != nil || numRead == 0 { + return 0, err + } + return c.splitCoalescedMessages(msgs, readAt) +} + +func (c *linuxBatchingConn) LocalAddr() net.Addr { + return c.pc.LocalAddr().(*net.UDPAddr) +} + +func (c *linuxBatchingConn) WriteToUDPAddrPort(b []byte, addr netip.AddrPort) (int, error) { + return c.pc.WriteToUDPAddrPort(b, addr) +} + +func (c *linuxBatchingConn) Close() error { + return c.pc.Close() +} + +// tryEnableUDPOffload attempts to enable the UDP_GRO socket option on pconn, +// and returns two booleans indicating TX and RX UDP offload support. +func tryEnableUDPOffload(pconn nettype.PacketConn) (hasTX bool, hasRX bool) { + if c, ok := pconn.(*net.UDPConn); ok { + rc, err := c.SyscallConn() + if err != nil { + return + } + err = rc.Control(func(fd uintptr) { + _, errSyscall := syscall.GetsockoptInt(int(fd), unix.IPPROTO_UDP, unix.UDP_SEGMENT) + hasTX = errSyscall == nil + errSyscall = syscall.SetsockoptInt(int(fd), unix.IPPROTO_UDP, unix.UDP_GRO, 1) + hasRX = errSyscall == nil + }) + if err != nil { + return false, false + } + } + return hasTX, hasRX +} + +// getGSOSizeFromControl returns the GSO size found in control. If no GSO size +// is found or the len(control) < unix.SizeofCmsghdr, this function returns 0. +// A non-nil error will be returned if len(control) > unix.SizeofCmsghdr but +// its contents cannot be parsed as a socket control message. 
+func getGSOSizeFromControl(control []byte) (int, error) {
+	var (
+		hdr  unix.Cmsghdr
+		data []byte
+		rem  = control
+		err  error
+	)
+
+	for len(rem) > unix.SizeofCmsghdr {
+		// Parse the *remaining* bytes each iteration; parsing `control`
+		// here would re-read the first cmsg forever and never terminate
+		// when the buffer holds more than one control message.
+		hdr, data, rem, err = unix.ParseOneSocketControlMessage(rem)
+		if err != nil {
+			return 0, fmt.Errorf("error parsing socket control message: %w", err)
+		}
+		if hdr.Level == unix.SOL_UDP && hdr.Type == unix.UDP_GRO && len(data) >= 2 {
+			return int(binary.NativeEndian.Uint16(data[:2])), nil
+		}
+	}
+	return 0, nil
+}
+
+// setGSOSizeInControl sets a socket control message in control containing
+// gsoSize. If len(control) < controlMessageSize control's len will be set to 0.
+func setGSOSizeInControl(control *[]byte, gsoSize uint16) {
+	*control = (*control)[:0]
+	if cap(*control) < int(unsafe.Sizeof(unix.Cmsghdr{})) {
+		return
+	}
+	if cap(*control) < controlMessageSize {
+		return
+	}
+	*control = (*control)[:cap(*control)]
+	hdr := (*unix.Cmsghdr)(unsafe.Pointer(&(*control)[0]))
+	hdr.Level = unix.SOL_UDP
+	hdr.Type = unix.UDP_SEGMENT
+	hdr.SetLen(unix.CmsgLen(2))
+	binary.NativeEndian.PutUint16((*control)[unix.SizeofCmsghdr:], gsoSize)
+	*control = (*control)[:unix.CmsgSpace(2)]
+}
+
+// tryUpgradeToBatchingConn probes the capabilities of the OS and pconn, and
+// upgrades pconn to a *linuxBatchingConn if appropriate.
+func tryUpgradeToBatchingConn(pconn nettype.PacketConn, network string, batchSize int) nettype.PacketConn {
+	if runtime.GOOS != "linux" {
+		// Exclude Android.
+		return pconn
+	}
+	if network != "udp4" && network != "udp6" {
+		return pconn
+	}
+	if strings.HasPrefix(hostinfo.GetOSVersion(), "2.") {
+		// recvmmsg/sendmmsg were added in 2.6.33, but we support down to
+		// 2.6.32 for old NAS devices. See https://github.com/tailscale/tailscale/issues/6807.
+		// As a cheap heuristic: if the Linux kernel starts with "2", just
+		// consider it too old for mmsg. Nobody who cares about performance runs
+		// such ancient kernels.
UDP offload was added much later, so no + // upgrades are available. + return pconn + } + uc, ok := pconn.(*net.UDPConn) + if !ok { + return pconn + } + b := &linuxBatchingConn{ + pc: pconn, + getGSOSizeFromControl: getGSOSizeFromControl, + setGSOSizeInControl: setGSOSizeInControl, + sendBatchPool: sync.Pool{ + New: func() any { + ua := &net.UDPAddr{ + IP: make([]byte, 16), + } + msgs := make([]ipv6.Message, batchSize) + for i := range msgs { + msgs[i].Buffers = make([][]byte, 1) + msgs[i].Addr = ua + msgs[i].OOB = make([]byte, controlMessageSize) + } + return &sendBatch{ + ua: ua, + msgs: msgs, + } + }, + }, + } + switch network { + case "udp4": + b.xpc = ipv4.NewPacketConn(uc) + case "udp6": + b.xpc = ipv6.NewPacketConn(uc) + default: + panic("bogus network") + } + var txOffload bool + txOffload, b.rxOffload = tryEnableUDPOffload(uc) + b.txOffload.Store(txOffload) + return b +} diff --git a/wgengine/magicsock/batching_conn_linux_test.go b/wgengine/magicsock/batching_conn_linux_test.go new file mode 100644 index 0000000000000..5c22bf1c73cf4 --- /dev/null +++ b/wgengine/magicsock/batching_conn_linux_test.go @@ -0,0 +1,244 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package magicsock + +import ( + "encoding/binary" + "net" + "testing" + + "golang.org/x/net/ipv6" +) + +func setGSOSize(control *[]byte, gsoSize uint16) { + *control = (*control)[:cap(*control)] + binary.LittleEndian.PutUint16(*control, gsoSize) +} + +func getGSOSize(control []byte) (int, error) { + if len(control) < 2 { + return 0, nil + } + return int(binary.LittleEndian.Uint16(control)), nil +} + +func Test_linuxBatchingConn_splitCoalescedMessages(t *testing.T) { + c := &linuxBatchingConn{ + setGSOSizeInControl: setGSOSize, + getGSOSizeFromControl: getGSOSize, + } + + newMsg := func(n, gso int) ipv6.Message { + msg := ipv6.Message{ + Buffers: [][]byte{make([]byte, 1024)}, + N: n, + OOB: make([]byte, 2), + } + binary.LittleEndian.PutUint16(msg.OOB, 
uint16(gso)) + if gso > 0 { + msg.NN = 2 + } + return msg + } + + cases := []struct { + name string + msgs []ipv6.Message + firstMsgAt int + wantNumEval int + wantMsgLens []int + wantErr bool + }{ + { + name: "second last split last empty", + msgs: []ipv6.Message{ + newMsg(0, 0), + newMsg(0, 0), + newMsg(3, 1), + newMsg(0, 0), + }, + firstMsgAt: 2, + wantNumEval: 3, + wantMsgLens: []int{1, 1, 1, 0}, + wantErr: false, + }, + { + name: "second last no split last empty", + msgs: []ipv6.Message{ + newMsg(0, 0), + newMsg(0, 0), + newMsg(1, 0), + newMsg(0, 0), + }, + firstMsgAt: 2, + wantNumEval: 1, + wantMsgLens: []int{1, 0, 0, 0}, + wantErr: false, + }, + { + name: "second last no split last no split", + msgs: []ipv6.Message{ + newMsg(0, 0), + newMsg(0, 0), + newMsg(1, 0), + newMsg(1, 0), + }, + firstMsgAt: 2, + wantNumEval: 2, + wantMsgLens: []int{1, 1, 0, 0}, + wantErr: false, + }, + { + name: "second last no split last split", + msgs: []ipv6.Message{ + newMsg(0, 0), + newMsg(0, 0), + newMsg(1, 0), + newMsg(3, 1), + }, + firstMsgAt: 2, + wantNumEval: 4, + wantMsgLens: []int{1, 1, 1, 1}, + wantErr: false, + }, + { + name: "second last split last split", + msgs: []ipv6.Message{ + newMsg(0, 0), + newMsg(0, 0), + newMsg(2, 1), + newMsg(2, 1), + }, + firstMsgAt: 2, + wantNumEval: 4, + wantMsgLens: []int{1, 1, 1, 1}, + wantErr: false, + }, + { + name: "second last no split last split overflow", + msgs: []ipv6.Message{ + newMsg(0, 0), + newMsg(0, 0), + newMsg(1, 0), + newMsg(4, 1), + }, + firstMsgAt: 2, + wantNumEval: 4, + wantMsgLens: []int{1, 1, 1, 1}, + wantErr: true, + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + got, err := c.splitCoalescedMessages(tt.msgs, 2) + if err != nil && !tt.wantErr { + t.Fatalf("err: %v", err) + } + if got != tt.wantNumEval { + t.Fatalf("got to eval: %d want: %d", got, tt.wantNumEval) + } + for i, msg := range tt.msgs { + if msg.N != tt.wantMsgLens[i] { + t.Fatalf("msg[%d].N: %d want: %d", i, msg.N, 
tt.wantMsgLens[i]) + } + } + }) + } +} + +func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { + c := &linuxBatchingConn{ + setGSOSizeInControl: setGSOSize, + getGSOSizeFromControl: getGSOSize, + } + + cases := []struct { + name string + buffs [][]byte + wantLens []int + wantGSO []int + }{ + { + name: "one message no coalesce", + buffs: [][]byte{ + make([]byte, 1, 1), + }, + wantLens: []int{1}, + wantGSO: []int{0}, + }, + { + name: "two messages equal len coalesce", + buffs: [][]byte{ + make([]byte, 1, 2), + make([]byte, 1, 1), + }, + wantLens: []int{2}, + wantGSO: []int{1}, + }, + { + name: "two messages unequal len coalesce", + buffs: [][]byte{ + make([]byte, 2, 3), + make([]byte, 1, 1), + }, + wantLens: []int{3}, + wantGSO: []int{2}, + }, + { + name: "three messages second unequal len coalesce", + buffs: [][]byte{ + make([]byte, 2, 3), + make([]byte, 1, 1), + make([]byte, 2, 2), + }, + wantLens: []int{3, 2}, + wantGSO: []int{2, 0}, + }, + { + name: "three messages limited cap coalesce", + buffs: [][]byte{ + make([]byte, 2, 4), + make([]byte, 2, 2), + make([]byte, 2, 2), + }, + wantLens: []int{4, 2}, + wantGSO: []int{2, 0}, + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + addr := &net.UDPAddr{ + IP: net.ParseIP("127.0.0.1"), + Port: 1, + } + msgs := make([]ipv6.Message, len(tt.buffs)) + for i := range msgs { + msgs[i].Buffers = make([][]byte, 1) + msgs[i].OOB = make([]byte, 0, 2) + } + got := c.coalesceMessages(addr, tt.buffs, msgs) + if got != len(tt.wantLens) { + t.Fatalf("got len %d want: %d", got, len(tt.wantLens)) + } + for i := range got { + if msgs[i].Addr != addr { + t.Errorf("msgs[%d].Addr != passed addr", i) + } + gotLen := len(msgs[i].Buffers[0]) + if gotLen != tt.wantLens[i] { + t.Errorf("len(msgs[%d].Buffers[0]) %d != %d", i, gotLen, tt.wantLens[i]) + } + gotGSO, err := getGSOSize(msgs[i].OOB) + if err != nil { + t.Fatalf("msgs[%d] getGSOSize err: %v", i, err) + } + if gotGSO != tt.wantGSO[i] { + 
t.Errorf("msgs[%d] gsoSize %d != %d", i, gotGSO, tt.wantGSO[i]) + } + } + }) + } +} diff --git a/wgengine/magicsock/cloudinfo.go b/wgengine/magicsock/cloudinfo.go new file mode 100644 index 0000000000000..1de369631314c --- /dev/null +++ b/wgengine/magicsock/cloudinfo.go @@ -0,0 +1,182 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !(ios || android || js) + +package magicsock + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/netip" + "slices" + "strings" + "time" + + "tailscale.com/types/logger" + "tailscale.com/util/cloudenv" +) + +const maxCloudInfoWait = 2 * time.Second + +type cloudInfo struct { + client http.Client + logf logger.Logf + + // The following parameters are fixed for the lifetime of the cloudInfo + // object, but are used for testing. + cloud cloudenv.Cloud + endpoint string +} + +func newCloudInfo(logf logger.Logf) *cloudInfo { + tr := &http.Transport{ + DisableKeepAlives: true, + Dial: (&net.Dialer{ + Timeout: maxCloudInfoWait, + }).Dial, + } + + return &cloudInfo{ + client: http.Client{Transport: tr}, + logf: logf, + cloud: cloudenv.Get(), + endpoint: "http://" + cloudenv.CommonNonRoutableMetadataIP, + } +} + +// GetPublicIPs returns any public IPs attached to the current cloud instance, +// if the tailscaled process is running in a known cloud and there are any such +// IPs present. +func (ci *cloudInfo) GetPublicIPs(ctx context.Context) ([]netip.Addr, error) { + switch ci.cloud { + case cloudenv.AWS: + ret, err := ci.getAWS(ctx) + ci.logf("[v1] cloudinfo.GetPublicIPs: AWS: %v, %v", ret, err) + return ret, err + } + + return nil, nil +} + +// getAWSMetadata makes a request to the AWS metadata service at the given +// path, authenticating with the provided IMDSv2 token. The returned metadata +// is split by newline and returned as a slice. 
+func (ci *cloudInfo) getAWSMetadata(ctx context.Context, token, path string) ([]string, error) { + req, err := http.NewRequestWithContext(ctx, "GET", ci.endpoint+path, nil) + if err != nil { + return nil, fmt.Errorf("creating request to %q: %w", path, err) + } + req.Header.Set("X-aws-ec2-metadata-token", token) + + resp, err := ci.client.Do(req) + if err != nil { + return nil, fmt.Errorf("making request to metadata service %q: %w", path, err) + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + // Good + case http.StatusNotFound: + // Nothing found, but this isn't an error; just return + return nil, nil + default: + return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("reading response body for %q: %w", path, err) + } + + return strings.Split(strings.TrimSpace(string(body)), "\n"), nil +} + +// getAWS returns all public IPv4 and IPv6 addresses present in the AWS instance metadata. +func (ci *cloudInfo) getAWS(ctx context.Context) ([]netip.Addr, error) { + ctx, cancel := context.WithTimeout(ctx, maxCloudInfoWait) + defer cancel() + + // Get a token so we can query the metadata service. + req, err := http.NewRequestWithContext(ctx, "PUT", ci.endpoint+"/latest/api/token", nil) + if err != nil { + return nil, fmt.Errorf("creating token request: %w", err) + } + req.Header.Set("X-Aws-Ec2-Metadata-Token-Ttl-Seconds", "10") + + resp, err := ci.client.Do(req) + if err != nil { + return nil, fmt.Errorf("making token request to metadata service: %w", err) + } + body, err := io.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, fmt.Errorf("reading token response body: %w", err) + } + token := string(body) + + server := resp.Header.Get("Server") + if server != "EC2ws" { + return nil, fmt.Errorf("unexpected server header: %q", server) + } + + // Iterate over all interfaces and get their public IP addresses, both IPv4 and IPv6. 
+ macAddrs, err := ci.getAWSMetadata(ctx, token, "/latest/meta-data/network/interfaces/macs/") + if err != nil { + return nil, fmt.Errorf("getting interface MAC addresses: %w", err) + } + + var ( + addrs []netip.Addr + errs []error + ) + + addAddr := func(addr string) { + ip, err := netip.ParseAddr(addr) + if err != nil { + errs = append(errs, fmt.Errorf("parsing IP address %q: %w", addr, err)) + return + } + addrs = append(addrs, ip) + } + for _, mac := range macAddrs { + ips, err := ci.getAWSMetadata(ctx, token, "/latest/meta-data/network/interfaces/macs/"+mac+"/public-ipv4s") + if err != nil { + errs = append(errs, fmt.Errorf("getting IPv4 addresses for %q: %w", mac, err)) + continue + } + + for _, ip := range ips { + addAddr(ip) + } + + // Try querying for IPv6 addresses. + ips, err = ci.getAWSMetadata(ctx, token, "/latest/meta-data/network/interfaces/macs/"+mac+"/ipv6s") + if err != nil { + errs = append(errs, fmt.Errorf("getting IPv6 addresses for %q: %w", mac, err)) + continue + } + for _, ip := range ips { + addAddr(ip) + } + } + + // Sort the returned addresses for determinism. + slices.SortFunc(addrs, func(a, b netip.Addr) int { + return a.Compare(b) + }) + + // Preferentially return any addresses we found, even if there were errors. 
+ if len(addrs) > 0 { + return addrs, nil + } + if len(errs) > 0 { + return nil, fmt.Errorf("getting IP addresses: %w", errors.Join(errs...)) + } + return nil, nil +} diff --git a/wgengine/magicsock/cloudinfo_nocloud.go b/wgengine/magicsock/cloudinfo_nocloud.go new file mode 100644 index 0000000000000..b4414d318c7ea --- /dev/null +++ b/wgengine/magicsock/cloudinfo_nocloud.go @@ -0,0 +1,23 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ios || android || js + +package magicsock + +import ( + "context" + "net/netip" + + "tailscale.com/types/logger" +) + +type cloudInfo struct{} + +func newCloudInfo(_ logger.Logf) *cloudInfo { + return &cloudInfo{} +} + +func (ci *cloudInfo) GetPublicIPs(_ context.Context) ([]netip.Addr, error) { + return nil, nil +} diff --git a/wgengine/magicsock/cloudinfo_test.go b/wgengine/magicsock/cloudinfo_test.go new file mode 100644 index 0000000000000..15191aeefea36 --- /dev/null +++ b/wgengine/magicsock/cloudinfo_test.go @@ -0,0 +1,123 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package magicsock + +import ( + "context" + "net/http" + "net/http/httptest" + "net/netip" + "slices" + "testing" + + "tailscale.com/util/cloudenv" +) + +func TestCloudInfo_AWS(t *testing.T) { + const ( + mac1 = "06:1d:00:00:00:00" + mac2 = "06:1d:00:00:00:01" + publicV4 = "1.2.3.4" + otherV4_1 = "5.6.7.8" + otherV4_2 = "11.12.13.14" + v6addr = "2001:db8::1" + + macsPrefix = "/latest/meta-data/network/interfaces/macs/" + ) + // Launch a fake AWS IMDS server + fake := &fakeIMDS{ + tb: t, + paths: map[string]string{ + macsPrefix: mac1 + "\n" + mac2, + // This is the "main" public IP address for the instance + macsPrefix + mac1 + "/public-ipv4s": publicV4, + + // There's another interface with two public IPs + // attached to it and an IPv6 address, all of which we + // should discover. 
+ macsPrefix + mac2 + "/public-ipv4s": otherV4_1 + "\n" + otherV4_2, + macsPrefix + mac2 + "/ipv6s": v6addr, + }, + } + + srv := httptest.NewServer(fake) + defer srv.Close() + + ci := newCloudInfo(t.Logf) + ci.cloud = cloudenv.AWS + ci.endpoint = srv.URL + + ips, err := ci.GetPublicIPs(context.Background()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + wantIPs := []netip.Addr{ + netip.MustParseAddr(publicV4), + netip.MustParseAddr(otherV4_1), + netip.MustParseAddr(otherV4_2), + netip.MustParseAddr(v6addr), + } + if !slices.Equal(ips, wantIPs) { + t.Fatalf("got %v, want %v", ips, wantIPs) + } +} + +func TestCloudInfo_AWSNotPublic(t *testing.T) { + returns404 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "PUT" && r.URL.Path == "/latest/api/token" { + w.Header().Set("Server", "EC2ws") + w.Write([]byte("fake-imds-token")) + return + } + http.NotFound(w, r) + }) + srv := httptest.NewServer(returns404) + defer srv.Close() + + ci := newCloudInfo(t.Logf) + ci.cloud = cloudenv.AWS + ci.endpoint = srv.URL + + // If the IMDS server doesn't return any public IPs, it's not an error + // and we should just get an empty list. 
+ ips, err := ci.GetPublicIPs(context.Background()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(ips) != 0 { + t.Fatalf("got %v, want none", ips) + } +} + +type fakeIMDS struct { + tb testing.TB + paths map[string]string +} + +func (f *fakeIMDS) ServeHTTP(w http.ResponseWriter, r *http.Request) { + f.tb.Logf("%s %s", r.Method, r.URL.Path) + path := r.URL.Path + + // Handle the /latest/api/token case + const token = "fake-imds-token" + if r.Method == "PUT" && path == "/latest/api/token" { + w.Header().Set("Server", "EC2ws") + w.Write([]byte(token)) + return + } + + // Otherwise, require the IMDSv2 token to be set + if r.Header.Get("X-aws-ec2-metadata-token") != token { + f.tb.Errorf("missing or invalid IMDSv2 token") + http.Error(w, "missing or invalid IMDSv2 token", http.StatusForbidden) + return + } + + if v, ok := f.paths[path]; ok { + w.Write([]byte(v)) + return + } + http.NotFound(w, r) +} diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 66264de322b96..7b121d415233e 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -14,6 +14,7 @@ import ( "io" "net" "net/netip" + "reflect" "runtime" "strconv" "strings" @@ -24,7 +25,6 @@ import ( "github.com/tailscale/wireguard-go/conn" "go4.org/mem" - "golang.org/x/net/ipv4" "golang.org/x/net/ipv6" "tailscale.com/control/controlknobs" @@ -132,6 +132,9 @@ type Conn struct { // bind is the wireguard-go conn.Bind for Conn. bind *connBind + // cloudInfo is used to query cloud metadata services. + cloudInfo *cloudInfo + // ============================================================ // Fields that must be accessed via atomic load/stores. @@ -311,6 +314,12 @@ type Conn struct { // lastEPERMRebind tracks the last time a rebind was performed // after experiencing a syscall.EPERM. lastEPERMRebind syncs.AtomicValue[time.Time] + + // staticEndpoints are user set endpoints that this node should + // advertise amongst its wireguard endpoints. 
It is user's + // responsibility to ensure that traffic from these endpoints is routed + // to the node. + staticEndpoints views.Slice[netip.AddrPort] } // SetDebugLoggingEnabled controls whether spammy debug logging is enabled. @@ -418,9 +427,10 @@ func (o *Options) derpActiveFunc() func() { // newConn is the error-free, network-listening-side-effect-free based // of NewConn. Mostly for tests. -func newConn() *Conn { +func newConn(logf logger.Logf) *Conn { discoPrivate := key.NewDisco() c := &Conn{ + logf: logf, derpRecvCh: make(chan derpReadResult, 1), // must be buffered, see issue 3736 derpStarted: make(chan struct{}), peerLastDerp: make(map[key.NodePublic]int), @@ -428,6 +438,7 @@ func newConn() *Conn { discoInfo: make(map[key.DiscoPublic]*discoInfo), discoPrivate: discoPrivate, discoPublic: discoPrivate.Public(), + cloudInfo: newCloudInfo(logf), } c.discoShort = c.discoPublic.ShortString() c.bind = &connBind{Conn: c, closed: true} @@ -455,10 +466,9 @@ func NewConn(opts Options) (*Conn, error) { return nil, errors.New("magicsock.Options.NetMon must be non-nil") } - c := newConn() + c := newConn(opts.logf()) c.port.Store(uint32(opts.Port)) c.controlKnobs = opts.ControlKnobs - c.logf = opts.logf() c.epFunc = opts.endpointsFunc() c.derpActiveFunc = opts.derpActiveFunc() c.idleFunc = opts.IdleFunc @@ -636,6 +646,22 @@ func (c *Conn) setEndpoints(endpoints []tailcfg.Endpoint) (changed bool) { return true } +// SetStaticEndpoints sets static endpoints to the provided value and triggers +// an asynchronous update of the endpoints that this node advertises. +// Static endpoints are endpoints explicitly configured by user. 
+func (c *Conn) SetStaticEndpoints(ep views.Slice[netip.AddrPort]) {
+	c.mu.Lock()
+	if reflect.DeepEqual(c.staticEndpoints.AsSlice(), ep.AsSlice()) {
+		// Unlock before the early return: returning with c.mu held would
+		// deadlock every subsequent Conn operation. A defer is not usable
+		// here because ReSTUN below acquires c.mu itself.
+		c.mu.Unlock()
+		return
+	}
+	c.staticEndpoints = ep
+	c.mu.Unlock()
+	// Not actually a reSTUN, but ReSTUN does what we need here: it calls
+	// updateEndpoints, or queues an update if one is already in progress.
+	c.ReSTUN("static-endpoint-change")
+}
+
 // setNetInfoHavePortMap updates NetInfo.HavePortMap to true.
 func (c *Conn) setNetInfoHavePortMap() {
 	c.mu.Lock()
@@ -845,8 +871,10 @@ func (c *Conn) DiscoPublicKey() key.DiscoPublic {
 	return c.discoPublic
 }
 
-// determineEndpoints returns the machine's endpoint addresses. It
-// does a STUN lookup (via netcheck) to determine its public address.
+// determineEndpoints returns the machine's endpoint addresses. It does a STUN
+// lookup (via netcheck) to determine its public address. Additionally any
+// static endpoints provided by the user are always added to the returned
+// endpoints without validating if the node can be reached via those endpoints.
 //
 // c.mu must NOT be held.
 func (c *Conn) determineEndpoints(ctx context.Context) ([]tailcfg.Endpoint, error) {
@@ -927,6 +955,27 @@ func (c *Conn) determineEndpoints(ctx context.Context) ([]tailcfg.Endpoint, erro
 		addAddr(ap, tailcfg.EndpointExplicitConf)
 	}
 
+	// If we're on a cloud instance, we might have a public IPv4 or IPv6
+	// address that we can be reached at. Find those, if they exist, and
+	// add them.
+ if addrs, err := c.cloudInfo.GetPublicIPs(ctx); err == nil { + var port4, port6 uint16 + if addr := c.pconn4.LocalAddr(); addr != nil { + port4 = uint16(addr.Port) + } + if addr := c.pconn6.LocalAddr(); addr != nil { + port6 = uint16(addr.Port) + } + + for _, addr := range addrs { + if addr.Is4() && port4 > 0 { + addAddr(netip.AddrPortFrom(addr, port4), tailcfg.EndpointLocal) + } else if addr.Is6() && port6 > 0 { + addAddr(netip.AddrPortFrom(addr, port6), tailcfg.EndpointLocal) + } + } + } + // Update our set of endpoints by adding any endpoints that we // previously found but haven't expired yet. This also updates the // cache with the set of endpoints discovered in this function. @@ -943,6 +992,10 @@ func (c *Conn) determineEndpoints(ctx context.Context) ([]tailcfg.Endpoint, erro // re-run. eps = c.endpointTracker.update(time.Now(), eps) + for i := range c.staticEndpoints.Len() { + addAddr(c.staticEndpoints.At(i), tailcfg.EndpointExplicitConf) + } + if localAddr := c.pconn4.LocalAddr(); localAddr.IP.IsUnspecified() { ips, loopback, err := netmon.LocalAddresses() if err != nil { @@ -1047,12 +1100,6 @@ var errNoUDP = errors.New("no UDP available on platform") var errUnsupportedConnType = errors.New("unsupported connection type") -var ( - // This acts as a compile-time check for our usage of ipv6.Message in - // batchingUDPConn for both IPv6 and IPv4 operations. - _ ipv6.Message = ipv4.Message{} -) - func (c *Conn) sendUDPBatch(addr netip.AddrPort, buffs [][]byte) (sent bool, err error) { isIPv6 := false switch { @@ -1227,10 +1274,15 @@ func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFu // epCache caches an IPPort->endpoint for hot flows. 
var epCache ippEndpointCache - return func(buffs [][]byte, sizes []int, eps []conn.Endpoint) (int, error) { + return func(buffs [][]byte, sizes []int, eps []conn.Endpoint) (_ int, retErr error) { if healthItem != nil { healthItem.Enter() defer healthItem.Exit() + defer func() { + if retErr != nil { + c.logf("Receive func %s exiting with error: %T, %v", healthItem.Name(), retErr, retErr) + } + }() } if ruc == nil { panic("nil RebindingUDPConn") @@ -2359,6 +2411,8 @@ func (c *Conn) onPortMapChanged() { c.ReSTUN("portmap-changed") } // ReSTUN triggers an address discovery. // The provided why string is for debug logging only. +// If Conn.staticEndpoints have been updated, calling ReSTUN will also result in +// the new endpoints being advertised. func (c *Conn) ReSTUN(why string) { c.mu.Lock() defer c.mu.Unlock() @@ -2484,6 +2538,7 @@ func (c *Conn) bindSocket(ruc *RebindingUDPConn, network string, curPortFate cur } } trySetSocketBuffer(pconn, c.logf) + trySetUDPSocketOptions(pconn, c.logf) // Success. if debugBindSocket() { @@ -2600,153 +2655,6 @@ func (c *Conn) ParseEndpoint(nodeKeyStr string) (conn.Endpoint, error) { return ep, nil } -func (c *batchingUDPConn) writeBatch(msgs []ipv6.Message) error { - var head int - for { - n, err := c.xpc.WriteBatch(msgs[head:], 0) - if err != nil || n == len(msgs[head:]) { - // Returning the number of packets written would require - // unraveling individual msg len and gso size during a coalesced - // write. The top of the call stack disregards partial success, - // so keep this simple for now. - return err - } - head += n - } -} - -// splitCoalescedMessages splits coalesced messages from the tail of dst -// beginning at index 'firstMsgAt' into the head of the same slice. It reports -// the number of elements to evaluate in msgs for nonzero len (msgs[i].N). An -// error is returned if a socket control message cannot be parsed or a split -// operation would overflow msgs. 
-func (c *batchingUDPConn) splitCoalescedMessages(msgs []ipv6.Message, firstMsgAt int) (n int, err error) { - for i := firstMsgAt; i < len(msgs); i++ { - msg := &msgs[i] - if msg.N == 0 { - return n, err - } - var ( - gsoSize int - start int - end = msg.N - numToSplit = 1 - ) - gsoSize, err = c.getGSOSizeFromControl(msg.OOB[:msg.NN]) - if err != nil { - return n, err - } - if gsoSize > 0 { - numToSplit = (msg.N + gsoSize - 1) / gsoSize - end = gsoSize - } - for j := 0; j < numToSplit; j++ { - if n > i { - return n, errors.New("splitting coalesced packet resulted in overflow") - } - copied := copy(msgs[n].Buffers[0], msg.Buffers[0][start:end]) - msgs[n].N = copied - msgs[n].Addr = msg.Addr - start = end - end += gsoSize - if end > msg.N { - end = msg.N - } - n++ - } - if i != n-1 { - // It is legal for bytes to move within msg.Buffers[0] as a result - // of splitting, so we only zero the source msg len when it is not - // the destination of the last split operation above. - msg.N = 0 - } - } - return n, nil -} - -func (c *batchingUDPConn) ReadBatch(msgs []ipv6.Message, flags int) (n int, err error) { - if !c.rxOffload || len(msgs) < 2 { - return c.xpc.ReadBatch(msgs, flags) - } - // Read into the tail of msgs, split into the head. - readAt := len(msgs) - 2 - numRead, err := c.xpc.ReadBatch(msgs[readAt:], 0) - if err != nil || numRead == 0 { - return 0, err - } - return c.splitCoalescedMessages(msgs, readAt) -} - -func (c *batchingUDPConn) LocalAddr() net.Addr { - return c.pc.LocalAddr().(*net.UDPAddr) -} - -func (c *batchingUDPConn) WriteToUDPAddrPort(b []byte, addr netip.AddrPort) (int, error) { - return c.pc.WriteToUDPAddrPort(b, addr) -} - -func (c *batchingUDPConn) Close() error { - return c.pc.Close() -} - -// tryUpgradeToBatchingUDPConn probes the capabilities of the OS and pconn, and -// upgrades pconn to a *batchingUDPConn if appropriate. 
-func tryUpgradeToBatchingUDPConn(pconn nettype.PacketConn, network string, batchSize int) nettype.PacketConn { - if network != "udp4" && network != "udp6" { - return pconn - } - if runtime.GOOS != "linux" { - return pconn - } - if strings.HasPrefix(hostinfo.GetOSVersion(), "2.") { - // recvmmsg/sendmmsg were added in 2.6.33, but we support down to - // 2.6.32 for old NAS devices. See https://github.com/tailscale/tailscale/issues/6807. - // As a cheap heuristic: if the Linux kernel starts with "2", just - // consider it too old for mmsg. Nobody who cares about performance runs - // such ancient kernels. UDP offload was added much later, so no - // upgrades are available. - return pconn - } - uc, ok := pconn.(*net.UDPConn) - if !ok { - return pconn - } - b := &batchingUDPConn{ - pc: pconn, - getGSOSizeFromControl: getGSOSizeFromControl, - setGSOSizeInControl: setGSOSizeInControl, - sendBatchPool: sync.Pool{ - New: func() any { - ua := &net.UDPAddr{ - IP: make([]byte, 16), - } - msgs := make([]ipv6.Message, batchSize) - for i := range msgs { - msgs[i].Buffers = make([][]byte, 1) - msgs[i].Addr = ua - msgs[i].OOB = make([]byte, controlMessageSize) - } - return &sendBatch{ - ua: ua, - msgs: msgs, - } - }, - }, - } - switch network { - case "udp4": - b.xpc = ipv4.NewPacketConn(uc) - case "udp6": - b.xpc = ipv6.NewPacketConn(uc) - default: - panic("bogus network") - } - var txOffload bool - txOffload, b.rxOffload = tryEnableUDPOffload(uc) - b.txOffload.Store(txOffload) - return b -} - func newBlockForeverConn() *blockForeverConn { c := new(blockForeverConn) c.cond = sync.NewCond(&c.mu) diff --git a/wgengine/magicsock/magicsock_default.go b/wgengine/magicsock/magicsock_default.go index 87075e5226d32..321765b8c8141 100644 --- a/wgengine/magicsock/magicsock_default.go +++ b/wgengine/magicsock/magicsock_default.go @@ -21,16 +21,6 @@ func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { portableTrySetSocketBuffer(pconn, logf) } -func tryEnableUDPOffload(pconn 
nettype.PacketConn) (hasTX bool, hasRX bool) { - return false, false -} - -func getGSOSizeFromControl(control []byte) (int, error) { - return 0, nil -} - -func setGSOSizeInControl(control *[]byte, gso uint16) {} - const ( controlMessageSize = 0 ) diff --git a/wgengine/magicsock/magicsock_linux.go b/wgengine/magicsock/magicsock_linux.go index c484f77c0da1e..a647c90d2b176 100644 --- a/wgengine/magicsock/magicsock_linux.go +++ b/wgengine/magicsock/magicsock_linux.go @@ -32,6 +32,9 @@ const ( // Enable/disable using raw sockets to receive disco traffic. var debugDisableRawDisco = envknob.RegisterBool("TS_DEBUG_DISABLE_RAW_DISCO") +// debugRawDiscoReads enables logging of raw disco reads. +var debugRawDiscoReads = envknob.RegisterBool("TS_DEBUG_RAW_DISCO") + // These are our BPF filters that we use for testing packets. var ( magicsockFilterV4 = []bpf.Instruction{ @@ -211,6 +214,9 @@ func (c *Conn) receiveDisco(pc net.PacketConn, isIPV6 bool) { var buf [1500]byte for { n, src, err := pc.ReadFrom(buf[:]) + if debugRawDiscoReads() { + c.logf("raw disco read from %v = (%v, %v)", src, n, err) + } if errors.Is(err, net.ErrClosed) { return } else if err != nil { @@ -318,70 +324,6 @@ func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { } } -// tryEnableUDPOffload attempts to enable the UDP_GRO socket option on pconn, -// and returns two booleans indicating TX and RX UDP offload support. -func tryEnableUDPOffload(pconn nettype.PacketConn) (hasTX bool, hasRX bool) { - if c, ok := pconn.(*net.UDPConn); ok { - rc, err := c.SyscallConn() - if err != nil { - return - } - err = rc.Control(func(fd uintptr) { - _, errSyscall := syscall.GetsockoptInt(int(fd), unix.IPPROTO_UDP, unix.UDP_SEGMENT) - hasTX = errSyscall == nil - errSyscall = syscall.SetsockoptInt(int(fd), unix.IPPROTO_UDP, unix.UDP_GRO, 1) - hasRX = errSyscall == nil - }) - if err != nil { - return false, false - } - } - return hasTX, hasRX -} - -// getGSOSizeFromControl returns the GSO size found in control. 
If no GSO size -// is found or the len(control) < unix.SizeofCmsghdr, this function returns 0. -// A non-nil error will be returned if len(control) > unix.SizeofCmsghdr but -// its contents cannot be parsed as a socket control message. -func getGSOSizeFromControl(control []byte) (int, error) { - var ( - hdr unix.Cmsghdr - data []byte - rem = control - err error - ) - - for len(rem) > unix.SizeofCmsghdr { - hdr, data, rem, err = unix.ParseOneSocketControlMessage(control) - if err != nil { - return 0, fmt.Errorf("error parsing socket control message: %w", err) - } - if hdr.Level == unix.SOL_UDP && hdr.Type == unix.UDP_GRO && len(data) >= 2 { - return int(binary.NativeEndian.Uint16(data[:2])), nil - } - } - return 0, nil -} - -// setGSOSizeInControl sets a socket control message in control containing -// gsoSize. If len(control) < controlMessageSize control's len will be set to 0. -func setGSOSizeInControl(control *[]byte, gsoSize uint16) { - *control = (*control)[:0] - if cap(*control) < int(unsafe.Sizeof(unix.Cmsghdr{})) { - return - } - if cap(*control) < controlMessageSize { - return - } - *control = (*control)[:cap(*control)] - hdr := (*unix.Cmsghdr)(unsafe.Pointer(&(*control)[0])) - hdr.Level = unix.SOL_UDP - hdr.Type = unix.UDP_SEGMENT - hdr.SetLen(unix.CmsgLen(2)) - binary.NativeEndian.PutUint16((*control)[unix.SizeofCmsghdr:], gsoSize) - *control = (*control)[:unix.CmsgSpace(2)] -} - var controlMessageSize = -1 // bomb if used for allocation before init func init() { diff --git a/wgengine/magicsock/magicsock_notwindows.go b/wgengine/magicsock/magicsock_notwindows.go new file mode 100644 index 0000000000000..7c31c8202b35e --- /dev/null +++ b/wgengine/magicsock/magicsock_notwindows.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !windows + +package magicsock + +import ( + "tailscale.com/types/logger" + "tailscale.com/types/nettype" +) + +func trySetUDPSocketOptions(pconn nettype.PacketConn, 
logf logger.Logf) {} diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index cec05dffcddfb..be1b43f56a151 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -35,7 +35,6 @@ import ( xmaps "golang.org/x/exp/maps" "golang.org/x/net/icmp" "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" "tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/control/controlknobs" "tailscale.com/derp" @@ -452,7 +451,7 @@ func TestPickDERPFallback(t *testing.T) { tstest.PanicOnLog() tstest.ResourceCheck(t) - c := newConn() + c := newConn(t.Logf) dm := &tailcfg.DERPMap{ Regions: map[int]*tailcfg.DERPRegion{ 1: {}, @@ -483,7 +482,7 @@ func TestPickDERPFallback(t *testing.T) { // distribution over nodes works. got := map[int]int{} for range 50 { - c = newConn() + c = newConn(t.Logf) c.derpMap = dm got[c.pickDERPFallback()]++ } @@ -1185,8 +1184,7 @@ func testTwoDevicePing(t *testing.T, d *devices) { } func TestDiscoMessage(t *testing.T) { - c := newConn() - c.logf = t.Logf + c := newConn(t.Logf) c.privateKey = key.NewNode() peer1Pub := c.DiscoPublicKey() @@ -2039,238 +2037,6 @@ func TestBufferedDerpWritesBeforeDrop(t *testing.T) { t.Logf("bufferedDerpWritesBeforeDrop = %d", vv) } -func setGSOSize(control *[]byte, gsoSize uint16) { - *control = (*control)[:cap(*control)] - binary.LittleEndian.PutUint16(*control, gsoSize) -} - -func getGSOSize(control []byte) (int, error) { - if len(control) < 2 { - return 0, nil - } - return int(binary.LittleEndian.Uint16(control)), nil -} - -func Test_batchingUDPConn_splitCoalescedMessages(t *testing.T) { - c := &batchingUDPConn{ - setGSOSizeInControl: setGSOSize, - getGSOSizeFromControl: getGSOSize, - } - - newMsg := func(n, gso int) ipv6.Message { - msg := ipv6.Message{ - Buffers: [][]byte{make([]byte, 1024)}, - N: n, - OOB: make([]byte, 2), - } - binary.LittleEndian.PutUint16(msg.OOB, uint16(gso)) - if gso > 0 { - msg.NN = 2 - } - return msg - } - - cases := []struct { - 
name string - msgs []ipv6.Message - firstMsgAt int - wantNumEval int - wantMsgLens []int - wantErr bool - }{ - { - name: "second last split last empty", - msgs: []ipv6.Message{ - newMsg(0, 0), - newMsg(0, 0), - newMsg(3, 1), - newMsg(0, 0), - }, - firstMsgAt: 2, - wantNumEval: 3, - wantMsgLens: []int{1, 1, 1, 0}, - wantErr: false, - }, - { - name: "second last no split last empty", - msgs: []ipv6.Message{ - newMsg(0, 0), - newMsg(0, 0), - newMsg(1, 0), - newMsg(0, 0), - }, - firstMsgAt: 2, - wantNumEval: 1, - wantMsgLens: []int{1, 0, 0, 0}, - wantErr: false, - }, - { - name: "second last no split last no split", - msgs: []ipv6.Message{ - newMsg(0, 0), - newMsg(0, 0), - newMsg(1, 0), - newMsg(1, 0), - }, - firstMsgAt: 2, - wantNumEval: 2, - wantMsgLens: []int{1, 1, 0, 0}, - wantErr: false, - }, - { - name: "second last no split last split", - msgs: []ipv6.Message{ - newMsg(0, 0), - newMsg(0, 0), - newMsg(1, 0), - newMsg(3, 1), - }, - firstMsgAt: 2, - wantNumEval: 4, - wantMsgLens: []int{1, 1, 1, 1}, - wantErr: false, - }, - { - name: "second last split last split", - msgs: []ipv6.Message{ - newMsg(0, 0), - newMsg(0, 0), - newMsg(2, 1), - newMsg(2, 1), - }, - firstMsgAt: 2, - wantNumEval: 4, - wantMsgLens: []int{1, 1, 1, 1}, - wantErr: false, - }, - { - name: "second last no split last split overflow", - msgs: []ipv6.Message{ - newMsg(0, 0), - newMsg(0, 0), - newMsg(1, 0), - newMsg(4, 1), - }, - firstMsgAt: 2, - wantNumEval: 4, - wantMsgLens: []int{1, 1, 1, 1}, - wantErr: true, - }, - } - - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - got, err := c.splitCoalescedMessages(tt.msgs, 2) - if err != nil && !tt.wantErr { - t.Fatalf("err: %v", err) - } - if got != tt.wantNumEval { - t.Fatalf("got to eval: %d want: %d", got, tt.wantNumEval) - } - for i, msg := range tt.msgs { - if msg.N != tt.wantMsgLens[i] { - t.Fatalf("msg[%d].N: %d want: %d", i, msg.N, tt.wantMsgLens[i]) - } - } - }) - } -} - -func Test_batchingUDPConn_coalesceMessages(t *testing.T) 
{ - c := &batchingUDPConn{ - setGSOSizeInControl: setGSOSize, - getGSOSizeFromControl: getGSOSize, - } - - cases := []struct { - name string - buffs [][]byte - wantLens []int - wantGSO []int - }{ - { - name: "one message no coalesce", - buffs: [][]byte{ - make([]byte, 1, 1), - }, - wantLens: []int{1}, - wantGSO: []int{0}, - }, - { - name: "two messages equal len coalesce", - buffs: [][]byte{ - make([]byte, 1, 2), - make([]byte, 1, 1), - }, - wantLens: []int{2}, - wantGSO: []int{1}, - }, - { - name: "two messages unequal len coalesce", - buffs: [][]byte{ - make([]byte, 2, 3), - make([]byte, 1, 1), - }, - wantLens: []int{3}, - wantGSO: []int{2}, - }, - { - name: "three messages second unequal len coalesce", - buffs: [][]byte{ - make([]byte, 2, 3), - make([]byte, 1, 1), - make([]byte, 2, 2), - }, - wantLens: []int{3, 2}, - wantGSO: []int{2, 0}, - }, - { - name: "three messages limited cap coalesce", - buffs: [][]byte{ - make([]byte, 2, 4), - make([]byte, 2, 2), - make([]byte, 2, 2), - }, - wantLens: []int{4, 2}, - wantGSO: []int{2, 0}, - }, - } - - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - addr := &net.UDPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 1, - } - msgs := make([]ipv6.Message, len(tt.buffs)) - for i := range msgs { - msgs[i].Buffers = make([][]byte, 1) - msgs[i].OOB = make([]byte, 0, 2) - } - got := c.coalesceMessages(addr, tt.buffs, msgs) - if got != len(tt.wantLens) { - t.Fatalf("got len %d want: %d", got, len(tt.wantLens)) - } - for i := range got { - if msgs[i].Addr != addr { - t.Errorf("msgs[%d].Addr != passed addr", i) - } - gotLen := len(msgs[i].Buffers[0]) - if gotLen != tt.wantLens[i] { - t.Errorf("len(msgs[%d].Buffers[0]) %d != %d", i, gotLen, tt.wantLens[i]) - } - gotGSO, err := getGSOSize(msgs[i].OOB) - if err != nil { - t.Fatalf("msgs[%d] getGSOSize err: %v", i, err) - } - if gotGSO != tt.wantGSO[i] { - t.Errorf("msgs[%d] gsoSize %d != %d", i, gotGSO, tt.wantGSO[i]) - } - } - }) - } -} - // newWireguard starts up a new 
wireguard-go device attached to a test tun, and // returns the device, tun and endpoint port. To add peers call device.IpcSet with UAPI instructions. func newWireguard(t *testing.T, uapi string, aips []netip.Prefix) (*device.Device, *tuntest.ChannelTUN, uint16) { @@ -3161,8 +2927,7 @@ func TestMaybeSetNearestDERP(t *testing.T) { for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { ht := new(health.Tracker) - c := newConn() - c.logf = t.Logf + c := newConn(t.Logf) c.myDerp = tt.old c.derpMap = derpMap c.health = ht diff --git a/wgengine/magicsock/magicsock_windows.go b/wgengine/magicsock/magicsock_windows.go new file mode 100644 index 0000000000000..fe2a80e0ba951 --- /dev/null +++ b/wgengine/magicsock/magicsock_windows.go @@ -0,0 +1,58 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build windows + +package magicsock + +import ( + "net" + "unsafe" + + "golang.org/x/sys/windows" + "tailscale.com/types/logger" + "tailscale.com/types/nettype" +) + +func trySetUDPSocketOptions(pconn nettype.PacketConn, logf logger.Logf) { + c, ok := pconn.(*net.UDPConn) + if !ok { + // not a UDP connection; nothing to do + return + } + + sysConn, err := c.SyscallConn() + if err != nil { + logf("trySetUDPSocketOptions: getting SyscallConn failed: %v", err) + return + } + + // Similar to https://github.com/golang/go/issues/5834 (which involved + // WSAECONNRESET), Windows can return a WSAENETRESET error, even on UDP + // reads. Disable this. 
+ const SIO_UDP_NETRESET = windows.IOC_IN | windows.IOC_VENDOR | 15 + + var ioctlErr error + err = sysConn.Control(func(fd uintptr) { + ret := uint32(0) + flag := uint32(0) + size := uint32(unsafe.Sizeof(flag)) + ioctlErr = windows.WSAIoctl( + windows.Handle(fd), + SIO_UDP_NETRESET, // iocc + (*byte)(unsafe.Pointer(&flag)), // inbuf + size, // cbif + nil, // outbuf + 0, // cbob + &ret, // cbbr + nil, // overlapped + 0, // completionRoutine + ) + }) + if ioctlErr != nil { + logf("trySetUDPSocketOptions: could not set SIO_UDP_NETRESET: %v", ioctlErr) + } + if err != nil { + logf("trySetUDPSocketOptions: SyscallConn.Control failed: %v", err) + } +} diff --git a/wgengine/magicsock/rebinding_conn.go b/wgengine/magicsock/rebinding_conn.go index f1e47f3a8b261..c27abbadc9ced 100644 --- a/wgengine/magicsock/rebinding_conn.go +++ b/wgengine/magicsock/rebinding_conn.go @@ -35,12 +35,12 @@ type RebindingUDPConn struct { // setConnLocked sets the provided nettype.PacketConn. It should be called only // after acquiring RebindingUDPConn.mu. It upgrades the provided -// nettype.PacketConn to a *batchingUDPConn when appropriate. This upgrade -// is intentionally pushed closest to where read/write ops occur in order to -// avoid disrupting surrounding code that assumes nettype.PacketConn is a +// nettype.PacketConn to a batchingConn when appropriate. This upgrade is +// intentionally pushed closest to where read/write ops occur in order to avoid +// disrupting surrounding code that assumes nettype.PacketConn is a // *net.UDPConn. 
func (c *RebindingUDPConn) setConnLocked(p nettype.PacketConn, network string, batchSize int) { - upc := tryUpgradeToBatchingUDPConn(p, network, batchSize) + upc := tryUpgradeToBatchingConn(p, network, batchSize) c.pconn = upc c.pconnAtomic.Store(&upc) c.port = uint16(c.localAddrLocked().Port) @@ -74,7 +74,7 @@ func (c *RebindingUDPConn) ReadFromUDPAddrPort(b []byte) (int, netip.AddrPort, e func (c *RebindingUDPConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort) error { for { pconn := *c.pconnAtomic.Load() - b, ok := pconn.(*batchingUDPConn) + b, ok := pconn.(batchingConn) if !ok { for _, buf := range buffs { _, err := c.writeToUDPAddrPortWithInitPconn(pconn, buf, addr) @@ -101,7 +101,7 @@ func (c *RebindingUDPConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort) err func (c *RebindingUDPConn) ReadBatch(msgs []ipv6.Message, flags int) (int, error) { for { pconn := *c.pconnAtomic.Load() - b, ok := pconn.(*batchingUDPConn) + b, ok := pconn.(batchingConn) if !ok { n, ap, err := c.readFromWithInitPconn(pconn, msgs[0].Buffers[0]) if err == nil { diff --git a/wgengine/netstack/gro_default.go b/wgengine/netstack/gro_default.go new file mode 100644 index 0000000000000..ef4ff4b98df88 --- /dev/null +++ b/wgengine/netstack/gro_default.go @@ -0,0 +1,16 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ios + +package netstack + +import ( + nsgro "gvisor.dev/gvisor/pkg/tcpip/stack/gro" +) + +// gro wraps a gVisor GRO implementation. It exists solely to prevent iOS from +// importing said package (see _ios.go). 
+type gro struct { + nsgro.GRO +} diff --git a/wgengine/netstack/gro_ios.go b/wgengine/netstack/gro_ios.go new file mode 100644 index 0000000000000..fb252f7db7658 --- /dev/null +++ b/wgengine/netstack/gro_ios.go @@ -0,0 +1,30 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ios + +package netstack + +import ( + "gvisor.dev/gvisor/pkg/tcpip/stack" +) + +// gro on iOS delivers packets to its Dispatcher, immediately. This type exists +// to prevent importation of the gVisor GRO implementation as said package +// increases binary size. This is a penalty we do not wish to pay since we +// currently do not leverage GRO on iOS. +type gro struct { + Dispatcher stack.NetworkDispatcher +} + +func (g *gro) Init(v bool) { + if v { + panic("GRO is not supported on this platform") + } +} + +func (g *gro) Flush() {} + +func (g *gro) Enqueue(pkt *stack.PacketBuffer) { + g.Dispatcher.DeliverNetworkPacket(pkt.NetworkProtocolNumber, pkt) +} diff --git a/wgengine/netstack/link_endpoint.go b/wgengine/netstack/link_endpoint.go new file mode 100644 index 0000000000000..238e14cf0bff0 --- /dev/null +++ b/wgengine/netstack/link_endpoint.go @@ -0,0 +1,414 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package netstack + +import ( + "bytes" + "context" + "sync" + + "github.com/tailscale/wireguard-go/tun" + "gvisor.dev/gvisor/pkg/buffer" + "gvisor.dev/gvisor/pkg/tcpip" + "gvisor.dev/gvisor/pkg/tcpip/header" + "gvisor.dev/gvisor/pkg/tcpip/header/parse" + "gvisor.dev/gvisor/pkg/tcpip/stack" + "tailscale.com/net/packet" + "tailscale.com/types/ipproto" +) + +type queue struct { + // TODO(jwhited): evaluate performance with mu as Mutex and/or alternative + // non-channel buffer. 
+ c chan *stack.PacketBuffer + mu sync.RWMutex // mu guards closed + closed bool +} + +func (q *queue) Close() { + q.mu.Lock() + defer q.mu.Unlock() + if !q.closed { + close(q.c) + } + q.closed = true +} + +func (q *queue) Read() *stack.PacketBuffer { + select { + case p := <-q.c: + return p + default: + return nil + } +} + +func (q *queue) ReadContext(ctx context.Context) *stack.PacketBuffer { + select { + case pkt := <-q.c: + return pkt + case <-ctx.Done(): + return nil + } +} + +func (q *queue) Write(pkt *stack.PacketBuffer) tcpip.Error { + // q holds the PacketBuffer. + q.mu.RLock() + defer q.mu.RUnlock() + if q.closed { + return &tcpip.ErrClosedForSend{} + } + + wrote := false + select { + case q.c <- pkt.IncRef(): + wrote = true + default: + // TODO(jwhited): reconsider/count + pkt.DecRef() + } + + if wrote { + return nil + } + return &tcpip.ErrNoBufferSpace{} +} + +func (q *queue) Num() int { + return len(q.c) +} + +var _ stack.LinkEndpoint = (*linkEndpoint)(nil) +var _ stack.GSOEndpoint = (*linkEndpoint)(nil) + +// linkEndpoint implements stack.LinkEndpoint and stack.GSOEndpoint. Outbound +// packets written by gVisor towards Tailscale are stored in a channel. +// Inbound is fed to gVisor via injectInbound or enqueueGRO. This is loosely +// modeled after gvisor.dev/pkg/tcpip/link/channel.Endpoint. +type linkEndpoint struct { + SupportedGSOKind stack.SupportedGSO + initGRO initGRO + + mu sync.RWMutex // mu guards the following fields + dispatcher stack.NetworkDispatcher + linkAddr tcpip.LinkAddress + mtu uint32 + gro gro // mu only guards access to gro.Dispatcher + + q *queue // outbound +} + +// TODO(jwhited): move to linkEndpointOpts struct or similar. 
+type initGRO bool + +const ( + disableGRO initGRO = false + enableGRO initGRO = true +) + +func newLinkEndpoint(size int, mtu uint32, linkAddr tcpip.LinkAddress, gro initGRO) *linkEndpoint { + le := &linkEndpoint{ + q: &queue{ + c: make(chan *stack.PacketBuffer, size), + }, + mtu: mtu, + linkAddr: linkAddr, + } + le.initGRO = gro + le.gro.Init(bool(gro)) + return le +} + +// Close closes l. Further packet injections will return an error, and all +// pending packets are discarded. Close may be called concurrently with +// WritePackets. +func (l *linkEndpoint) Close() { + l.mu.Lock() + if l.gro.Dispatcher != nil { + l.gro.Flush() + } + l.dispatcher = nil + l.gro.Dispatcher = nil + l.mu.Unlock() + l.q.Close() + l.Drain() +} + +// Read does non-blocking read one packet from the outbound packet queue. +func (l *linkEndpoint) Read() *stack.PacketBuffer { + return l.q.Read() +} + +// ReadContext does blocking read for one packet from the outbound packet queue. +// It can be cancelled by ctx, and in this case, it returns nil. +func (l *linkEndpoint) ReadContext(ctx context.Context) *stack.PacketBuffer { + return l.q.ReadContext(ctx) +} + +// Drain removes all outbound packets from the channel and counts them. +func (l *linkEndpoint) Drain() int { + c := 0 + for pkt := l.Read(); pkt != nil; pkt = l.Read() { + pkt.DecRef() + c++ + } + return c +} + +// NumQueued returns the number of packets queued for outbound. +func (l *linkEndpoint) NumQueued() int { + return l.q.Num() +} + +// rxChecksumOffload validates IPv4, TCP, and UDP header checksums in p, +// returning an equivalent *stack.PacketBuffer if they are valid, otherwise nil. +// The set of headers validated covers where gVisor would perform validation if +// !stack.PacketBuffer.RXChecksumValidated, i.e. it satisfies +// stack.CapabilityRXChecksumOffload. Other protocols with checksum fields, +// e.g. ICMP{v6}, are still validated by gVisor regardless of rx checksum +// offloading capabilities. 
+func rxChecksumOffload(p *packet.Parsed) *stack.PacketBuffer { + var ( + pn tcpip.NetworkProtocolNumber + csumStart int + ) + buf := p.Buffer() + + switch p.IPVersion { + case 4: + if len(buf) < header.IPv4MinimumSize { + return nil + } + csumStart = int((buf[0] & 0x0F) * 4) + if csumStart < header.IPv4MinimumSize || csumStart > header.IPv4MaximumHeaderSize || len(buf) < csumStart { + return nil + } + if ^tun.Checksum(buf[:csumStart], 0) != 0 { + return nil + } + pn = header.IPv4ProtocolNumber + case 6: + if len(buf) < header.IPv6FixedHeaderSize { + return nil + } + csumStart = header.IPv6FixedHeaderSize + pn = header.IPv6ProtocolNumber + if p.IPProto != ipproto.ICMPv6 && p.IPProto != ipproto.TCP && p.IPProto != ipproto.UDP { + // buf could have extension headers before a UDP or TCP header, but + // packet.Parsed.IPProto will be set to the ext header type, so we + // have to look deeper. We are still responsible for validating the + // L4 checksum in this case. So, make use of gVisor's existing + // extension header parsing via parse.IPv6() in order to unpack the + // L4 csumStart index. This is not particularly efficient as we have + // to allocate a short-lived stack.PacketBuffer that cannot be + // re-used. parse.IPv6() "consumes" the IPv6 headers, so we can't + // inject this stack.PacketBuffer into the stack at a later point. + packetBuf := stack.NewPacketBuffer(stack.PacketBufferOptions{ + Payload: buffer.MakeWithData(bytes.Clone(buf)), + }) + defer packetBuf.DecRef() + // The rightmost bool returns false only if packetBuf is too short, + // which we've already accounted for above. 
+ transportProto, _, _, _, _ := parse.IPv6(packetBuf) + if transportProto == header.TCPProtocolNumber || transportProto == header.UDPProtocolNumber { + csumLen := packetBuf.Data().Size() + if len(buf) < csumLen { + return nil + } + csumStart = len(buf) - csumLen + p.IPProto = ipproto.Proto(transportProto) + } + } + } + + if p.IPProto == ipproto.TCP || p.IPProto == ipproto.UDP { + lenForPseudo := len(buf) - csumStart + csum := tun.PseudoHeaderChecksum( + uint8(p.IPProto), + p.Src.Addr().AsSlice(), + p.Dst.Addr().AsSlice(), + uint16(lenForPseudo)) + csum = tun.Checksum(buf[csumStart:], csum) + if ^csum != 0 { + return nil + } + } + + packetBuf := stack.NewPacketBuffer(stack.PacketBufferOptions{ + Payload: buffer.MakeWithData(bytes.Clone(buf)), + }) + packetBuf.NetworkProtocolNumber = pn + // Setting this is not technically required. gVisor overrides where + // stack.CapabilityRXChecksumOffload is advertised from Capabilities(). + // https://github.com/google/gvisor/blob/64c016c92987cc04dfd4c7b091ddd21bdad875f8/pkg/tcpip/stack/nic.go#L763 + // This is also why we offload for all packets since we cannot signal this + // per-packet. + packetBuf.RXChecksumValidated = true + return packetBuf +} + +func (l *linkEndpoint) injectInbound(p *packet.Parsed) { + l.mu.RLock() + d := l.dispatcher + l.mu.RUnlock() + if d == nil { + return + } + pkt := rxChecksumOffload(p) + if pkt == nil { + return + } + d.DeliverNetworkPacket(pkt.NetworkProtocolNumber, pkt) + pkt.DecRef() +} + +// enqueueGRO enqueues the provided packet for GRO. It may immediately deliver +// it to the underlying stack.NetworkDispatcher depending on its contents and if +// GRO was initialized via newLinkEndpoint. To explicitly flush previously +// enqueued packets see flushGRO. enqueueGRO is not thread-safe and must not +// be called concurrently with flushGRO. 
+func (l *linkEndpoint) enqueueGRO(p *packet.Parsed) { + l.mu.RLock() + defer l.mu.RUnlock() + if l.gro.Dispatcher == nil { + return + } + pkt := rxChecksumOffload(p) + if pkt == nil { + return + } + // TODO(jwhited): gro.Enqueue() duplicates a lot of p.Decode(). + // We may want to push stack.PacketBuffer further up as a + // replacement for packet.Parsed, or inversely push packet.Parsed + // down into refactored GRO logic. + l.gro.Enqueue(pkt) + pkt.DecRef() +} + +// flushGRO flushes previously enqueueGRO'd packets to the underlying +// stack.NetworkDispatcher. flushGRO is not thread-safe, and must not be +// called concurrently with enqueueGRO. +func (l *linkEndpoint) flushGRO() { + if !l.initGRO { + // If GRO was not initialized fast path return to avoid scanning GRO + // buckets (see l.gro.Flush()) that will always be empty. + return + } + l.mu.RLock() + defer l.mu.RUnlock() + if l.gro.Dispatcher != nil { + l.gro.Flush() + } +} + +// Attach saves the stack network-layer dispatcher for use later when packets +// are injected. +func (l *linkEndpoint) Attach(dispatcher stack.NetworkDispatcher) { + l.mu.Lock() + defer l.mu.Unlock() + l.dispatcher = dispatcher + l.gro.Dispatcher = dispatcher +} + +// IsAttached implements stack.LinkEndpoint.IsAttached. +func (l *linkEndpoint) IsAttached() bool { + l.mu.RLock() + defer l.mu.RUnlock() + return l.dispatcher != nil +} + +// MTU implements stack.LinkEndpoint.MTU. +func (l *linkEndpoint) MTU() uint32 { + l.mu.RLock() + defer l.mu.RUnlock() + return l.mtu +} + +// SetMTU implements stack.LinkEndpoint.SetMTU. +func (l *linkEndpoint) SetMTU(mtu uint32) { + l.mu.Lock() + defer l.mu.Unlock() + l.mtu = mtu +} + +// Capabilities implements stack.LinkEndpoint.Capabilities. +func (l *linkEndpoint) Capabilities() stack.LinkEndpointCapabilities { + // We are required to offload RX checksum validation for the purposes of + // GRO. + return stack.CapabilityRXChecksumOffload +} + +// GSOMaxSize implements stack.GSOEndpoint. 
+func (*linkEndpoint) GSOMaxSize() uint32 { + // This is an increase from 32k returned by channel.Endpoint.GSOMaxSize() to + // 64k, which improves throughput. + return (1 << 16) - 1 +} + +// SupportedGSO implements stack.GSOEndpoint. +func (l *linkEndpoint) SupportedGSO() stack.SupportedGSO { + return l.SupportedGSOKind +} + +// MaxHeaderLength returns the maximum size of the link layer header. Given it +// doesn't have a header, it just returns 0. +func (*linkEndpoint) MaxHeaderLength() uint16 { + return 0 +} + +// LinkAddress returns the link address of this endpoint. +func (l *linkEndpoint) LinkAddress() tcpip.LinkAddress { + l.mu.RLock() + defer l.mu.RUnlock() + return l.linkAddr +} + +// SetLinkAddress implements stack.LinkEndpoint.SetLinkAddress. +func (l *linkEndpoint) SetLinkAddress(addr tcpip.LinkAddress) { + l.mu.Lock() + defer l.mu.Unlock() + l.linkAddr = addr +} + +// WritePackets stores outbound packets into the channel. +// Multiple concurrent calls are permitted. +func (l *linkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) { + n := 0 + // TODO(jwhited): evaluate writing a stack.PacketBufferList instead of a + // single packet. We can split 2 x 64K GSO across + // wireguard-go/conn.IdealBatchSize (128 slots) @ 1280 MTU, and non-GSO we + // could do more. Read API would need to change to take advantage. Verify + // gVisor limits around max number of segments packed together. Since we + // control MTU (and by effect TCP MSS in gVisor) we *shouldn't* expect to + // ever overflow 128 slots (see wireguard-go/tun.ErrTooManySegments usage). + for _, pkt := range pkts.AsSlice() { + if err := l.q.Write(pkt); err != nil { + if _, ok := err.(*tcpip.ErrNoBufferSpace); !ok && n == 0 { + return 0, err + } + break + } + n++ + } + + return n, nil +} + +// Wait implements stack.LinkEndpoint.Wait. +func (*linkEndpoint) Wait() {} + +// ARPHardwareType implements stack.LinkEndpoint.ARPHardwareType. 
+func (*linkEndpoint) ARPHardwareType() header.ARPHardwareType { + return header.ARPHardwareNone +} + +// AddHeader implements stack.LinkEndpoint.AddHeader. +func (*linkEndpoint) AddHeader(*stack.PacketBuffer) {} + +// ParseHeader implements stack.LinkEndpoint.ParseHeader. +func (*linkEndpoint) ParseHeader(*stack.PacketBuffer) bool { return true } + +// SetOnCloseAction implements stack.LinkEndpoint. +func (*linkEndpoint) SetOnCloseAction(func()) {} diff --git a/wgengine/netstack/link_endpoint_test.go b/wgengine/netstack/link_endpoint_test.go new file mode 100644 index 0000000000000..97bc9e70af5fd --- /dev/null +++ b/wgengine/netstack/link_endpoint_test.go @@ -0,0 +1,112 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package netstack + +import ( + "bytes" + "net/netip" + "testing" + + "gvisor.dev/gvisor/pkg/tcpip" + "gvisor.dev/gvisor/pkg/tcpip/header" + "tailscale.com/net/packet" +) + +func Test_rxChecksumOffload(t *testing.T) { + payloadLen := 100 + + tcpFields := &header.TCPFields{ + SrcPort: 1, + DstPort: 1, + SeqNum: 1, + AckNum: 1, + DataOffset: 20, + Flags: header.TCPFlagAck | header.TCPFlagPsh, + WindowSize: 3000, + } + tcp4 := make([]byte, 20+20+payloadLen) + ipv4H := header.IPv4(tcp4) + ipv4H.Encode(&header.IPv4Fields{ + SrcAddr: tcpip.AddrFromSlice(netip.MustParseAddr("192.0.2.1").AsSlice()), + DstAddr: tcpip.AddrFromSlice(netip.MustParseAddr("192.0.2.2").AsSlice()), + Protocol: uint8(header.TCPProtocolNumber), + TTL: 64, + TotalLength: uint16(len(tcp4)), + }) + ipv4H.SetChecksum(^ipv4H.CalculateChecksum()) + tcpH := header.TCP(tcp4[20:]) + tcpH.Encode(tcpFields) + pseudoCsum := header.PseudoHeaderChecksum(header.TCPProtocolNumber, ipv4H.SourceAddress(), ipv4H.DestinationAddress(), uint16(20+payloadLen)) + tcpH.SetChecksum(^tcpH.CalculateChecksum(pseudoCsum)) + + tcp6ExtHeader := make([]byte, 40+8+20+payloadLen) + ipv6H := header.IPv6(tcp6ExtHeader) + ipv6H.Encode(&header.IPv6Fields{ + SrcAddr: 
tcpip.AddrFromSlice(netip.MustParseAddr("2001:db8::1").AsSlice()), + DstAddr: tcpip.AddrFromSlice(netip.MustParseAddr("2001:db8::2").AsSlice()), + TransportProtocol: 60, // really next header; destination options ext header + HopLimit: 64, + PayloadLength: uint16(8 + 20 + payloadLen), + }) + tcp6ExtHeader[40] = uint8(header.TCPProtocolNumber) // next header + tcp6ExtHeader[41] = 0 // length of ext header in 8-octet units, exclusive of first 8 octets. + // 42-47 options and padding + tcpH = header.TCP(tcp6ExtHeader[48:]) + tcpH.Encode(tcpFields) + pseudoCsum = header.PseudoHeaderChecksum(header.TCPProtocolNumber, ipv6H.SourceAddress(), ipv6H.DestinationAddress(), uint16(20+payloadLen)) + tcpH.SetChecksum(^tcpH.CalculateChecksum(pseudoCsum)) + + tcp4InvalidCsum := make([]byte, len(tcp4)) + copy(tcp4InvalidCsum, tcp4) + at := 20 + 16 + tcp4InvalidCsum[at] = ^tcp4InvalidCsum[at] + + tcp6ExtHeaderInvalidCsum := make([]byte, len(tcp6ExtHeader)) + copy(tcp6ExtHeaderInvalidCsum, tcp6ExtHeader) + at = 40 + 8 + 16 + tcp6ExtHeaderInvalidCsum[at] = ^tcp6ExtHeaderInvalidCsum[at] + + tests := []struct { + name string + input []byte + wantPB bool + }{ + { + "tcp4 packet valid csum", + tcp4, + true, + }, + { + "tcp6 with ext header valid csum", + tcp6ExtHeader, + true, + }, + { + "tcp4 packet invalid csum", + tcp4InvalidCsum, + false, + }, + { + "tcp6 with ext header invalid csum", + tcp6ExtHeaderInvalidCsum, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := &packet.Parsed{} + p.Decode(tt.input) + got := rxChecksumOffload(p) + if tt.wantPB != (got != nil) { + t.Fatalf("wantPB = %v != (got != nil): %v", tt.wantPB, got != nil) + } + if tt.wantPB { + gotBuf := got.ToBuffer() + if !bytes.Equal(tt.input, gotBuf.Flatten()) { + t.Fatal("output packet unequal to input") + } + } + }) + } +} diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index 4d08a20edc260..3c8a357d96ba3 100644 --- a/wgengine/netstack/netstack.go +++ 
b/wgengine/netstack/netstack.go @@ -5,7 +5,6 @@ package netstack import ( - "bytes" "context" "errors" "expvar" @@ -21,12 +20,10 @@ import ( "sync/atomic" "time" - "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/refs" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/adapters/gonet" "gvisor.dev/gvisor/pkg/tcpip/header" - "gvisor.dev/gvisor/pkg/tcpip/link/channel" "gvisor.dev/gvisor/pkg/tcpip/network/ipv4" "gvisor.dev/gvisor/pkg/tcpip/network/ipv6" "gvisor.dev/gvisor/pkg/tcpip/stack" @@ -176,7 +173,7 @@ type Impl struct { ProcessSubnets bool ipstack *stack.Stack - linkEP *channel.Endpoint + linkEP *linkEndpoint tundev *tstun.Wrapper e wgengine.Engine pm *proxymap.Mapper @@ -245,6 +242,44 @@ const nicID = 1 // have a UDP packet as big as the MTU. const maxUDPPacketSize = tstun.MaxPacketSize +func setTCPBufSizes(ipstack *stack.Stack) error { + // tcpip.TCP{Receive,Send}BufferSizeRangeOption is gVisor's version of + // Linux's tcp_{r,w}mem. Application within gVisor differs as some Linux + // features are not (yet) implemented, and socket buffer memory is not + // controlled within gVisor, e.g. we allocate *stack.PacketBuffer's for the + // write path within Tailscale. Therefore, we loosen our understanding of + // the relationship between these Linux and gVisor tunables. The chosen + // values are biased towards higher throughput on high bandwidth-delay + // product paths, except on memory-constrained platforms. + tcpRXBufOpt := tcpip.TCPReceiveBufferSizeRangeOption{ + // Min is unused by gVisor at the time of writing, but partially plumbed + // for application by the TCP_WINDOW_CLAMP socket option. + Min: tcpRXBufMinSize, + // Default is used by gVisor at socket creation. + Default: tcpRXBufDefSize, + // Max is used by gVisor to cap the advertised receive window post-read. + // (tcp_moderate_rcvbuf=true, the default). 
+ Max: tcpRXBufMaxSize, + } + tcpipErr := ipstack.SetTransportProtocolOption(tcp.ProtocolNumber, &tcpRXBufOpt) + if tcpipErr != nil { + return fmt.Errorf("could not set TCP RX buf size: %v", tcpipErr) + } + tcpTXBufOpt := tcpip.TCPSendBufferSizeRangeOption{ + // Min is unused by gVisor at the time of writing. + Min: tcpTXBufMinSize, + // Default is used by gVisor at socket creation. + Default: tcpTXBufDefSize, + // Max is used by gVisor to cap the send window. + Max: tcpTXBufMaxSize, + } + tcpipErr = ipstack.SetTransportProtocolOption(tcp.ProtocolNumber, &tcpTXBufOpt) + if tcpipErr != nil { + return fmt.Errorf("could not set TCP TX buf size: %v", tcpipErr) + } + return nil +} + // Create creates and populates a new Impl. func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magicsock.Conn, dialer *tsdial.Dialer, dns *dns.Manager, pm *proxymap.Mapper, driveForLocal drive.FileSystemForLocal) (*Impl, error) { if mc == nil { @@ -285,7 +320,19 @@ func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magi return nil, fmt.Errorf("could not disable TCP RACK: %v", tcpipErr) } } - linkEP := channel.New(512, uint32(tstun.DefaultTUNMTU()), "") + err := setTCPBufSizes(ipstack) + if err != nil { + return nil, err + } + var linkEP *linkEndpoint + if runtime.GOOS == "linux" { + // TODO(jwhited): add Windows GSO support https://github.com/tailscale/corp/issues/21874 + // TODO(jwhited): exercise enableGRO in relation to https://github.com/tailscale/corp/issues/22353 + linkEP = newLinkEndpoint(512, uint32(tstun.DefaultTUNMTU()), "", disableGRO) + linkEP.SupportedGSOKind = stack.HostGSOSupported + } else { + linkEP = newLinkEndpoint(512, uint32(tstun.DefaultTUNMTU()), "", disableGRO) + } if tcpipProblem := ipstack.CreateNIC(nicID, linkEP); tcpipProblem != nil { return nil, fmt.Errorf("could not create netstack NIC: %v", tcpipProblem) } @@ -333,6 +380,7 @@ func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magi 
ns.ctx, ns.ctxCancel = context.WithCancel(context.Background()) ns.atomicIsLocalIPFunc.Store(ipset.FalseContainsIPFunc()) ns.tundev.PostFilterPacketInboundFromWireGuard = ns.injectInbound + ns.tundev.EndPacketVectorInboundFromWireGuardFlush = linkEP.flushGRO ns.tundev.PreFilterPacketOutboundToWireGuardNetstackIntercept = ns.handleLocalPackets stacksForMetrics.Store(ns, struct{}{}) return ns, nil @@ -509,9 +557,7 @@ func (ns *Impl) Start(lb *ipnlocal.LocalBackend) error { panic("nil LocalBackend") } ns.lb = lb - // size = 0 means use default buffer size - const tcpReceiveBufferSize = 0 - tcpFwd := tcp.NewForwarder(ns.ipstack, tcpReceiveBufferSize, maxInFlightConnectionAttempts(), ns.acceptTCP) + tcpFwd := tcp.NewForwarder(ns.ipstack, tcpRXBufDefSize, maxInFlightConnectionAttempts(), ns.acceptTCP) udpFwd := udp.NewForwarder(ns.ipstack, ns.acceptUDP) ns.ipstack.SetTransportProtocolHandler(tcp.ProtocolNumber, ns.wrapTCPProtocolHandler(tcpFwd.HandlePacket)) ns.ipstack.SetTransportProtocolHandler(udp.ProtocolNumber, ns.wrapUDPProtocolHandler(udpFwd.HandlePacket)) @@ -734,23 +780,11 @@ func (ns *Impl) handleLocalPackets(p *packet.Parsed, t *tstun.Wrapper) filter.Re // care about the packet; resume processing. return filter.Accept } - - var pn tcpip.NetworkProtocolNumber - switch p.IPVersion { - case 4: - pn = header.IPv4ProtocolNumber - case 6: - pn = header.IPv6ProtocolNumber - } if debugPackets { ns.logf("[v2] service packet in (from %v): % x", p.Src, p.Buffer()) } - packetBuf := stack.NewPacketBuffer(stack.PacketBufferOptions{ - Payload: buffer.MakeWithData(bytes.Clone(p.Buffer())), - }) - ns.linkEP.InjectInbound(pn, packetBuf) - packetBuf.DecRef() + ns.linkEP.injectInbound(p) return filter.DropSilently } @@ -791,7 +825,7 @@ func (ns *Impl) DialContextUDP(ctx context.Context, ipp netip.AddrPort) (*gonet. func (ns *Impl) inject() { for { pkt := ns.linkEP.ReadContext(ns.ctx) - if pkt.IsNil() { + if pkt == nil { if ns.ctx.Err() != nil { // Return without logging. 
return @@ -1035,21 +1069,10 @@ func (ns *Impl) injectInbound(p *packet.Parsed, t *tstun.Wrapper) filter.Respons return filter.DropSilently } - var pn tcpip.NetworkProtocolNumber - switch p.IPVersion { - case 4: - pn = header.IPv4ProtocolNumber - case 6: - pn = header.IPv6ProtocolNumber - } if debugPackets { ns.logf("[v2] packet in (from %v): % x", p.Src, p.Buffer()) } - packetBuf := stack.NewPacketBuffer(stack.PacketBufferOptions{ - Payload: buffer.MakeWithData(bytes.Clone(p.Buffer())), - }) - ns.linkEP.InjectInbound(pn, packetBuf) - packetBuf.DecRef() + ns.linkEP.enqueueGRO(p) // We've now delivered this to netstack, so we're done. // Instead of returning a filter.Accept here (which would also @@ -1116,11 +1139,9 @@ func (ns *Impl) shouldHandlePing(p *packet.Parsed) (_ netip.Addr, ok bool) { func netaddrIPFromNetstackIP(s tcpip.Address) netip.Addr { switch s.Len() { case 4: - s := s.As4() - return netaddr.IPv4(s[0], s[1], s[2], s[3]) + return netip.AddrFrom4(s.As4()) case 16: - s := s.As16() - return netip.AddrFrom16(s).Unmap() + return netip.AddrFrom16(s.As16()).Unmap() } return netip.Addr{} } diff --git a/wgengine/netstack/netstack_tcpbuf_default.go b/wgengine/netstack/netstack_tcpbuf_default.go new file mode 100644 index 0000000000000..3640964ffe399 --- /dev/null +++ b/wgengine/netstack/netstack_tcpbuf_default.go @@ -0,0 +1,20 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ios + +package netstack + +import ( + "gvisor.dev/gvisor/pkg/tcpip/transport/tcp" +) + +const ( + tcpRXBufMinSize = tcp.MinBufferSize + tcpRXBufDefSize = tcp.DefaultSendBufferSize + tcpRXBufMaxSize = 8 << 20 // 8MiB + + tcpTXBufMinSize = tcp.MinBufferSize + tcpTXBufDefSize = tcp.DefaultReceiveBufferSize + tcpTXBufMaxSize = 6 << 20 // 6MiB +) diff --git a/wgengine/netstack/netstack_tcpbuf_ios.go b/wgengine/netstack/netstack_tcpbuf_ios.go new file mode 100644 index 0000000000000..a4210c9ac7517 --- /dev/null +++ 
b/wgengine/netstack/netstack_tcpbuf_ios.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ios + +package netstack + +import ( + "gvisor.dev/gvisor/pkg/tcpip/transport/tcp" +) + +const ( + // tcp{RX,TX}Buf{Min,Def,Max}Size mirror gVisor defaults. We leave these + // unchanged on iOS for now as to not increase pressure towards the + // NetworkExtension memory limit. + // TODO(jwhited): test memory/throughput impact of collapsing to values in _default.go + tcpRXBufMinSize = tcp.MinBufferSize + tcpRXBufDefSize = tcp.DefaultSendBufferSize + tcpRXBufMaxSize = tcp.MaxBufferSize + + tcpTXBufMinSize = tcp.MinBufferSize + tcpTXBufDefSize = tcp.DefaultReceiveBufferSize + tcpTXBufMaxSize = tcp.MaxBufferSize +) diff --git a/wgengine/router/ifconfig_windows.go b/wgengine/router/ifconfig_windows.go index 779acc881da6a..40e9dc6e0cdfd 100644 --- a/wgengine/router/ifconfig_windows.go +++ b/wgengine/router/ifconfig_windows.go @@ -320,12 +320,24 @@ func configureInterface(cfg *Config, tun *tun.NativeTun, ht *health.Tracker) (re ipif6 = nil } - // Windows requires routes to have a nexthop. For routes such as - // ours where the nexthop is meaningless, you're supposed to use - // one of the local IP addresses of the interface. Find an IPv4 - // and IPv6 address we can use for this purpose. - var firstGateway4 netip.Addr - var firstGateway6 netip.Addr + // Windows requires routes to have a nexthop. Routes created using + // the interface's local IP address or an unspecified IP address + // ("0.0.0.0" or "::") as the nexthop are considered on-link routes. + // + // Notably, Windows treats on-link subnet routes differently, reserving the last + // IP in the range as the broadcast IP and therefore prohibiting TCP connections + // to it, resulting in WSA error 10049: "The requested address is not valid in its context." 
+ // This does not happen with single-host routes, such as routes to Tailscale IP addresses, + // but becomes a problem with advertised subnets when all IPs in the range should be reachable. + // See https://github.com/tailscale/support-escalations/issues/57 for details. + // + // For routes such as ours where the nexthop is meaningless, we can use an + // arbitrary nexthop address, such as TailscaleServiceIP, to prevent the + // routes from being marked as on-link. We can still create on-link routes + // for single-host Tailscale routes, but we shouldn't attempt to create a + // route for the interface's own IP. + var localAddr4, localAddr6 netip.Addr + var gatewayAddr4, gatewayAddr6 netip.Addr addresses := make([]netip.Prefix, 0, len(cfg.LocalAddrs)) for _, addr := range cfg.LocalAddrs { if (addr.Addr().Is4() && ipif4 == nil) || (addr.Addr().Is6() && ipif6 == nil) { @@ -333,10 +345,12 @@ func configureInterface(cfg *Config, tun *tun.NativeTun, ht *health.Tracker) (re continue } addresses = append(addresses, addr) - if addr.Addr().Is4() && !firstGateway4.IsValid() { - firstGateway4 = addr.Addr() - } else if addr.Addr().Is6() && !firstGateway6.IsValid() { - firstGateway6 = addr.Addr() + if addr.Addr().Is4() && !gatewayAddr4.IsValid() { + localAddr4 = addr.Addr() + gatewayAddr4 = tsaddr.TailscaleServiceIP() + } else if addr.Addr().Is6() && !gatewayAddr6.IsValid() { + localAddr6 = addr.Addr() + gatewayAddr6 = tsaddr.TailscaleServiceIPv6() } } @@ -349,7 +363,7 @@ func configureInterface(cfg *Config, tun *tun.NativeTun, ht *health.Tracker) (re continue } - if route.Addr().Is6() && !firstGateway6.IsValid() { + if route.Addr().Is6() && !gatewayAddr6.IsValid() { // Windows won't let us set IPv6 routes without having an // IPv6 local address set. However, when we've configured // a default route, we want to forcibly grab IPv6 traffic @@ -358,43 +372,51 @@ func configureInterface(cfg *Config, tun *tun.NativeTun, ht *health.Tracker) (re // route source. 
ip := tsaddr.Tailscale4To6Placeholder() addresses = append(addresses, netip.PrefixFrom(ip, ip.BitLen())) - firstGateway6 = ip - } else if route.Addr().Is4() && !firstGateway4.IsValid() { + gatewayAddr6 = ip + } else if route.Addr().Is4() && !gatewayAddr4.IsValid() { // TODO: do same dummy behavior as v6? return errors.New("due to a Windows limitation, one cannot have interface routes without an interface address") } - var gateway netip.Addr + var gateway, localAddr netip.Addr if route.Addr().Is4() { - gateway = firstGateway4 + localAddr = localAddr4 + gateway = gatewayAddr4 } else if route.Addr().Is6() { - gateway = firstGateway6 + localAddr = localAddr6 + gateway = gatewayAddr6 } - r := &routeData{ - RouteData: winipcfg.RouteData{ - Destination: route, - NextHop: gateway, - Metric: 0, - }, - } - if r.Destination.Addr().Unmap() == gateway { + + switch destAddr := route.Addr().Unmap(); { + case destAddr == localAddr: // no need to add a route for the interface's // own IP. The kernel does that for us. // If we try to replace it, we'll fail to // add the route unless NextHop is set, but // then the interface's IP won't be pingable. continue + case route.IsSingleIP() && (destAddr == gateway || tsaddr.IsTailscaleIP(destAddr)): + // add an on-link route if the destination + // is the nexthop itself or a single Tailscale IP. 
+ gateway = localAddr } + + r := &routeData{ + RouteData: winipcfg.RouteData{ + Destination: route, + NextHop: gateway, + Metric: 0, + }, + } + if route.Addr().Is4() { if route.Bits() == 0 { foundDefault4 = true } - r.NextHop = firstGateway4 } else if route.Addr().Is6() { if route.Bits() == 0 { foundDefault6 = true } - r.NextHop = firstGateway6 } routes = append(routes, r) } diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 6399476c8eeb9..1a3c7637fee00 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -374,7 +374,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) e.logf("onPortUpdate(port=%v, network=%s)", port, network) if err := e.router.UpdateMagicsockPort(port, network); err != nil { - e.logf("UpdateMagicsockPort(port=%v, network=%s) failed: %w", port, network, err) + e.logf("UpdateMagicsockPort(port=%v, network=%s) failed: %v", port, network, err) } } magicsockOpts := magicsock.Options{ diff --git a/words/scales.txt b/words/scales.txt index e1394fc99a647..f27dfc5c4aa36 100644 --- a/words/scales.txt +++ b/words/scales.txt @@ -147,6 +147,8 @@ delta diatonic diminished dominant +solfege +solfeggio dorian enigmatic freygish diff --git a/words/tails.txt b/words/tails.txt index 83a4cf544ab1f..4975332419855 100644 --- a/words/tails.txt +++ b/words/tails.txt @@ -692,3 +692,5 @@ azules tabby ussuri kitty +tanuki +neko