diff --git a/VERSION.txt b/VERSION.txt index 80627411dcee7..32a6ce3c719b4 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.74.1 +1.76.0 diff --git a/api.md b/api.md index 08fb4be2878bc..9f14e0636a8f7 100644 --- a/api.md +++ b/api.md @@ -1,104 +1,2 @@ > [!IMPORTANT] > The Tailscale API documentation has moved to https://tailscale.com/api - -# Tailscale API - -The Tailscale API documentation is located in **[tailscale/publicapi](./publicapi/readme.md#tailscale-api)**. - -# APIs - -**[Overview](./publicapi/readme.md)** - -**[Device](./publicapi/device.md#device)** - - - - - - - - - - -- Get a device: [`GET /api/v2/device/{deviceid}`](./publicapi/device.md#get-device) -- Delete a device: [`DELETE /api/v2/device/{deviceID}`](./publicapi/device.md#delete-device) -- Expire device key: [`POST /api/v2/device/{deviceID}/expire`](./publicapi/device.md#expire-device-key) -- [**Routes**](./publicapi/device.md#routes) - - Get device routes: [`GET /api/v2/device/{deviceID}/routes`](./publicapi/device.md#get-device-routes) - - Set device routes: [`POST /api/v2/device/{deviceID}/routes`](./publicapi/device.md#set-device-routes) -- [**Authorize**](./publicapi/device.md#authorize) - - Authorize a device: [`POST /api/v2/device/{deviceID}/authorized`](./publicapi/device.md#authorize-device) -- [**Tags**](./publicapi/device.md#tags) - - Update tags: [`POST /api/v2/device/{deviceID}/tags`](./publicapi/device.md#update-device-tags) -- [**Keys**](./publicapi/device.md#keys) - - Update device key: [`POST /api/v2/device/{deviceID}/key`](./publicapi/device.md#update-device-key) -- [**IP Addresses**](./publicapi/device.md#ip-addresses) - - Set device IPv4 address: [`POST /api/v2/device/{deviceID}/ip`](./publicapi/device.md#set-device-ipv4-address) -- [**Device posture attributes**](./publicapi/device.md#device-posture-attributes) - - Get device posture attributes: [`GET /api/v2/device/{deviceID}/attributes`](./publicapi/device.md#get-device-posture-attributes) - - Set custom device 
posture attributes: [`POST /api/v2/device/{deviceID}/attributes/{attributeKey}`](./publicapi/device.md#set-device-posture-attributes) - - Delete custom device posture attributes: [`DELETE /api/v2/device/{deviceID}/attributes/{attributeKey}`](./publicapi/device.md#delete-custom-device-posture-attributes) -- [**Device invites**](./publicapi/device.md#invites-to-a-device) - - List device invites: [`GET /api/v2/device/{deviceID}/device-invites`](./publicapi/device.md#list-device-invites) - - Create device invites: [`POST /api/v2/device/{deviceID}/device-invites`](./publicapi/device.md#create-device-invites) - -**[Tailnet](./publicapi/tailnet.md#tailnet)** - - - - - - - - - - - - - - - - - -- [**Policy File**](./publicapi/tailnet.md#policy-file) - - Get policy file: [`GET /api/v2/tailnet/{tailnet}/acl`](./publicapi/tailnet.md#get-policy-file) - - Update policy file: [`POST /api/v2/tailnet/{tailnet}/acl`](./publicapi/tailnet.md#update-policy-file) - - Preview rule matches: [`POST /api/v2/tailnet/{tailnet}/acl/preview`](./publicapi/tailnet.md#preview-policy-file-rule-matches) - - Validate and test policy file: [`POST /api/v2/tailnet/{tailnet}/acl/validate`](./publicapi/tailnet.md#validate-and-test-policy-file) -- [**Devices**](./publicapi/tailnet.md#devices) - - List tailnet devices: [`GET /api/v2/tailnet/{tailnet}/devices`](./publicapi/tailnet.md#list-tailnet-devices) -- [**Keys**](./publicapi/tailnet.md#tailnet-keys) - - List tailnet keys: [`GET /api/v2/tailnet/{tailnet}/keys`](./publicapi/tailnet.md#list-tailnet-keys) - - Create an auth key: [`POST /api/v2/tailnet/{tailnet}/keys`](./publicapi/tailnet.md#create-auth-key) - - Get a key: [`GET /api/v2/tailnet/{tailnet}/keys/{keyid}`](./publicapi/tailnet.md#get-key) - - Delete a key: [`DELETE /api/v2/tailnet/{tailnet}/keys/{keyid}`](./publicapi/tailnet.md#delete-key) -- [**DNS**](./publicapi/tailnet.md#dns) - - [**Nameservers**](./publicapi/tailnet.md#nameservers) - - Get nameservers: [`GET 
/api/v2/tailnet/{tailnet}/dns/nameservers`](./publicapi/tailnet.md#get-nameservers) - - Set nameservers: [`POST /api/v2/tailnet/{tailnet}/dns/nameservers`](./publicapi/tailnet.md#set-nameservers) - - [**Preferences**](./publicapi/tailnet.md#preferences) - - Get DNS preferences: [`GET /api/v2/tailnet/{tailnet}/dns/preferences`](./publicapi/tailnet.md#get-dns-preferences) - - Set DNS preferences: [`POST /api/v2/tailnet/{tailnet}/dns/preferences`](./publicapi/tailnet.md#set-dns-preferences) - - [**Search Paths**](./publicapi/tailnet.md#search-paths) - - Get search paths: [`GET /api/v2/tailnet/{tailnet}/dns/searchpaths`](./publicapi/tailnet.md#get-search-paths) - - Set search paths: [`POST /api/v2/tailnet/{tailnet}/dns/searchpaths`](./publicapi/tailnet.md#set-search-paths) - - [**Split DNS**](./publicapi/tailnet.md#split-dns) - - Get split DNS: [`GET /api/v2/tailnet/{tailnet}/dns/split-dns`](./publicapi/tailnet.md#get-split-dns) - - Update split DNS: [`PATCH /api/v2/tailnet/{tailnet}/dns/split-dns`](./publicapi/tailnet.md#update-split-dns) - - Set split DNS: [`PUT /api/v2/tailnet/{tailnet}/dns/split-dns`](./publicapi/tailnet.md#set-split-dns) -- [**User invites**](./publicapi/tailnet.md#tailnet-user-invites) - - List user invites: [`GET /api/v2/tailnet/{tailnet}/user-invites`](./publicapi/tailnet.md#list-user-invites) - - Create user invites: [`POST /api/v2/tailnet/{tailnet}/user-invites`](./publicapi/tailnet.md#create-user-invites) - -**[User invites](./publicapi/userinvites.md#user-invites)** - -- Get user invite: [`GET /api/v2/user-invites/{userInviteId}`](./publicapi/userinvites.md#get-user-invite) -- Delete user invite: [`DELETE /api/v2/user-invites/{userInviteId}`](./publicapi/userinvites.md#delete-user-invite) -- Resend user invite (by email): [`POST /api/v2/user-invites/{userInviteId}/resend`](#resend-user-invite) - -**[Device invites](./publicapi/deviceinvites.md#device-invites)** - -- Get device invite: [`GET 
/api/v2/device-invites/{deviceInviteId}`](./publicapi/deviceinvites.md#get-device-invite) -- Delete device invite: [`DELETE /api/v2/device-invites/{deviceInviteId}`](./publicapi/deviceinvites.md#delete-device-invite) -- Resend device invite (by email): [`POST /api/v2/device-invites/{deviceInviteId}/resend`](./publicapi/deviceinvites.md#resend-device-invite) -- Accept device invite [`POST /api/v2/device-invites/-/accept`](#accept-device-invite) diff --git a/assert_ts_toolchain_match.go b/assert_ts_toolchain_match.go new file mode 100644 index 0000000000000..40b24b334674f --- /dev/null +++ b/assert_ts_toolchain_match.go @@ -0,0 +1,27 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build tailscale_go + +package tailscaleroot + +import ( + "fmt" + "os" + "strings" +) + +func init() { + tsRev, ok := tailscaleToolchainRev() + if !ok { + panic("binary built with tailscale_go build tag but failed to read build info or find tailscale.toolchain.rev in build info") + } + want := strings.TrimSpace(GoToolchainRev) + if tsRev != want { + if os.Getenv("TS_PERMIT_TOOLCHAIN_MISMATCH") == "1" { + fmt.Fprintf(os.Stderr, "tailscale.toolchain.rev = %q, want %q; but ignoring due to TS_PERMIT_TOOLCHAIN_MISMATCH=1\n", tsRev, want) + return + } + panic(fmt.Sprintf("binary built with tailscale_go build tag but Go toolchain %q doesn't match github.com/tailscale/tailscale expected value %q; override this failure with TS_PERMIT_TOOLCHAIN_MISMATCH=1", tsRev, want)) + } +} diff --git a/client/tailscale/apitype/apitype.go b/client/tailscale/apitype/apitype.go index 81879aac31ded..b1c273a4fd462 100644 --- a/client/tailscale/apitype/apitype.go +++ b/client/tailscale/apitype/apitype.go @@ -4,7 +4,10 @@ // Package apitype contains types for the Tailscale LocalAPI and control plane API. 
package apitype -import "tailscale.com/tailcfg" +import ( + "tailscale.com/tailcfg" + "tailscale.com/types/dnstype" +) // LocalAPIHost is the Host header value used by the LocalAPI. const LocalAPIHost = "local-tailscaled.sock" @@ -65,3 +68,11 @@ type DNSOSConfig struct { SearchDomains []string MatchDomains []string } + +// DNSQueryResponse is the response to a DNS query request sent via LocalAPI. +type DNSQueryResponse struct { + // Bytes is the raw DNS response bytes. + Bytes []byte + // Resolvers is the list of resolvers that the forwarder deemed able to resolve the query. + Resolvers []*dnstype.Resolver +} diff --git a/client/tailscale/localclient.go b/client/tailscale/localclient.go index 29e28a1549a7d..df51dc1cab52c 100644 --- a/client/tailscale/localclient.go +++ b/client/tailscale/localclient.go @@ -37,6 +37,7 @@ import ( "tailscale.com/safesocket" "tailscale.com/tailcfg" "tailscale.com/tka" + "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/types/tkatype" ) @@ -813,6 +814,8 @@ func (lc *LocalClient) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn return decodeJSON[*ipn.Prefs](body) } +// GetDNSOSConfig returns the system DNS configuration for the current device. +// That is, it returns the DNS configuration that the system would use if Tailscale weren't being used. func (lc *LocalClient) GetDNSOSConfig(ctx context.Context) (*apitype.DNSOSConfig, error) { body, err := lc.get200(ctx, "/localapi/v0/dns-osconfig") if err != nil { @@ -825,6 +828,21 @@ func (lc *LocalClient) GetDNSOSConfig(ctx context.Context) (*apitype.DNSOSConfig return &osCfg, nil } +// QueryDNS executes a DNS query for a name (`google.com.`) and query type (`CNAME`). +// It returns the raw DNS response bytes and the resolvers that were used to answer the query +// (often just one, but can be more if we raced multiple resolvers). 
+func (lc *LocalClient) QueryDNS(ctx context.Context, name string, queryType string) (bytes []byte, resolvers []*dnstype.Resolver, err error) { + body, err := lc.get200(ctx, fmt.Sprintf("/localapi/v0/dns-query?name=%s&type=%s", url.QueryEscape(name), queryType)) + if err != nil { + return nil, nil, err + } + var res apitype.DNSQueryResponse + if err := json.Unmarshal(body, &res); err != nil { + return nil, nil, fmt.Errorf("invalid query response: %w", err) + } + return res.Bytes, res.Resolvers, nil +} + // StartLoginInteractive starts an interactive login. func (lc *LocalClient) StartLoginInteractive(ctx context.Context) error { _, err := lc.send(ctx, "POST", "/localapi/v0/login-interactive", http.StatusNoContent, nil) diff --git a/client/web/web.go b/client/web/web.go index b914070b25af0..04ba2d086334a 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -17,7 +17,6 @@ import ( "os" "path" "path/filepath" - "slices" "strings" "sync" "time" @@ -35,6 +34,7 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" "tailscale.com/types/logger" + "tailscale.com/types/views" "tailscale.com/util/httpm" "tailscale.com/version" "tailscale.com/version/distro" @@ -113,11 +113,6 @@ const ( ManageServerMode ServerMode = "manage" ) -var ( - exitNodeRouteV4 = netip.MustParsePrefix("0.0.0.0/0") - exitNodeRouteV6 = netip.MustParsePrefix("::/0") -) - // ServerOpts contains options for constructing a new Server. type ServerOpts struct { // Mode specifies the mode of web client being constructed. 
@@ -927,10 +922,10 @@ func (s *Server) serveGetNodeData(w http.ResponseWriter, r *http.Request) { return p == route }) } - data.AdvertisingExitNodeApproved = routeApproved(exitNodeRouteV4) || routeApproved(exitNodeRouteV6) + data.AdvertisingExitNodeApproved = routeApproved(tsaddr.AllIPv4()) || routeApproved(tsaddr.AllIPv6()) for _, r := range prefs.AdvertiseRoutes { - if r == exitNodeRouteV4 || r == exitNodeRouteV6 { + if tsaddr.IsExitRoute(r) { data.AdvertisingExitNode = true } else { data.AdvertisedRoutes = append(data.AdvertisedRoutes, subnetRoute{ @@ -1071,7 +1066,7 @@ func (s *Server) servePostRoutes(ctx context.Context, data postRoutesRequest) er var currNonExitRoutes []string var currAdvertisingExitNode bool for _, r := range prefs.AdvertiseRoutes { - if r == exitNodeRouteV4 || r == exitNodeRouteV6 { + if tsaddr.IsExitRoute(r) { currAdvertisingExitNode = true continue } @@ -1092,12 +1087,7 @@ func (s *Server) servePostRoutes(ctx context.Context, data postRoutesRequest) er return err } - hasExitNodeRoute := func(all []netip.Prefix) bool { - return slices.Contains(all, exitNodeRouteV4) || - slices.Contains(all, exitNodeRouteV6) - } - - if !data.UseExitNode.IsZero() && hasExitNodeRoute(routes) { + if !data.UseExitNode.IsZero() && tsaddr.ContainsExitRoutes(views.SliceOf(routes)) { return errors.New("cannot use and advertise exit node at same time") } diff --git a/clientupdate/clientupdate.go b/clientupdate/clientupdate.go index 67edce05b8107..7fa84d67f9d8a 100644 --- a/clientupdate/clientupdate.go +++ b/clientupdate/clientupdate.go @@ -27,11 +27,8 @@ import ( "strconv" "strings" - "github.com/google/uuid" - "tailscale.com/clientupdate/distsign" "tailscale.com/types/logger" "tailscale.com/util/cmpver" - "tailscale.com/util/winutil" "tailscale.com/version" "tailscale.com/version/distro" ) @@ -756,164 +753,6 @@ func (up *Updater) updateMacAppStore() error { return nil } -const ( - // winMSIEnv is the environment variable that, if set, is the MSI file for - // the 
update command to install. It's passed like this so we can stop the - // tailscale.exe process from running before the msiexec process runs and - // tries to overwrite ourselves. - winMSIEnv = "TS_UPDATE_WIN_MSI" - // winExePathEnv is the environment variable that is set along with - // winMSIEnv and carries the full path of the calling tailscale.exe binary. - // It is used to re-launch the GUI process (tailscale-ipn.exe) after - // install is complete. - winExePathEnv = "TS_UPDATE_WIN_EXE_PATH" -) - -var ( - verifyAuthenticode func(string) error // set non-nil only on Windows - markTempFileFunc func(string) error // set non-nil only on Windows -) - -func (up *Updater) updateWindows() error { - if msi := os.Getenv(winMSIEnv); msi != "" { - // stdout/stderr from this part of the install could be lost since the - // parent tailscaled is replaced. Create a temp log file to have some - // output to debug with in case update fails. - close, err := up.switchOutputToFile() - if err != nil { - up.Logf("failed to create log file for installation: %v; proceeding with existing outputs", err) - } else { - defer close.Close() - } - - up.Logf("installing %v ...", msi) - if err := up.installMSI(msi); err != nil { - up.Logf("MSI install failed: %v", err) - return err - } - - up.Logf("success.") - return nil - } - - if !winutil.IsCurrentProcessElevated() { - return errors.New(`update must be run as Administrator - -you can run the command prompt as Administrator one of these ways: -* right-click cmd.exe, select 'Run as administrator' -* press Windows+x, then press a -* press Windows+r, type in "cmd", then press Ctrl+Shift+Enter`) - } - ver, err := requestedTailscaleVersion(up.Version, up.Track) - if err != nil { - return err - } - arch := runtime.GOARCH - if arch == "386" { - arch = "x86" - } - if !up.confirm(ver) { - return nil - } - - tsDir := filepath.Join(os.Getenv("ProgramData"), "Tailscale") - msiDir := filepath.Join(tsDir, "MSICache") - if fi, err := os.Stat(tsDir); err != 
nil { - return fmt.Errorf("expected %s to exist, got stat error: %w", tsDir, err) - } else if !fi.IsDir() { - return fmt.Errorf("expected %s to be a directory; got %v", tsDir, fi.Mode()) - } - if err := os.MkdirAll(msiDir, 0700); err != nil { - return err - } - up.cleanupOldDownloads(filepath.Join(msiDir, "*.msi")) - pkgsPath := fmt.Sprintf("%s/tailscale-setup-%s-%s.msi", up.Track, ver, arch) - msiTarget := filepath.Join(msiDir, path.Base(pkgsPath)) - if err := up.downloadURLToFile(pkgsPath, msiTarget); err != nil { - return err - } - - up.Logf("verifying MSI authenticode...") - if err := verifyAuthenticode(msiTarget); err != nil { - return fmt.Errorf("authenticode verification of %s failed: %w", msiTarget, err) - } - up.Logf("authenticode verification succeeded") - - up.Logf("making tailscale.exe copy to switch to...") - up.cleanupOldDownloads(filepath.Join(os.TempDir(), "tailscale-updater-*.exe")) - selfOrig, selfCopy, err := makeSelfCopy() - if err != nil { - return err - } - defer os.Remove(selfCopy) - up.Logf("running tailscale.exe copy for final install...") - - cmd := exec.Command(selfCopy, "update") - cmd.Env = append(os.Environ(), winMSIEnv+"="+msiTarget, winExePathEnv+"="+selfOrig) - cmd.Stdout = up.Stderr - cmd.Stderr = up.Stderr - cmd.Stdin = os.Stdin - if err := cmd.Start(); err != nil { - return err - } - // Once it's started, exit ourselves, so the binary is free - // to be replaced. - os.Exit(0) - panic("unreachable") -} - -func (up *Updater) switchOutputToFile() (io.Closer, error) { - var logFilePath string - exePath, err := os.Executable() - if err != nil { - logFilePath = filepath.Join(os.TempDir(), "tailscale-updater.log") - } else { - logFilePath = strings.TrimSuffix(exePath, ".exe") + ".log" - } - - up.Logf("writing update output to %q", logFilePath) - logFile, err := os.Create(logFilePath) - if err != nil { - return nil, err - } - - up.Logf = func(m string, args ...any) { - fmt.Fprintf(logFile, m+"\n", args...) 
- } - up.Stdout = logFile - up.Stderr = logFile - return logFile, nil -} - -func (up *Updater) installMSI(msi string) error { - var err error - for tries := 0; tries < 2; tries++ { - cmd := exec.Command("msiexec.exe", "/i", filepath.Base(msi), "/quiet", "/norestart", "/qn") - cmd.Dir = filepath.Dir(msi) - cmd.Stdout = up.Stdout - cmd.Stderr = up.Stderr - cmd.Stdin = os.Stdin - err = cmd.Run() - if err == nil { - break - } - up.Logf("Install attempt failed: %v", err) - uninstallVersion := up.currentVersion - if v := os.Getenv("TS_DEBUG_UNINSTALL_VERSION"); v != "" { - uninstallVersion = v - } - // Assume it's a downgrade, which msiexec won't permit. Uninstall our current version first. - up.Logf("Uninstalling current version %q for downgrade...", uninstallVersion) - cmd = exec.Command("msiexec.exe", "/x", msiUUIDForVersion(uninstallVersion), "/norestart", "/qn") - cmd.Stdout = up.Stdout - cmd.Stderr = up.Stderr - cmd.Stdin = os.Stdin - err = cmd.Run() - up.Logf("msiexec uninstall: %v", err) - } - return err -} - // cleanupOldDownloads removes all files matching glob (see filepath.Glob). // Only regular files are removed, so the glob must match specific files and // not directories. 
@@ -938,53 +777,6 @@ func (up *Updater) cleanupOldDownloads(glob string) { } } -func msiUUIDForVersion(ver string) string { - arch := runtime.GOARCH - if arch == "386" { - arch = "x86" - } - track, err := versionToTrack(ver) - if err != nil { - track = UnstableTrack - } - msiURL := fmt.Sprintf("https://pkgs.tailscale.com/%s/tailscale-setup-%s-%s.msi", track, ver, arch) - return "{" + strings.ToUpper(uuid.NewSHA1(uuid.NameSpaceURL, []byte(msiURL)).String()) + "}" -} - -func makeSelfCopy() (origPathExe, tmpPathExe string, err error) { - selfExe, err := os.Executable() - if err != nil { - return "", "", err - } - f, err := os.Open(selfExe) - if err != nil { - return "", "", err - } - defer f.Close() - f2, err := os.CreateTemp("", "tailscale-updater-*.exe") - if err != nil { - return "", "", err - } - if f := markTempFileFunc; f != nil { - if err := f(f2.Name()); err != nil { - return "", "", err - } - } - if _, err := io.Copy(f2, f); err != nil { - f2.Close() - return "", "", err - } - return selfExe, f2.Name(), f2.Close() -} - -func (up *Updater) downloadURLToFile(pathSrc, fileDst string) (ret error) { - c, err := distsign.NewClient(up.Logf, up.PkgsAddr) - if err != nil { - return err - } - return c.Download(context.Background(), pathSrc, fileDst) -} - func (up *Updater) updateFreeBSD() (err error) { if up.Version != "" { return errors.New("installing a specific version on FreeBSD is not supported") diff --git a/clientupdate/clientupdate_downloads.go b/clientupdate/clientupdate_downloads.go new file mode 100644 index 0000000000000..18d3176b42afe --- /dev/null +++ b/clientupdate/clientupdate_downloads.go @@ -0,0 +1,20 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build (linux && !android) || windows + +package clientupdate + +import ( + "context" + + "tailscale.com/clientupdate/distsign" +) + +func (up *Updater) downloadURLToFile(pathSrc, fileDst string) (ret error) { + c, err := distsign.NewClient(up.Logf, up.PkgsAddr) 
+ if err != nil { + return err + } + return c.Download(context.Background(), pathSrc, fileDst) +} diff --git a/clientupdate/clientupdate_not_downloads.go b/clientupdate/clientupdate_not_downloads.go new file mode 100644 index 0000000000000..057b4f2cd7574 --- /dev/null +++ b/clientupdate/clientupdate_not_downloads.go @@ -0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !((linux && !android) || windows) + +package clientupdate + +func (up *Updater) downloadURLToFile(pathSrc, fileDst string) (ret error) { + panic("unreachable") +} diff --git a/clientupdate/clientupdate_notwindows.go b/clientupdate/clientupdate_notwindows.go new file mode 100644 index 0000000000000..edadc210c8a15 --- /dev/null +++ b/clientupdate/clientupdate_notwindows.go @@ -0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !windows + +package clientupdate + +func (up *Updater) updateWindows() error { + panic("unreachable") +} diff --git a/clientupdate/clientupdate_windows.go b/clientupdate/clientupdate_windows.go index 2f6899a605b41..9737229745332 100644 --- a/clientupdate/clientupdate_windows.go +++ b/clientupdate/clientupdate_windows.go @@ -7,13 +7,57 @@ package clientupdate import ( + "errors" + "fmt" + "io" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "strings" + + "github.com/google/uuid" "golang.org/x/sys/windows" + "tailscale.com/util/winutil" "tailscale.com/util/winutil/authenticode" ) -func init() { - markTempFileFunc = markTempFileWindows - verifyAuthenticode = verifyTailscale +const ( + // winMSIEnv is the environment variable that, if set, is the MSI file for + // the update command to install. It's passed like this so we can stop the + // tailscale.exe process from running before the msiexec process runs and + // tries to overwrite ourselves. 
+ winMSIEnv = "TS_UPDATE_WIN_MSI" + // winExePathEnv is the environment variable that is set along with + // winMSIEnv and carries the full path of the calling tailscale.exe binary. + // It is used to re-launch the GUI process (tailscale-ipn.exe) after + // install is complete. + winExePathEnv = "TS_UPDATE_WIN_EXE_PATH" +) + +func makeSelfCopy() (origPathExe, tmpPathExe string, err error) { + selfExe, err := os.Executable() + if err != nil { + return "", "", err + } + f, err := os.Open(selfExe) + if err != nil { + return "", "", err + } + defer f.Close() + f2, err := os.CreateTemp("", "tailscale-updater-*.exe") + if err != nil { + return "", "", err + } + if err := markTempFileWindows(f2.Name()); err != nil { + return "", "", err + } + if _, err := io.Copy(f2, f); err != nil { + f2.Close() + return "", "", err + } + return selfExe, f2.Name(), f2.Close() } func markTempFileWindows(name string) error { @@ -23,6 +67,159 @@ func markTempFileWindows(name string) error { const certSubjectTailscale = "Tailscale Inc." -func verifyTailscale(path string) error { +func verifyAuthenticode(path string) error { return authenticode.Verify(path, certSubjectTailscale) } + +func (up *Updater) updateWindows() error { + if msi := os.Getenv(winMSIEnv); msi != "" { + // stdout/stderr from this part of the install could be lost since the + // parent tailscaled is replaced. Create a temp log file to have some + // output to debug with in case update fails. 
+ close, err := up.switchOutputToFile() + if err != nil { + up.Logf("failed to create log file for installation: %v; proceeding with existing outputs", err) + } else { + defer close.Close() + } + + up.Logf("installing %v ...", msi) + if err := up.installMSI(msi); err != nil { + up.Logf("MSI install failed: %v", err) + return err + } + + up.Logf("success.") + return nil + } + + if !winutil.IsCurrentProcessElevated() { + return errors.New(`update must be run as Administrator + +you can run the command prompt as Administrator one of these ways: +* right-click cmd.exe, select 'Run as administrator' +* press Windows+x, then press a +* press Windows+r, type in "cmd", then press Ctrl+Shift+Enter`) + } + ver, err := requestedTailscaleVersion(up.Version, up.Track) + if err != nil { + return err + } + arch := runtime.GOARCH + if arch == "386" { + arch = "x86" + } + if !up.confirm(ver) { + return nil + } + + tsDir := filepath.Join(os.Getenv("ProgramData"), "Tailscale") + msiDir := filepath.Join(tsDir, "MSICache") + if fi, err := os.Stat(tsDir); err != nil { + return fmt.Errorf("expected %s to exist, got stat error: %w", tsDir, err) + } else if !fi.IsDir() { + return fmt.Errorf("expected %s to be a directory; got %v", tsDir, fi.Mode()) + } + if err := os.MkdirAll(msiDir, 0700); err != nil { + return err + } + up.cleanupOldDownloads(filepath.Join(msiDir, "*.msi")) + pkgsPath := fmt.Sprintf("%s/tailscale-setup-%s-%s.msi", up.Track, ver, arch) + msiTarget := filepath.Join(msiDir, path.Base(pkgsPath)) + if err := up.downloadURLToFile(pkgsPath, msiTarget); err != nil { + return err + } + + up.Logf("verifying MSI authenticode...") + if err := verifyAuthenticode(msiTarget); err != nil { + return fmt.Errorf("authenticode verification of %s failed: %w", msiTarget, err) + } + up.Logf("authenticode verification succeeded") + + up.Logf("making tailscale.exe copy to switch to...") + up.cleanupOldDownloads(filepath.Join(os.TempDir(), "tailscale-updater-*.exe")) + selfOrig, selfCopy, err := 
makeSelfCopy() + if err != nil { + return err + } + defer os.Remove(selfCopy) + up.Logf("running tailscale.exe copy for final install...") + + cmd := exec.Command(selfCopy, "update") + cmd.Env = append(os.Environ(), winMSIEnv+"="+msiTarget, winExePathEnv+"="+selfOrig) + cmd.Stdout = up.Stderr + cmd.Stderr = up.Stderr + cmd.Stdin = os.Stdin + if err := cmd.Start(); err != nil { + return err + } + // Once it's started, exit ourselves, so the binary is free + // to be replaced. + os.Exit(0) + panic("unreachable") +} + +func (up *Updater) installMSI(msi string) error { + var err error + for tries := 0; tries < 2; tries++ { + cmd := exec.Command("msiexec.exe", "/i", filepath.Base(msi), "/quiet", "/norestart", "/qn") + cmd.Dir = filepath.Dir(msi) + cmd.Stdout = up.Stdout + cmd.Stderr = up.Stderr + cmd.Stdin = os.Stdin + err = cmd.Run() + if err == nil { + break + } + up.Logf("Install attempt failed: %v", err) + uninstallVersion := up.currentVersion + if v := os.Getenv("TS_DEBUG_UNINSTALL_VERSION"); v != "" { + uninstallVersion = v + } + // Assume it's a downgrade, which msiexec won't permit. Uninstall our current version first. 
+ up.Logf("Uninstalling current version %q for downgrade...", uninstallVersion) + cmd = exec.Command("msiexec.exe", "/x", msiUUIDForVersion(uninstallVersion), "/norestart", "/qn") + cmd.Stdout = up.Stdout + cmd.Stderr = up.Stderr + cmd.Stdin = os.Stdin + err = cmd.Run() + up.Logf("msiexec uninstall: %v", err) + } + return err +} + +func msiUUIDForVersion(ver string) string { + arch := runtime.GOARCH + if arch == "386" { + arch = "x86" + } + track, err := versionToTrack(ver) + if err != nil { + track = UnstableTrack + } + msiURL := fmt.Sprintf("https://pkgs.tailscale.com/%s/tailscale-setup-%s-%s.msi", track, ver, arch) + return "{" + strings.ToUpper(uuid.NewSHA1(uuid.NameSpaceURL, []byte(msiURL)).String()) + "}" +} + +func (up *Updater) switchOutputToFile() (io.Closer, error) { + var logFilePath string + exePath, err := os.Executable() + if err != nil { + logFilePath = filepath.Join(os.TempDir(), "tailscale-updater.log") + } else { + logFilePath = strings.TrimSuffix(exePath, ".exe") + ".log" + } + + up.Logf("writing update output to %q", logFilePath) + logFile, err := os.Create(logFilePath) + if err != nil { + return nil, err + } + + up.Logf = func(m string, args ...any) { + fmt.Fprintf(logFile, m+"\n", args...) + } + up.Stdout = logFile + up.Stderr = logFile + return logFile, nil +} diff --git a/cmd/containerboot/forwarding.go b/cmd/containerboot/forwarding.go new file mode 100644 index 0000000000000..04d34836c92d8 --- /dev/null +++ b/cmd/containerboot/forwarding.go @@ -0,0 +1,262 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package main + +import ( + "context" + "fmt" + "log" + "net" + "net/netip" + "os" + "path/filepath" + "strings" + + "tailscale.com/util/linuxfw" +) + +// ensureIPForwarding enables IPv4/IPv6 forwarding for the container. 
+func ensureIPForwarding(root, clusterProxyTargetIP, tailnetTargetIP, tailnetTargetFQDN string, routes *string) error { + var ( + v4Forwarding, v6Forwarding bool + ) + if clusterProxyTargetIP != "" { + proxyIP, err := netip.ParseAddr(clusterProxyTargetIP) + if err != nil { + return fmt.Errorf("invalid cluster destination IP: %v", err) + } + if proxyIP.Is4() { + v4Forwarding = true + } else { + v6Forwarding = true + } + } + if tailnetTargetIP != "" { + proxyIP, err := netip.ParseAddr(tailnetTargetIP) + if err != nil { + return fmt.Errorf("invalid tailnet destination IP: %v", err) + } + if proxyIP.Is4() { + v4Forwarding = true + } else { + v6Forwarding = true + } + } + // Currently we only proxy traffic to the IPv4 address of the tailnet + // target. + if tailnetTargetFQDN != "" { + v4Forwarding = true + } + if routes != nil && *routes != "" { + for _, route := range strings.Split(*routes, ",") { + cidr, err := netip.ParsePrefix(route) + if err != nil { + return fmt.Errorf("invalid subnet route: %v", err) + } + if cidr.Addr().Is4() { + v4Forwarding = true + } else { + v6Forwarding = true + } + } + } + return enableIPForwarding(v4Forwarding, v6Forwarding, root) +} + +func enableIPForwarding(v4Forwarding, v6Forwarding bool, root string) error { + var paths []string + if v4Forwarding { + paths = append(paths, filepath.Join(root, "proc/sys/net/ipv4/ip_forward")) + } + if v6Forwarding { + paths = append(paths, filepath.Join(root, "proc/sys/net/ipv6/conf/all/forwarding")) + } + + // In some common configurations (e.g. default docker, + // kubernetes), the container environment denies write access to + // most sysctls, including IP forwarding controls. Check the + // sysctl values before trying to change them, so that we + // gracefully do nothing if the container's already been set up + // properly by e.g. a k8s initContainer. 
+ for _, path := range paths { + bs, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("reading %q: %w", path, err) + } + if v := strings.TrimSpace(string(bs)); v != "1" { + if err := os.WriteFile(path, []byte("1"), 0644); err != nil { + return fmt.Errorf("enabling %q: %w", path, err) + } + } + } + return nil +} + +func installEgressForwardingRule(_ context.Context, dstStr string, tsIPs []netip.Prefix, nfr linuxfw.NetfilterRunner) error { + dst, err := netip.ParseAddr(dstStr) + if err != nil { + return err + } + var local netip.Addr + for _, pfx := range tsIPs { + if !pfx.IsSingleIP() { + continue + } + if pfx.Addr().Is4() != dst.Is4() { + continue + } + local = pfx.Addr() + break + } + if !local.IsValid() { + return fmt.Errorf("no tailscale IP matching family of %s found in %v", dstStr, tsIPs) + } + if err := nfr.DNATNonTailscaleTraffic("tailscale0", dst); err != nil { + return fmt.Errorf("installing egress proxy rules: %w", err) + } + if err := nfr.EnsureSNATForDst(local, dst); err != nil { + return fmt.Errorf("installing egress proxy rules: %w", err) + } + if err := nfr.ClampMSSToPMTU("tailscale0", dst); err != nil { + return fmt.Errorf("installing egress proxy rules: %w", err) + } + return nil +} + +// installTSForwardingRuleForDestination accepts a destination address and a +// list of node's tailnet addresses, sets up rules to forward traffic for +// destination to the tailnet IP matching the destination IP family. +// Destination can be Pod IP of this node. 
+func installTSForwardingRuleForDestination(_ context.Context, dstFilter string, tsIPs []netip.Prefix, nfr linuxfw.NetfilterRunner) error { + dst, err := netip.ParseAddr(dstFilter) + if err != nil { + return err + } + var local netip.Addr + for _, pfx := range tsIPs { + if !pfx.IsSingleIP() { + continue + } + if pfx.Addr().Is4() != dst.Is4() { + continue + } + local = pfx.Addr() + break + } + if !local.IsValid() { + return fmt.Errorf("no tailscale IP matching family of %s found in %v", dstFilter, tsIPs) + } + if err := nfr.AddDNATRule(dst, local); err != nil { + return fmt.Errorf("installing rule for forwarding traffic to tailnet IP: %w", err) + } + return nil +} + +func installIngressForwardingRule(_ context.Context, dstStr string, tsIPs []netip.Prefix, nfr linuxfw.NetfilterRunner) error { + dst, err := netip.ParseAddr(dstStr) + if err != nil { + return err + } + var local netip.Addr + proxyHasIPv4Address := false + for _, pfx := range tsIPs { + if !pfx.IsSingleIP() { + continue + } + if pfx.Addr().Is4() { + proxyHasIPv4Address = true + } + if pfx.Addr().Is4() != dst.Is4() { + continue + } + local = pfx.Addr() + break + } + if proxyHasIPv4Address && dst.Is6() { + log.Printf("Warning: proxy backend ClusterIP is an IPv6 address and the proxy has a IPv4 tailnet address. You might need to disable IPv4 address allocation for the proxy for forwarding to work. 
See https://github.com/tailscale/tailscale/issues/12156") + } + if !local.IsValid() { + return fmt.Errorf("no tailscale IP matching family of %s found in %v", dstStr, tsIPs) + } + if err := nfr.AddDNATRule(local, dst); err != nil { + return fmt.Errorf("installing ingress proxy rules: %w", err) + } + if err := nfr.ClampMSSToPMTU("tailscale0", dst); err != nil { + return fmt.Errorf("installing ingress proxy rules: %w", err) + } + return nil +} + +func installIngressForwardingRuleForDNSTarget(_ context.Context, backendAddrs []net.IP, tsIPs []netip.Prefix, nfr linuxfw.NetfilterRunner) error { + var ( + tsv4 netip.Addr + tsv6 netip.Addr + v4Backends []netip.Addr + v6Backends []netip.Addr + ) + for _, pfx := range tsIPs { + if pfx.IsSingleIP() && pfx.Addr().Is4() { + tsv4 = pfx.Addr() + continue + } + if pfx.IsSingleIP() && pfx.Addr().Is6() { + tsv6 = pfx.Addr() + continue + } + } + // TODO: log if more than one backend address is found and firewall is + // in nftables mode that only the first IP will be used. + for _, ip := range backendAddrs { + if ip.To4() != nil { + v4Backends = append(v4Backends, netip.AddrFrom4([4]byte(ip.To4()))) + } + if ip.To16() != nil { + v6Backends = append(v6Backends, netip.AddrFrom16([16]byte(ip.To16()))) + } + } + + // Enable IP forwarding here as opposed to at the start of containerboot + // as the IPv4/IPv6 requirements might have changed. + // For Kubernetes operator proxies, forwarding for both IPv4 and IPv6 is + // enabled by an init container, so in practice enabling forwarding here + // is only needed if this proxy has been configured by manually setting + // TS_EXPERIMENTAL_DEST_DNS_NAME env var for a containerboot instance. 
+ if err := enableIPForwarding(len(v4Backends) != 0, len(v6Backends) != 0, ""); err != nil { + log.Printf("[unexpected] failed to ensure IP forwarding: %v", err) + } + + updateFirewall := func(dst netip.Addr, backendTargets []netip.Addr) error { + if err := nfr.DNATWithLoadBalancer(dst, backendTargets); err != nil { + return fmt.Errorf("installing DNAT rules for ingress backends %+#v: %w", backendTargets, err) + } + // The backend might advertise MSS higher than that of the + // tailscale interfaces. Clamp MSS of packets going out via + // tailscale0 interface to its MTU to prevent broken connections + // in environments where path MTU discovery is not working. + if err := nfr.ClampMSSToPMTU("tailscale0", dst); err != nil { + return fmt.Errorf("adding rule to clamp traffic via tailscale0: %v", err) + } + return nil + } + + if len(v4Backends) != 0 { + if !tsv4.IsValid() { + log.Printf("backend targets %v contain at least one IPv4 address, but this node's Tailscale IPs do not contain a valid IPv4 address: %v", backendAddrs, tsIPs) + } else if err := updateFirewall(tsv4, v4Backends); err != nil { + return fmt.Errorf("Installing IPv4 firewall rules: %w", err) + } + } + if len(v6Backends) != 0 { + if !tsv6.IsValid() { + log.Printf("backend targets %v contain at least one IPv6 address, but this node's Tailscale IPs do not contain a valid IPv6 address: %v", backendAddrs, tsIPs) + } else if !nfr.HasIPV6NAT() { + log.Printf("backend targets %v contain at least one IPv6 address, but the chosen firewall mode does not support IPv6 NAT", backendAddrs) + } else if err := updateFirewall(tsv6, v6Backends); err != nil { + return fmt.Errorf("Installing IPv6 firewall rules: %w", err) + } + } + return nil +} diff --git a/cmd/containerboot/healthz.go b/cmd/containerboot/healthz.go new file mode 100644 index 0000000000000..fb7fccd968816 --- /dev/null +++ b/cmd/containerboot/healthz.go @@ -0,0 +1,51 @@ +// Copyright (c) Tailscale Inc & AUTHORS +//
SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package main + +import ( + "log" + "net" + "net/http" + "sync" +) + +// healthz is a simple health check server; if enabled, it returns 200 OK if +// this tailscale node currently has at least one tailnet IP address else +// returns 503. +type healthz struct { + sync.Mutex + hasAddrs bool +} + +func (h *healthz) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.Lock() + defer h.Unlock() + if h.hasAddrs { + w.Write([]byte("ok")) + } else { + http.Error(w, "node currently has no tailscale IPs", http.StatusServiceUnavailable) + } +} + +// runHealthz runs a simple HTTP health endpoint on /healthz, listening on the +// provided address. A containerized tailscale instance is considered healthy if +// it has at least one tailnet IP address. +func runHealthz(addr string, h *healthz) { + lis, err := net.Listen("tcp", addr) + if err != nil { + log.Fatalf("error listening on the provided health endpoint address %q: %v", addr, err) + } + mux := http.NewServeMux() + mux.Handle("/healthz", h) + log.Printf("Running healthcheck endpoint at %s/healthz", addr) + hs := &http.Server{Handler: mux} + + go func() { + if err := hs.Serve(lis); err != nil { + log.Fatalf("failed running health endpoint: %v", err) + } + }() +} diff --git a/cmd/containerboot/kube.go b/cmd/containerboot/kube.go index ec2d3ef1236d4..908cc01efc25a 100644 --- a/cmd/containerboot/kube.go +++ b/cmd/containerboot/kube.go @@ -8,7 +8,6 @@ package main import ( "context" "encoding/json" - "errors" "fmt" "log" "net/http" @@ -75,56 +74,6 @@ func deleteAuthKey(ctx context.Context, secretName string) error { var kc kubeclient.Client -// setupKube is responsible for doing any necessary configuration and checks to -// ensure that tailscale state storage and authentication mechanism will work on -// Kubernetes.
-func (cfg *settings) setupKube(ctx context.Context) error { - if cfg.KubeSecret == "" { - return nil - } - canPatch, canCreate, err := kc.CheckSecretPermissions(ctx, cfg.KubeSecret) - if err != nil { - return fmt.Errorf("Some Kubernetes permissions are missing, please check your RBAC configuration: %v", err) - } - cfg.KubernetesCanPatch = canPatch - - s, err := kc.GetSecret(ctx, cfg.KubeSecret) - if err != nil && kubeclient.IsNotFoundErr(err) && !canCreate { - return fmt.Errorf("Tailscale state Secret %s does not exist and we don't have permissions to create it. "+ - "If you intend to store tailscale state elsewhere than a Kubernetes Secret, "+ - "you can explicitly set TS_KUBE_SECRET env var to an empty string. "+ - "Else ensure that RBAC is set up that allows the service account associated with this installation to create Secrets.", cfg.KubeSecret) - } else if err != nil && !kubeclient.IsNotFoundErr(err) { - return fmt.Errorf("Getting Tailscale state Secret %s: %v", cfg.KubeSecret, err) - } - - if cfg.AuthKey == "" && !isOneStepConfig(cfg) { - if s == nil { - log.Print("TS_AUTHKEY not provided and kube secret does not exist, login will be interactive if needed.") - return nil - } - keyBytes, _ := s.Data["authkey"] - key := string(keyBytes) - - if key != "" { - // This behavior of pulling authkeys from kube secrets was added - // at the same time as the patch permission, so we can enforce - // that we must be able to patch out the authkey after - // authenticating if you want to use this feature. This avoids - // us having to deal with the case where we might leave behind - // an unnecessary reusable authkey in a secret, like a rake in - // the grass. 
- if !cfg.KubernetesCanPatch { - return errors.New("authkey found in TS_KUBE_SECRET, but the pod doesn't have patch permissions on the secret to manage the authkey.") - } - cfg.AuthKey = key - } else { - log.Print("No authkey found in kube secret and TS_AUTHKEY not provided, login will be interactive if needed.") - } - } - return nil -} - func initKubeClient(root string) { if root != "/" { // If we are running in a test, we need to set the root path to the fake diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index fdf71c3eae398..4c8ba58073c69 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -92,36 +92,28 @@ package main import ( - "bytes" "context" - "encoding/json" "errors" "fmt" "io/fs" "log" "math" "net" - "net/http" "net/netip" "os" - "os/exec" "os/signal" "path" "path/filepath" - "reflect" "slices" - "strconv" "strings" "sync" "sync/atomic" "syscall" "time" - "github.com/fsnotify/fsnotify" "golang.org/x/sys/unix" "tailscale.com/client/tailscale" "tailscale.com/ipn" - "tailscale.com/ipn/conffile" kubeutils "tailscale.com/k8s-operator" "tailscale.com/tailcfg" "tailscale.com/types/logger" @@ -140,35 +132,9 @@ func newNetfilterRunner(logf logger.Logf) (linuxfw.NetfilterRunner, error) { func main() { log.SetPrefix("boot: ") tailscale.I_Acknowledge_This_API_Is_Unstable = true - cfg := &settings{ - AuthKey: defaultEnvs([]string{"TS_AUTHKEY", "TS_AUTH_KEY"}, ""), - Hostname: defaultEnv("TS_HOSTNAME", ""), - Routes: defaultEnvStringPointer("TS_ROUTES"), - ServeConfigPath: defaultEnv("TS_SERVE_CONFIG", ""), - ProxyTargetIP: defaultEnv("TS_DEST_IP", ""), - ProxyTargetDNSName: defaultEnv("TS_EXPERIMENTAL_DEST_DNS_NAME", ""), - TailnetTargetIP: defaultEnv("TS_TAILNET_TARGET_IP", ""), - TailnetTargetFQDN: defaultEnv("TS_TAILNET_TARGET_FQDN", ""), - DaemonExtraArgs: defaultEnv("TS_TAILSCALED_EXTRA_ARGS", ""), - ExtraArgs: defaultEnv("TS_EXTRA_ARGS", ""), - InKubernetes: os.Getenv("KUBERNETES_SERVICE_HOST") != "", - UserspaceMode: 
defaultBool("TS_USERSPACE", true), - StateDir: defaultEnv("TS_STATE_DIR", ""), - AcceptDNS: defaultEnvBoolPointer("TS_ACCEPT_DNS"), - KubeSecret: defaultEnv("TS_KUBE_SECRET", "tailscale"), - SOCKSProxyAddr: defaultEnv("TS_SOCKS5_SERVER", ""), - HTTPProxyAddr: defaultEnv("TS_OUTBOUND_HTTP_PROXY_LISTEN", ""), - Socket: defaultEnv("TS_SOCKET", "/tmp/tailscaled.sock"), - AuthOnce: defaultBool("TS_AUTH_ONCE", false), - Root: defaultEnv("TS_TEST_ONLY_ROOT", "/"), - TailscaledConfigFilePath: tailscaledConfigFilePath(), - AllowProxyingClusterTrafficViaIngress: defaultBool("EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS", false), - PodIP: defaultEnv("POD_IP", ""), - EnableForwardingOptimizations: defaultBool("TS_EXPERIMENTAL_ENABLE_FORWARDING_OPTIMIZATIONS", false), - HealthCheckAddrPort: defaultEnv("TS_HEALTHCHECK_ADDR_PORT", ""), - } - if err := cfg.validate(); err != nil { + cfg, err := configFromEnv() + if err != nil { log.Fatalf("invalid configuration: %v", err) } @@ -283,10 +249,8 @@ authLoop: switch *n.State { case ipn.NeedsLogin: if isOneStepConfig(cfg) { - // This could happen if this is the - // first time tailscaled was run for - // this device and the auth key was not - // passed via the configfile. + // This could happen if this is the first time tailscaled was run for this + // device and the auth key was not passed via the configfile. log.Fatalf("invalid state: tailscaled daemon started with a config file, but tailscale is not logged in: ensure you pass a valid auth key in the config file.") } if err := authTailscale(); err != nil { @@ -384,6 +348,9 @@ authLoop: } }) ) + // egressSvcsErrorChan will get an error sent to it if this containerboot instance is configured to expose 1+ + // egress services in HA mode and errored. + var egressSvcsErrorChan = make(chan error) defer t.Stop() // resetTimer resets timer for when to next attempt to resolve the DNS // name for the proxy configured with TS_EXPERIMENTAL_DEST_DNS_NAME. 
The @@ -409,6 +376,7 @@ authLoop: failedResolveAttempts++ } + var egressSvcsNotify chan ipn.Notify notifyChan := make(chan ipn.Notify) errChan := make(chan error) go func() { @@ -486,7 +454,11 @@ runLoop: egressAddrs = node.Addresses().AsSlice() newCurentEgressIPs = deephash.Hash(&egressAddrs) egressIPsHaveChanged = newCurentEgressIPs != currentEgressIPs - if egressIPsHaveChanged && len(egressAddrs) != 0 { + // The firewall rules get (re-)installed: + // - on startup + // - when the tailnet IPs of the tailnet target have changed + // - when the tailnet IPs of this node have changed + if (egressIPsHaveChanged || ipsHaveChanged) && len(egressAddrs) != 0 { var rulesInstalled bool for _, egressAddr := range egressAddrs { ea := egressAddr.Addr() @@ -583,31 +555,50 @@ runLoop: h.Unlock() healthzRunner() } + if egressSvcsNotify != nil { + egressSvcsNotify <- n + } } if !startupTasksDone { - // For containerboot instances that act as TCP - // proxies (proxying traffic to an endpoint - // passed via one of the env vars that - // containerbot reads) and store state in a - // Kubernetes Secret, we consider startup tasks - // done at the point when device info has been - // successfully stored to state Secret. - // For all other containerboot instances, if we - // just get to this point the startup tasks can - // be considered done. + // For containerboot instances that act as TCP proxies (proxying traffic to an endpoint + // passed via one of the env vars that containerboot reads) and store state in a + // Kubernetes Secret, we consider startup tasks done at the point when device info has + // been successfully stored to state Secret. For all other containerboot instances, if + // we just get to this point the startup tasks can be considered done. if !isL3Proxy(cfg) || !hasKubeStateStore(cfg) || (currentDeviceEndpoints != deephash.Sum{} && currentDeviceID != deephash.Sum{}) { // This log message is used in tests to detect when all // post-auth configuration is done. 
log.Println("Startup complete, waiting for shutdown signal") startupTasksDone = true - // Wait on tailscaled process. It won't - // be cleaned up by default when the - // container exits as it is not PID1. - // TODO (irbekrm): perhaps we can - // replace the reaper by a running - // cmd.Wait in a goroutine immediately - // after starting tailscaled? + // Configure egress proxy. Egress proxy will set up firewall rules to proxy + // traffic to tailnet targets configured in the provided configuration file. It + // will then continuously monitor the config file and netmap updates and + // reconfigure the firewall rules as needed. If any of its operations fail, it + // will crash this node. + if cfg.EgressSvcsCfgPath != "" { + log.Printf("configuring egress proxy using configuration file at %s", cfg.EgressSvcsCfgPath) + egressSvcsNotify = make(chan ipn.Notify) + ep := egressProxy{ + cfgPath: cfg.EgressSvcsCfgPath, + nfr: nfr, + kc: kc, + stateSecret: cfg.KubeSecret, + netmapChan: egressSvcsNotify, + podIPv4: cfg.PodIPv4, + tailnetAddrs: addrs, + } + go func() { + if err := ep.run(ctx, n); err != nil { + egressSvcsErrorChan <- err + } + }() + } + + // Wait on tailscaled process. It won't be cleaned up by default when the + // container exits as it is not PID1. TODO (irbekrm): perhaps we can replace the + // reaper by a running cmd.Wait in a goroutine immediately after starting + // tailscaled? reaper := func() { defer wg.Done() for { @@ -645,226 +636,13 @@ runLoop: } backendAddrs = newBackendAddrs resetTimer(false) + case e := <-egressSvcsErrorChan: + log.Fatalf("egress proxy failed: %v", e) } } wg.Wait() } -// watchServeConfigChanges watches path for changes, and when it sees one, reads -// the serve config from it, replacing ${TS_CERT_DOMAIN} with certDomain, and -// applies it to lc. It exits when ctx is canceled. cdChanged is a channel that -// is written to when the certDomain changes, causing the serve config to be -// re-read and applied. 
-func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan bool, certDomainAtomic *atomic.Pointer[string], lc *tailscale.LocalClient) { - if certDomainAtomic == nil { - panic("cd must not be nil") - } - var tickChan <-chan time.Time - var eventChan <-chan fsnotify.Event - if w, err := fsnotify.NewWatcher(); err != nil { - log.Printf("failed to create fsnotify watcher, timer-only mode: %v", err) - ticker := time.NewTicker(5 * time.Second) - defer ticker.Stop() - tickChan = ticker.C - } else { - defer w.Close() - if err := w.Add(filepath.Dir(path)); err != nil { - log.Fatalf("failed to add fsnotify watch: %v", err) - } - eventChan = w.Events - } - - var certDomain string - var prevServeConfig *ipn.ServeConfig - for { - select { - case <-ctx.Done(): - return - case <-cdChanged: - certDomain = *certDomainAtomic.Load() - case <-tickChan: - case <-eventChan: - // We can't do any reasonable filtering on the event because of how - // k8s handles these mounts. So just re-read the file and apply it - // if it's changed. - } - if certDomain == "" { - continue - } - sc, err := readServeConfig(path, certDomain) - if err != nil { - log.Fatalf("failed to read serve config: %v", err) - } - if prevServeConfig != nil && reflect.DeepEqual(sc, prevServeConfig) { - continue - } - log.Printf("Applying serve config") - if err := lc.SetServeConfig(ctx, sc); err != nil { - log.Fatalf("failed to set serve config: %v", err) - } - prevServeConfig = sc - } -} - -// readServeConfig reads the ipn.ServeConfig from path, replacing -// ${TS_CERT_DOMAIN} with certDomain. 
-func readServeConfig(path, certDomain string) (*ipn.ServeConfig, error) { - if path == "" { - return nil, nil - } - j, err := os.ReadFile(path) - if err != nil { - return nil, err - } - j = bytes.ReplaceAll(j, []byte("${TS_CERT_DOMAIN}"), []byte(certDomain)) - var sc ipn.ServeConfig - if err := json.Unmarshal(j, &sc); err != nil { - return nil, err - } - return &sc, nil -} - -func startTailscaled(ctx context.Context, cfg *settings) (*tailscale.LocalClient, *os.Process, error) { - args := tailscaledArgs(cfg) - // tailscaled runs without context, since it needs to persist - // beyond the startup timeout in ctx. - cmd := exec.Command("tailscaled", args...) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.SysProcAttr = &syscall.SysProcAttr{ - Setpgid: true, - } - log.Printf("Starting tailscaled") - if err := cmd.Start(); err != nil { - return nil, nil, fmt.Errorf("starting tailscaled failed: %v", err) - } - - // Wait for the socket file to appear, otherwise API ops will racily fail. - log.Printf("Waiting for tailscaled socket") - for { - if ctx.Err() != nil { - log.Fatalf("Timed out waiting for tailscaled socket") - } - _, err := os.Stat(cfg.Socket) - if errors.Is(err, fs.ErrNotExist) { - time.Sleep(100 * time.Millisecond) - continue - } else if err != nil { - log.Fatalf("Waiting for tailscaled socket: %v", err) - } - break - } - - tsClient := &tailscale.LocalClient{ - Socket: cfg.Socket, - UseSocketOnly: true, - } - - return tsClient, cmd.Process, nil -} - -// tailscaledArgs uses cfg to construct the argv for tailscaled. 
-func tailscaledArgs(cfg *settings) []string { - args := []string{"--socket=" + cfg.Socket} - switch { - case cfg.InKubernetes && cfg.KubeSecret != "": - args = append(args, "--state=kube:"+cfg.KubeSecret) - if cfg.StateDir == "" { - cfg.StateDir = "/tmp" - } - fallthrough - case cfg.StateDir != "": - args = append(args, "--statedir="+cfg.StateDir) - default: - args = append(args, "--state=mem:", "--statedir=/tmp") - } - - if cfg.UserspaceMode { - args = append(args, "--tun=userspace-networking") - } else if err := ensureTunFile(cfg.Root); err != nil { - log.Fatalf("ensuring that /dev/net/tun exists: %v", err) - } - - if cfg.SOCKSProxyAddr != "" { - args = append(args, "--socks5-server="+cfg.SOCKSProxyAddr) - } - if cfg.HTTPProxyAddr != "" { - args = append(args, "--outbound-http-proxy-listen="+cfg.HTTPProxyAddr) - } - if cfg.TailscaledConfigFilePath != "" { - args = append(args, "--config="+cfg.TailscaledConfigFilePath) - } - if cfg.DaemonExtraArgs != "" { - args = append(args, strings.Fields(cfg.DaemonExtraArgs)...) - } - return args -} - -// tailscaleUp uses cfg to run 'tailscale up' everytime containerboot starts, or -// if TS_AUTH_ONCE is set, only the first time containerboot starts. -func tailscaleUp(ctx context.Context, cfg *settings) error { - args := []string{"--socket=" + cfg.Socket, "up"} - if cfg.AcceptDNS != nil && *cfg.AcceptDNS { - args = append(args, "--accept-dns=true") - } else { - args = append(args, "--accept-dns=false") - } - if cfg.AuthKey != "" { - args = append(args, "--authkey="+cfg.AuthKey) - } - // --advertise-routes can be passed an empty string to configure a - // device (that might have previously advertised subnet routes) to not - // advertise any routes. Respect an empty string passed by a user and - // use it to explicitly unset the routes. 
- if cfg.Routes != nil { - args = append(args, "--advertise-routes="+*cfg.Routes) - } - if cfg.Hostname != "" { - args = append(args, "--hostname="+cfg.Hostname) - } - if cfg.ExtraArgs != "" { - args = append(args, strings.Fields(cfg.ExtraArgs)...) - } - log.Printf("Running 'tailscale up'") - cmd := exec.CommandContext(ctx, "tailscale", args...) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return fmt.Errorf("tailscale up failed: %v", err) - } - return nil -} - -// tailscaleSet uses cfg to run 'tailscale set' to set any known configuration -// options that are passed in via environment variables. This is run after the -// node is in Running state and only if TS_AUTH_ONCE is set. -func tailscaleSet(ctx context.Context, cfg *settings) error { - args := []string{"--socket=" + cfg.Socket, "set"} - if cfg.AcceptDNS != nil && *cfg.AcceptDNS { - args = append(args, "--accept-dns=true") - } else { - args = append(args, "--accept-dns=false") - } - // --advertise-routes can be passed an empty string to configure a - // device (that might have previously advertised subnet routes) to not - // advertise any routes. Respect an empty string passed by a user and - // use it to explicitly unset the routes. - if cfg.Routes != nil { - args = append(args, "--advertise-routes="+*cfg.Routes) - } - if cfg.Hostname != "" { - args = append(args, "--hostname="+cfg.Hostname) - } - log.Printf("Running 'tailscale set'") - cmd := exec.CommandContext(ctx, "tailscale", args...) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return fmt.Errorf("tailscale set failed: %v", err) - } - return nil -} - // ensureTunFile checks that /dev/net/tun exists, creating it if // missing. func ensureTunFile(root string) error { @@ -884,350 +662,6 @@ func ensureTunFile(root string) error { return nil } -// ensureIPForwarding enables IPv4/IPv6 forwarding for the container. 
-func ensureIPForwarding(root, clusterProxyTargetIP, tailnetTargetIP, tailnetTargetFQDN string, routes *string) error { - var ( - v4Forwarding, v6Forwarding bool - ) - if clusterProxyTargetIP != "" { - proxyIP, err := netip.ParseAddr(clusterProxyTargetIP) - if err != nil { - return fmt.Errorf("invalid cluster destination IP: %v", err) - } - if proxyIP.Is4() { - v4Forwarding = true - } else { - v6Forwarding = true - } - } - if tailnetTargetIP != "" { - proxyIP, err := netip.ParseAddr(tailnetTargetIP) - if err != nil { - return fmt.Errorf("invalid tailnet destination IP: %v", err) - } - if proxyIP.Is4() { - v4Forwarding = true - } else { - v6Forwarding = true - } - } - // Currently we only proxy traffic to the IPv4 address of the tailnet - // target. - if tailnetTargetFQDN != "" { - v4Forwarding = true - } - if routes != nil && *routes != "" { - for _, route := range strings.Split(*routes, ",") { - cidr, err := netip.ParsePrefix(route) - if err != nil { - return fmt.Errorf("invalid subnet route: %v", err) - } - if cidr.Addr().Is4() { - v4Forwarding = true - } else { - v6Forwarding = true - } - } - } - return enableIPForwarding(v4Forwarding, v6Forwarding, root) -} - -func enableIPForwarding(v4Forwarding, v6Forwarding bool, root string) error { - var paths []string - if v4Forwarding { - paths = append(paths, filepath.Join(root, "proc/sys/net/ipv4/ip_forward")) - } - if v6Forwarding { - paths = append(paths, filepath.Join(root, "proc/sys/net/ipv6/conf/all/forwarding")) - } - - // In some common configurations (e.g. default docker, - // kubernetes), the container environment denies write access to - // most sysctls, including IP forwarding controls. Check the - // sysctl values before trying to change them, so that we - // gracefully do nothing if the container's already been set up - // properly by e.g. a k8s initContainer. 
- for _, path := range paths { - bs, err := os.ReadFile(path) - if err != nil { - return fmt.Errorf("reading %q: %w", path, err) - } - if v := strings.TrimSpace(string(bs)); v != "1" { - if err := os.WriteFile(path, []byte("1"), 0644); err != nil { - return fmt.Errorf("enabling %q: %w", path, err) - } - } - } - return nil -} - -func installEgressForwardingRule(_ context.Context, dstStr string, tsIPs []netip.Prefix, nfr linuxfw.NetfilterRunner) error { - dst, err := netip.ParseAddr(dstStr) - if err != nil { - return err - } - var local netip.Addr - for _, pfx := range tsIPs { - if !pfx.IsSingleIP() { - continue - } - if pfx.Addr().Is4() != dst.Is4() { - continue - } - local = pfx.Addr() - break - } - if !local.IsValid() { - return fmt.Errorf("no tailscale IP matching family of %s found in %v", dstStr, tsIPs) - } - if err := nfr.DNATNonTailscaleTraffic("tailscale0", dst); err != nil { - return fmt.Errorf("installing egress proxy rules: %w", err) - } - if err := nfr.AddSNATRuleForDst(local, dst); err != nil { - return fmt.Errorf("installing egress proxy rules: %w", err) - } - if err := nfr.ClampMSSToPMTU("tailscale0", dst); err != nil { - return fmt.Errorf("installing egress proxy rules: %w", err) - } - return nil -} - -// installTSForwardingRuleForDestination accepts a destination address and a -// list of node's tailnet addresses, sets up rules to forward traffic for -// destination to the tailnet IP matching the destination IP family. -// Destination can be Pod IP of this node. 
-func installTSForwardingRuleForDestination(ctx context.Context, dstFilter string, tsIPs []netip.Prefix, nfr linuxfw.NetfilterRunner) error { - dst, err := netip.ParseAddr(dstFilter) - if err != nil { - return err - } - var local netip.Addr - for _, pfx := range tsIPs { - if !pfx.IsSingleIP() { - continue - } - if pfx.Addr().Is4() != dst.Is4() { - continue - } - local = pfx.Addr() - break - } - if !local.IsValid() { - return fmt.Errorf("no tailscale IP matching family of %s found in %v", dstFilter, tsIPs) - } - if err := nfr.AddDNATRule(dst, local); err != nil { - return fmt.Errorf("installing rule for forwarding traffic to tailnet IP: %w", err) - } - return nil -} - -func installIngressForwardingRule(ctx context.Context, dstStr string, tsIPs []netip.Prefix, nfr linuxfw.NetfilterRunner) error { - dst, err := netip.ParseAddr(dstStr) - if err != nil { - return err - } - var local netip.Addr - proxyHasIPv4Address := false - for _, pfx := range tsIPs { - if !pfx.IsSingleIP() { - continue - } - if pfx.Addr().Is4() { - proxyHasIPv4Address = true - } - if pfx.Addr().Is4() != dst.Is4() { - continue - } - local = pfx.Addr() - break - } - if proxyHasIPv4Address && dst.Is6() { - log.Printf("Warning: proxy backend ClusterIP is an IPv6 address and the proxy has a IPv4 tailnet address. You might need to disable IPv4 address allocation for the proxy for forwarding to work. 
See https://github.com/tailscale/tailscale/issues/12156") - } - if !local.IsValid() { - return fmt.Errorf("no tailscale IP matching family of %s found in %v", dstStr, tsIPs) - } - if err := nfr.AddDNATRule(local, dst); err != nil { - return fmt.Errorf("installing ingress proxy rules: %w", err) - } - if err := nfr.ClampMSSToPMTU("tailscale0", dst); err != nil { - return fmt.Errorf("installing ingress proxy rules: %w", err) - } - return nil -} - -func installIngressForwardingRuleForDNSTarget(ctx context.Context, backendAddrs []net.IP, tsIPs []netip.Prefix, nfr linuxfw.NetfilterRunner) error { - var ( - tsv4 netip.Addr - tsv6 netip.Addr - v4Backends []netip.Addr - v6Backends []netip.Addr - ) - for _, pfx := range tsIPs { - if pfx.IsSingleIP() && pfx.Addr().Is4() { - tsv4 = pfx.Addr() - continue - } - if pfx.IsSingleIP() && pfx.Addr().Is6() { - tsv6 = pfx.Addr() - continue - } - } - // TODO: log if more than one backend address is found and firewall is - // in nftables mode that only the first IP will be used. - for _, ip := range backendAddrs { - if ip.To4() != nil { - v4Backends = append(v4Backends, netip.AddrFrom4([4]byte(ip.To4()))) - } - if ip.To16() != nil { - v6Backends = append(v6Backends, netip.AddrFrom16([16]byte(ip.To16()))) - } - } - - // Enable IP forwarding here as opposed to at the start of containerboot - // as the IPv4/IPv6 requirements might have changed. - // For Kubernetes operator proxies, forwarding for both IPv4 and IPv6 is - // enabled by an init container, so in practice enabling forwarding here - // is only needed if this proxy has been configured by manually setting - // TS_EXPERIMENTAL_DEST_DNS_NAME env var for a containerboot instance. 
- if err := enableIPForwarding(len(v4Backends) != 0, len(v6Backends) != 0, ""); err != nil { - log.Printf("[unexpected] failed to ensure IP forwarding: %v", err) - } - - updateFirewall := func(dst netip.Addr, backendTargets []netip.Addr) error { - if err := nfr.DNATWithLoadBalancer(dst, backendTargets); err != nil { - return fmt.Errorf("installing DNAT rules for ingress backends %+#v: %w", backendTargets, err) - } - // The backend might advertize MSS higher than that of the - // tailscale interfaces. Clamp MSS of packets going out via - // tailscale0 interface to its MTU to prevent broken connections - // in environments where path MTU discovery is not working. - if err := nfr.ClampMSSToPMTU("tailscale0", dst); err != nil { - return fmt.Errorf("adding rule to clamp traffic via tailscale0: %v", err) - } - return nil - } - - if len(v4Backends) != 0 { - if !tsv4.IsValid() { - log.Printf("backend targets %v contain at least one IPv4 address, but this node's Tailscale IPs do not contain a valid IPv4 address: %v", backendAddrs, tsIPs) - } else if err := updateFirewall(tsv4, v4Backends); err != nil { - return fmt.Errorf("Installing IPv4 firewall rules: %w", err) - } - } - if len(v6Backends) != 0 && !tsv6.IsValid() { - if !tsv6.IsValid() { - log.Printf("backend targets %v contain at least one IPv6 address, but this node's Tailscale IPs do not contain a valid IPv6 address: %v", backendAddrs, tsIPs) - } else if !nfr.HasIPV6NAT() { - log.Printf("backend targets %v contain at least one IPv6 address, but the chosen firewall mode does not support IPv6 NAT", backendAddrs) - } else if err := updateFirewall(tsv6, v6Backends); err != nil { - return fmt.Errorf("Installing IPv6 firewall rules: %w", err) - } - } - return nil -} - -// settings is all the configuration for containerboot. -type settings struct { - AuthKey string - Hostname string - Routes *string - // ProxyTargetIP is the destination IP to which all incoming - // Tailscale traffic should be proxied. 
If empty, no proxying - // is done. This is typically a locally reachable IP. - ProxyTargetIP string - // ProxyTargetDNSName is a DNS name to whose backing IP addresses all - // incoming Tailscale traffic should be proxied. - ProxyTargetDNSName string - // TailnetTargetIP is the destination IP to which all incoming - // non-Tailscale traffic should be proxied. This is typically a - // Tailscale IP. - TailnetTargetIP string - // TailnetTargetFQDN is an MagicDNS name to which all incoming - // non-Tailscale traffic should be proxied. This must be a full Tailnet - // node FQDN. - TailnetTargetFQDN string - ServeConfigPath string - DaemonExtraArgs string - ExtraArgs string - InKubernetes bool - UserspaceMode bool - StateDir string - AcceptDNS *bool - KubeSecret string - SOCKSProxyAddr string - HTTPProxyAddr string - Socket string - AuthOnce bool - Root string - KubernetesCanPatch bool - TailscaledConfigFilePath string - EnableForwardingOptimizations bool - // If set to true and, if this containerboot instance is a Kubernetes - // ingress proxy, set up rules to forward incoming cluster traffic to be - // forwarded to the ingress target in cluster. - AllowProxyingClusterTrafficViaIngress bool - // PodIP is the IP of the Pod if running in Kubernetes. This is used - // when setting up rules to proxy cluster traffic to cluster ingress - // target. - PodIP string - HealthCheckAddrPort string -} - -func (s *settings) validate() error { - if s.TailscaledConfigFilePath != "" { - dir, file := path.Split(s.TailscaledConfigFilePath) - if _, err := os.Stat(dir); err != nil { - return fmt.Errorf("error validating whether directory with tailscaled config file %s exists: %w", dir, err) - } - if _, err := os.Stat(s.TailscaledConfigFilePath); err != nil { - return fmt.Errorf("error validating whether tailscaled config directory %q contains tailscaled config for current capability version %q: %w. 
If this is a Tailscale Kubernetes operator proxy, please ensure that the version of the operator is not older than the version of the proxy", dir, file, err) - } - if _, err := conffile.Load(s.TailscaledConfigFilePath); err != nil { - return fmt.Errorf("error validating tailscaled configfile contents: %w", err) - } - } - if s.ProxyTargetIP != "" && s.UserspaceMode { - return errors.New("TS_DEST_IP is not supported with TS_USERSPACE") - } - if s.ProxyTargetDNSName != "" && s.UserspaceMode { - return errors.New("TS_EXPERIMENTAL_DEST_DNS_NAME is not supported with TS_USERSPACE") - } - if s.ProxyTargetDNSName != "" && s.ProxyTargetIP != "" { - return errors.New("TS_EXPERIMENTAL_DEST_DNS_NAME and TS_DEST_IP cannot both be set") - } - if s.TailnetTargetIP != "" && s.UserspaceMode { - return errors.New("TS_TAILNET_TARGET_IP is not supported with TS_USERSPACE") - } - if s.TailnetTargetFQDN != "" && s.UserspaceMode { - return errors.New("TS_TAILNET_TARGET_FQDN is not supported with TS_USERSPACE") - } - if s.TailnetTargetFQDN != "" && s.TailnetTargetIP != "" { - return errors.New("Both TS_TAILNET_TARGET_IP and TS_TAILNET_FQDN cannot be set") - } - if s.TailscaledConfigFilePath != "" && (s.AcceptDNS != nil || s.AuthKey != "" || s.Routes != nil || s.ExtraArgs != "" || s.Hostname != "") { - return errors.New("TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR cannot be set in combination with TS_HOSTNAME, TS_EXTRA_ARGS, TS_AUTHKEY, TS_ROUTES, TS_ACCEPT_DNS.") - } - if s.AllowProxyingClusterTrafficViaIngress && s.UserspaceMode { - return errors.New("EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS is not supported in userspace mode") - } - if s.AllowProxyingClusterTrafficViaIngress && s.ServeConfigPath == "" { - return errors.New("EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS is set but this is not a cluster ingress proxy") - } - if s.AllowProxyingClusterTrafficViaIngress && s.PodIP == "" { - return errors.New("EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS is set 
but POD_IP is not set") - } - if s.EnableForwardingOptimizations && s.UserspaceMode { - return errors.New("TS_EXPERIMENTAL_ENABLE_FORWARDING_OPTIMIZATIONS is not supported in userspace mode") - } - if s.HealthCheckAddrPort != "" { - if _, err := netip.ParseAddrPort(s.HealthCheckAddrPort); err != nil { - return fmt.Errorf("error parsing TS_HEALTH_CHECK_ADDR_PORT value %q: %w", s.HealthCheckAddrPort, err) - } - } - return nil -} - func resolveDNS(ctx context.Context, name string) ([]net.IP, error) { // TODO (irbekrm): look at using recursive.Resolver instead to resolve // the DNS names as well as retrieve TTLs. It looks though that this @@ -1250,57 +684,6 @@ func resolveDNS(ctx context.Context, name string) ([]net.IP, error) { return append(ip4s, ip6s...), nil } -// defaultEnv returns the value of the given envvar name, or defVal if -// unset. -func defaultEnv(name, defVal string) string { - if v, ok := os.LookupEnv(name); ok { - return v - } - return defVal -} - -// defaultEnvStringPointer returns a pointer to the given envvar value if set, else -// returns nil. This is useful in cases where we need to distinguish between a -// variable being set to empty string vs unset. -func defaultEnvStringPointer(name string) *string { - if v, ok := os.LookupEnv(name); ok { - return &v - } - return nil -} - -// defaultEnvBoolPointer returns a pointer to the given envvar value if set, else -// returns nil. This is useful in cases where we need to distinguish between a -// variable being explicitly set to false vs unset. -func defaultEnvBoolPointer(name string) *bool { - v := os.Getenv(name) - ret, err := strconv.ParseBool(v) - if err != nil { - return nil - } - return &ret -} - -func defaultEnvs(names []string, defVal string) string { - for _, name := range names { - if v, ok := os.LookupEnv(name); ok { - return v - } - } - return defVal -} - -// defaultBool returns the boolean value of the given envvar name, or -// defVal if unset or not a bool. 
-func defaultBool(name string, defVal bool) bool { - v := os.Getenv(name) - ret, err := strconv.ParseBool(v) - if err != nil { - return defVal - } - return ret -} - // contextWithExitSignalWatch watches for SIGTERM/SIGINT signals. It returns a // context that gets cancelled when a signal is received and a cancel function // that can be called to free the resources when the watch should be stopped. @@ -1323,43 +706,6 @@ func contextWithExitSignalWatch() (context.Context, func()) { return ctx, f } -// isTwoStepConfigAuthOnce returns true if the Tailscale node should be configured -// in two steps and login should only happen once. -// Step 1: run 'tailscaled' -// Step 2): -// A) if this is the first time starting this node run 'tailscale up --authkey ' -// B) if this is not the first time starting this node run 'tailscale set '. -func isTwoStepConfigAuthOnce(cfg *settings) bool { - return cfg.AuthOnce && cfg.TailscaledConfigFilePath == "" -} - -// isTwoStepConfigAlwaysAuth returns true if the Tailscale node should be configured -// in two steps and we should log in every time it starts. -// Step 1: run 'tailscaled' -// Step 2): run 'tailscale up --authkey ' -func isTwoStepConfigAlwaysAuth(cfg *settings) bool { - return !cfg.AuthOnce && cfg.TailscaledConfigFilePath == "" -} - -// isOneStepConfig returns true if the Tailscale node should always be ran and -// configured in a single step by running 'tailscaled ' -func isOneStepConfig(cfg *settings) bool { - return cfg.TailscaledConfigFilePath != "" -} - -// isL3Proxy returns true if the Tailscale node needs to be configured to act -// as an L3 proxy, proxying to an endpoint provided via one of the config env -// vars. -func isL3Proxy(cfg *settings) bool { - return cfg.ProxyTargetIP != "" || cfg.ProxyTargetDNSName != "" || cfg.TailnetTargetIP != "" || cfg.TailnetTargetFQDN != "" || cfg.AllowProxyingClusterTrafficViaIngress -} - -// hasKubeStateStore returns true if the state must be stored in a Kubernetes -// Secret. 
-func hasKubeStateStore(cfg *settings) bool { - return cfg.InKubernetes && cfg.KubernetesCanPatch && cfg.KubeSecret != "" -} - // tailscaledConfigFilePath returns the path to the tailscaled config file that // should be used for the current capability version. It is determined by the // TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR environment variable and looks for a @@ -1396,43 +742,5 @@ func tailscaledConfigFilePath() string { log.Fatalf("no tailscaled config file found in %q for current capability version %q", dir, tailcfg.CurrentCapabilityVersion) } log.Printf("Using tailscaled config file %q for capability version %q", maxCompatVer, tailcfg.CurrentCapabilityVersion) - return path.Join(dir, kubeutils.TailscaledConfigFileNameForCap(maxCompatVer)) -} - -// healthz is a simple health check server, if enabled it returns 200 OK if -// this tailscale node currently has at least one tailnet IP address else -// returns 503. -type healthz struct { - sync.Mutex - hasAddrs bool -} - -func (h *healthz) ServeHTTP(w http.ResponseWriter, r *http.Request) { - h.Lock() - defer h.Unlock() - if h.hasAddrs { - w.Write([]byte("ok")) - } else { - http.Error(w, "node currently has no tailscale IPs", http.StatusInternalServerError) - } -} - -// runHealthz runs a simple HTTP health endpoint on /healthz, listening on the -// provided address. A containerized tailscale instance is considered healthy if -// it has at least one tailnet IP address. 
-func runHealthz(addr string, h *healthz) { - lis, err := net.Listen("tcp", addr) - if err != nil { - log.Fatalf("error listening on the provided health endpoint address %q: %v", addr, err) - } - mux := http.NewServeMux() - mux.Handle("/healthz", h) - log.Printf("Running healthcheck endpoint at %s/healthz", addr) - hs := &http.Server{Handler: mux} - - go func() { - if err := hs.Serve(lis); err != nil { - log.Fatalf("failed running health endpoint: %v", err) - } - }() + return path.Join(dir, kubeutils.TailscaledConfigFileName(maxCompatVer)) } diff --git a/cmd/containerboot/serve.go b/cmd/containerboot/serve.go new file mode 100644 index 0000000000000..6c22b3eeb651e --- /dev/null +++ b/cmd/containerboot/serve.go @@ -0,0 +1,96 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package main + +import ( + "bytes" + "context" + "encoding/json" + "log" + "os" + "path/filepath" + "reflect" + "sync/atomic" + "time" + + "github.com/fsnotify/fsnotify" + "tailscale.com/client/tailscale" + "tailscale.com/ipn" +) + +// watchServeConfigChanges watches path for changes, and when it sees one, reads +// the serve config from it, replacing ${TS_CERT_DOMAIN} with certDomain, and +// applies it to lc. It exits when ctx is canceled. cdChanged is a channel that +// is written to when the certDomain changes, causing the serve config to be +// re-read and applied. 
+func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan bool, certDomainAtomic *atomic.Pointer[string], lc *tailscale.LocalClient) { + if certDomainAtomic == nil { + panic("cd must not be nil") + } + var tickChan <-chan time.Time + var eventChan <-chan fsnotify.Event + if w, err := fsnotify.NewWatcher(); err != nil { + log.Printf("failed to create fsnotify watcher, timer-only mode: %v", err) + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + tickChan = ticker.C + } else { + defer w.Close() + if err := w.Add(filepath.Dir(path)); err != nil { + log.Fatalf("failed to add fsnotify watch: %v", err) + } + eventChan = w.Events + } + + var certDomain string + var prevServeConfig *ipn.ServeConfig + for { + select { + case <-ctx.Done(): + return + case <-cdChanged: + certDomain = *certDomainAtomic.Load() + case <-tickChan: + case <-eventChan: + // We can't do any reasonable filtering on the event because of how + // k8s handles these mounts. So just re-read the file and apply it + // if it's changed. + } + if certDomain == "" { + continue + } + sc, err := readServeConfig(path, certDomain) + if err != nil { + log.Fatalf("failed to read serve config: %v", err) + } + if prevServeConfig != nil && reflect.DeepEqual(sc, prevServeConfig) { + continue + } + log.Printf("Applying serve config") + if err := lc.SetServeConfig(ctx, sc); err != nil { + log.Fatalf("failed to set serve config: %v", err) + } + prevServeConfig = sc + } +} + +// readServeConfig reads the ipn.ServeConfig from path, replacing +// ${TS_CERT_DOMAIN} with certDomain. 
+func readServeConfig(path, certDomain string) (*ipn.ServeConfig, error) { + if path == "" { + return nil, nil + } + j, err := os.ReadFile(path) + if err != nil { + return nil, err + } + j = bytes.ReplaceAll(j, []byte("${TS_CERT_DOMAIN}"), []byte(certDomain)) + var sc ipn.ServeConfig + if err := json.Unmarshal(j, &sc); err != nil { + return nil, err + } + return &sc, nil +} diff --git a/cmd/containerboot/services.go b/cmd/containerboot/services.go new file mode 100644 index 0000000000000..4da7286b7ca0a --- /dev/null +++ b/cmd/containerboot/services.go @@ -0,0 +1,571 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package main + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "log" + "net/netip" + "os" + "path/filepath" + "reflect" + "strings" + "time" + + "github.com/fsnotify/fsnotify" + "tailscale.com/ipn" + "tailscale.com/kube/egressservices" + "tailscale.com/kube/kubeclient" + "tailscale.com/tailcfg" + "tailscale.com/util/linuxfw" + "tailscale.com/util/mak" +) + +const tailscaleTunInterface = "tailscale0" + +// This file contains functionality to run containerboot as a proxy that can +// route cluster traffic to one or more tailnet targets, based on portmapping +// rules read from a configfile. Currently (9/2024) this is only used for the +// Kubernetes operator egress proxies. + +// egressProxy knows how to configure firewall rules to route cluster traffic to +// one or more tailnet services. +type egressProxy struct { + cfgPath string // path to egress service config file + + nfr linuxfw.NetfilterRunner // never nil + + kc kubeclient.Client // never nil + stateSecret string // name of the kube state Secret + + netmapChan chan ipn.Notify // chan to receive netmap updates on + + podIPv4 string // never empty string, currently only IPv4 is supported + + // tailnetFQDNs is the egress service FQDN to tailnet IP mappings that + // were last used to configure firewall rules for this proxy. 
+ // TODO(irbekrm): target addresses are also stored in the state Secret. + // Evaluate whether we should retrieve them from there and not store in + // memory at all. + targetFQDNs map[string][]netip.Prefix + + // used to configure firewall rules. + tailnetAddrs []netip.Prefix +} + +// run configures egress proxy firewall rules and ensures that the firewall rules are reconfigured when: +// - the mounted egress config has changed +// - the proxy's tailnet IP addresses have changed +// - tailnet IPs have changed for any backend targets specified by tailnet FQDN +func (ep *egressProxy) run(ctx context.Context, n ipn.Notify) error { + var tickChan <-chan time.Time + var eventChan <-chan fsnotify.Event + // TODO (irbekrm): take a look if this can be pulled into a single func + // shared with serve config loader. + if w, err := fsnotify.NewWatcher(); err != nil { + log.Printf("failed to create fsnotify watcher, timer-only mode: %v", err) + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + tickChan = ticker.C + } else { + defer w.Close() + if err := w.Add(filepath.Dir(ep.cfgPath)); err != nil { + return fmt.Errorf("failed to add fsnotify watch: %w", err) + } + eventChan = w.Events + } + + if err := ep.sync(ctx, n); err != nil { + return err + } + for { + var err error + select { + case <-ctx.Done(): + return nil + case <-tickChan: + err = ep.sync(ctx, n) + case <-eventChan: + log.Printf("config file change detected, ensuring firewall config is up to date...") + err = ep.sync(ctx, n) + case n = <-ep.netmapChan: + shouldResync := ep.shouldResync(n) + if shouldResync { + log.Printf("netmap change detected, ensuring firewall config is up to date...") + err = ep.sync(ctx, n) + } + } + if err != nil { + return fmt.Errorf("error syncing egress service config: %w", err) + } + } +} + +// sync triggers an egress proxy config resync. The resync calculates the diff between config and status to determine if +// any firewall rules need to be updated. 
Currently using status in state Secret as a reference for what is the current +// firewall configuration is good enough because - the status is keyed by the Pod IP - we crash the Pod on errors such +// as failed firewall update +func (ep *egressProxy) sync(ctx context.Context, n ipn.Notify) error { + cfgs, err := ep.getConfigs() + if err != nil { + return fmt.Errorf("error retrieving egress service configs: %w", err) + } + status, err := ep.getStatus(ctx) + if err != nil { + return fmt.Errorf("error retrieving current egress proxy status: %w", err) + } + newStatus, err := ep.syncEgressConfigs(cfgs, status, n) + if err != nil { + return fmt.Errorf("error syncing egress service configs: %w", err) + } + if !servicesStatusIsEqual(newStatus, status) { + if err := ep.setStatus(ctx, newStatus, n); err != nil { + return fmt.Errorf("error setting egress proxy status: %w", err) + } + } + return nil +} + +// addrsHaveChanged returns true if the provided netmap update contains tailnet address change for this proxy node. +// Netmap must not be nil. +func (ep *egressProxy) addrsHaveChanged(n ipn.Notify) bool { + return !reflect.DeepEqual(ep.tailnetAddrs, n.NetMap.SelfNode.Addresses()) +} + +// syncEgressConfigs adds and deletes firewall rules to match the desired +// configuration. It uses the provided status to determine what is currently +// applied and updates the status after a successful sync. +func (ep *egressProxy) syncEgressConfigs(cfgs *egressservices.Configs, status *egressservices.Status, n ipn.Notify) (*egressservices.Status, error) { + if !(wantsServicesConfigured(cfgs) || hasServicesConfigured(status)) { + return nil, nil + } + + // Delete unnecessary services. + if err := ep.deleteUnnecessaryServices(cfgs, status); err != nil { + return nil, fmt.Errorf("error deleting services: %w", err) + + } + newStatus := &egressservices.Status{} + if !wantsServicesConfigured(cfgs) { + return newStatus, nil + } + + // Add new services, update rules for any that have changed. 
+ rulesPerSvcToAdd := make(map[string][]rule, 0) + rulesPerSvcToDelete := make(map[string][]rule, 0) + for svcName, cfg := range *cfgs { + tailnetTargetIPs, err := ep.tailnetTargetIPsForSvc(cfg, n) + if err != nil { + return nil, fmt.Errorf("error determining tailnet target IPs: %w", err) + } + rulesToAdd, rulesToDelete, err := updatesForCfg(svcName, cfg, status, tailnetTargetIPs) + if err != nil { + return nil, fmt.Errorf("error validating service changes: %v", err) + } + log.Printf("syncegressservices: looking at svc %s rulesToAdd %d rulesToDelete %d", svcName, len(rulesToAdd), len(rulesToDelete)) + if len(rulesToAdd) != 0 { + mak.Set(&rulesPerSvcToAdd, svcName, rulesToAdd) + } + if len(rulesToDelete) != 0 { + mak.Set(&rulesPerSvcToDelete, svcName, rulesToDelete) + } + if len(rulesToAdd) != 0 || ep.addrsHaveChanged(n) { + // For each tailnet target, set up SNAT from the local tailnet device address of the matching + // family. + for _, t := range tailnetTargetIPs { + var local netip.Addr + for _, pfx := range n.NetMap.SelfNode.Addresses().All() { + if !pfx.IsSingleIP() { + continue + } + if pfx.Addr().Is4() != t.Is4() { + continue + } + local = pfx.Addr() + break + } + if !local.IsValid() { + return nil, fmt.Errorf("no valid local IP: %v", local) + } + if err := ep.nfr.EnsureSNATForDst(local, t); err != nil { + return nil, fmt.Errorf("error setting up SNAT rule: %w", err) + } + } + } + // Update the status. Status will be written back to the state Secret by the caller. + mak.Set(&newStatus.Services, svcName, &egressservices.ServiceStatus{TailnetTargetIPs: tailnetTargetIPs, TailnetTarget: cfg.TailnetTarget, Ports: cfg.Ports}) + } + + // Actually apply the firewall rules. 
+	if err := ensureRulesAdded(rulesPerSvcToAdd, ep.nfr); err != nil {
+		return nil, fmt.Errorf("error adding rules: %w", err)
+	}
+	if err := ensureRulesDeleted(rulesPerSvcToDelete, ep.nfr); err != nil {
+		return nil, fmt.Errorf("error deleting rules: %w", err)
+	}
+
+	return newStatus, nil
+}
+
+// updatesForCfg calculates any rules that need to be added or deleted for an individual egress service config.
+func updatesForCfg(svcName string, cfg egressservices.Config, status *egressservices.Status, tailnetTargetIPs []netip.Addr) ([]rule, []rule, error) {
+	rulesToAdd := make([]rule, 0)
+	rulesToDelete := make([]rule, 0)
+	currentConfig, ok := lookupCurrentConfig(svcName, status)
+
+	// If no rules for service are present yet, add them all.
+	if !ok {
+		for _, t := range tailnetTargetIPs {
+			for ports := range cfg.Ports {
+				log.Printf("syncegressservices: svc %s adding port %v", svcName, ports)
+				rulesToAdd = append(rulesToAdd, rule{tailnetPort: ports.TargetPort, containerPort: ports.MatchPort, protocol: ports.Protocol, tailnetIP: t})
+			}
+		}
+		return rulesToAdd, rulesToDelete, nil
+	}
+
+	// If there are no backend targets available, delete any currently configured rules.
+	if len(tailnetTargetIPs) == 0 {
+		log.Printf("tailnet target for egress service %s does not have any backend addresses, deleting all rules", svcName)
+		for _, ip := range currentConfig.TailnetTargetIPs {
+			for ports := range currentConfig.Ports {
+				rulesToDelete = append(rulesToDelete, rule{tailnetPort: ports.TargetPort, containerPort: ports.MatchPort, protocol: ports.Protocol, tailnetIP: ip})
+			}
+		}
+		return rulesToAdd, rulesToDelete, nil
+	}
+
+	// If there are rules present for backend targets that no longer match, delete them. 
+ for _, ip := range currentConfig.TailnetTargetIPs { + var found bool + for _, wantsIP := range tailnetTargetIPs { + if reflect.DeepEqual(ip, wantsIP) { + found = true + break + } + } + if !found { + for ports := range currentConfig.Ports { + rulesToDelete = append(rulesToDelete, rule{tailnetPort: ports.TargetPort, containerPort: ports.MatchPort, protocol: ports.Protocol, tailnetIP: ip}) + } + } + } + + // Sync rules for the currently wanted backend targets. + for _, ip := range tailnetTargetIPs { + + // If the backend target is not yet present in status, add all rules. + var found bool + for _, gotIP := range currentConfig.TailnetTargetIPs { + if reflect.DeepEqual(ip, gotIP) { + found = true + break + } + } + if !found { + for ports := range cfg.Ports { + rulesToAdd = append(rulesToAdd, rule{tailnetPort: ports.TargetPort, containerPort: ports.MatchPort, protocol: ports.Protocol, tailnetIP: ip}) + } + continue + } + + // If the backend target is present in status, check that the + // currently applied rules are up to date. + + // Delete any current portmappings that are no longer present in config. + for port := range currentConfig.Ports { + if _, ok := cfg.Ports[port]; ok { + continue + } + rulesToDelete = append(rulesToDelete, rule{tailnetPort: port.TargetPort, containerPort: port.MatchPort, protocol: port.Protocol, tailnetIP: ip}) + } + + // Add any new portmappings. + for port := range cfg.Ports { + if _, ok := currentConfig.Ports[port]; ok { + continue + } + rulesToAdd = append(rulesToAdd, rule{tailnetPort: port.TargetPort, containerPort: port.MatchPort, protocol: port.Protocol, tailnetIP: ip}) + } + } + return rulesToAdd, rulesToDelete, nil +} + +// deleteUnneccessaryServices ensure that any services found on status, but not +// present in config are deleted. 
+func (ep *egressProxy) deleteUnnecessaryServices(cfgs *egressservices.Configs, status *egressservices.Status) error { + if !hasServicesConfigured(status) { + return nil + } + if !wantsServicesConfigured(cfgs) { + for svcName, svc := range status.Services { + log.Printf("service %s is no longer required, deleting", svcName) + if err := ensureServiceDeleted(svcName, svc, ep.nfr); err != nil { + return fmt.Errorf("error deleting service %s: %w", svcName, err) + } + } + return nil + } + + for svcName, svc := range status.Services { + if _, ok := (*cfgs)[svcName]; !ok { + log.Printf("service %s is no longer required, deleting", svcName) + if err := ensureServiceDeleted(svcName, svc, ep.nfr); err != nil { + return fmt.Errorf("error deleting service %s: %w", svcName, err) + } + // TODO (irbekrm): also delete the SNAT rule here + } + } + return nil +} + +// getConfigs gets the mounted egress service configuration. +func (ep *egressProxy) getConfigs() (*egressservices.Configs, error) { + j, err := os.ReadFile(ep.cfgPath) + if os.IsNotExist(err) { + return nil, nil + } + if err != nil { + return nil, err + } + if len(j) == 0 || string(j) == "" { + return nil, nil + } + cfg := &egressservices.Configs{} + if err := json.Unmarshal(j, &cfg); err != nil { + return nil, err + } + return cfg, nil +} + +// getStatus gets the current status of the configured firewall. The current +// status is stored in state Secret. Returns nil status if no status that +// applies to the current proxy Pod was found. Uses the Pod IP to determine if a +// status found in the state Secret applies to this proxy Pod. 
+func (ep *egressProxy) getStatus(ctx context.Context) (*egressservices.Status, error) { + secret, err := ep.kc.GetSecret(ctx, ep.stateSecret) + if err != nil { + return nil, fmt.Errorf("error retrieving state secret: %w", err) + } + status := &egressservices.Status{} + raw, ok := secret.Data[egressservices.KeyEgressServices] + if !ok { + return nil, nil + } + if err := json.Unmarshal([]byte(raw), status); err != nil { + return nil, fmt.Errorf("error unmarshalling previous config: %w", err) + } + if reflect.DeepEqual(status.PodIPv4, ep.podIPv4) { + return status, nil + } + return nil, nil +} + +// setStatus writes egress proxy's currently configured firewall to the state +// Secret and updates proxy's tailnet addresses. +func (ep *egressProxy) setStatus(ctx context.Context, status *egressservices.Status, n ipn.Notify) error { + // Pod IP is used to determine if a stored status applies to THIS proxy Pod. + if status == nil { + status = &egressservices.Status{} + } + status.PodIPv4 = ep.podIPv4 + secret, err := ep.kc.GetSecret(ctx, ep.stateSecret) + if err != nil { + return fmt.Errorf("error retrieving state Secret: %w", err) + } + bs, err := json.Marshal(status) + if err != nil { + return fmt.Errorf("error marshalling service config: %w", err) + } + secret.Data[egressservices.KeyEgressServices] = bs + patch := kubeclient.JSONPatch{ + Op: "replace", + Path: fmt.Sprintf("/data/%s", egressservices.KeyEgressServices), + Value: bs, + } + if err := ep.kc.JSONPatchSecret(ctx, ep.stateSecret, []kubeclient.JSONPatch{patch}); err != nil { + return fmt.Errorf("error patching state Secret: %w", err) + } + ep.tailnetAddrs = n.NetMap.SelfNode.Addresses().AsSlice() + return nil +} + +// tailnetTargetIPsForSvc returns the tailnet IPs to which traffic for this +// egress service should be proxied. The egress service can be configured by IP +// or by FQDN. If it's configured by IP, just return that. If it's configured by +// FQDN, resolve the FQDN and return the resolved IPs. 
It checks if the +// netfilter runner supports IPv6 NAT and skips any IPv6 addresses if it +// doesn't. +func (ep *egressProxy) tailnetTargetIPsForSvc(svc egressservices.Config, n ipn.Notify) (addrs []netip.Addr, err error) { + if svc.TailnetTarget.IP != "" { + addr, err := netip.ParseAddr(svc.TailnetTarget.IP) + if err != nil { + return nil, fmt.Errorf("error parsing tailnet target IP: %w", err) + } + if addr.Is6() && !ep.nfr.HasIPV6NAT() { + log.Printf("tailnet target is an IPv6 address, but this host does not support IPv6 in the chosen firewall mode. This will probably not work.") + return addrs, nil + } + return []netip.Addr{addr}, nil + } + + if svc.TailnetTarget.FQDN == "" { + return nil, errors.New("unexpected egress service config- neither tailnet target IP nor FQDN is set") + } + if n.NetMap == nil { + log.Printf("netmap is not available, unable to determine backend addresses for %s", svc.TailnetTarget.FQDN) + return addrs, nil + } + var ( + node tailcfg.NodeView + nodeFound bool + ) + for _, nn := range n.NetMap.Peers { + if equalFQDNs(nn.Name(), svc.TailnetTarget.FQDN) { + node = nn + nodeFound = true + break + } + } + if nodeFound { + for _, addr := range node.Addresses().AsSlice() { + if addr.Addr().Is6() && !ep.nfr.HasIPV6NAT() { + log.Printf("tailnet target %v is an IPv6 address, but this host does not support IPv6 in the chosen firewall mode, skipping.", addr.Addr().String()) + continue + } + addrs = append(addrs, addr.Addr()) + } + // Egress target endpoints configured via FQDN are stored, so + // that we can determine if a netmap update should trigger a + // resync. + mak.Set(&ep.targetFQDNs, svc.TailnetTarget.FQDN, node.Addresses().AsSlice()) + } + return addrs, nil +} + +// shouldResync parses netmap update and returns true if the update contains +// changes for which the egress proxy's firewall should be reconfigured. 
+func (ep *egressProxy) shouldResync(n ipn.Notify) bool {
+	if n.NetMap == nil {
+		return false
+	}
+
+	// If proxy's tailnet addresses have changed, resync.
+	if !reflect.DeepEqual(n.NetMap.SelfNode.Addresses().AsSlice(), ep.tailnetAddrs) {
+		log.Printf("node addresses have changed, trigger egress config resync")
+		ep.tailnetAddrs = n.NetMap.SelfNode.Addresses().AsSlice()
+		return true
+	}
+
+	// If the IPs for any of the egress services configured via FQDN have
+	// changed, resync.
+	for fqdn, ips := range ep.targetFQDNs {
+		for _, nn := range n.NetMap.Peers {
+			if equalFQDNs(nn.Name(), fqdn) {
+				if !reflect.DeepEqual(ips, nn.Addresses().AsSlice()) {
+					log.Printf("backend addresses for egress target %q have changed old IPs %v, new IPs %v trigger egress config resync", nn.Name(), ips, nn.Addresses().AsSlice())
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+// ensureServiceDeleted ensures that any rules for an egress service are removed
+// from the firewall configuration.
+func ensureServiceDeleted(svcName string, svc *egressservices.ServiceStatus, nfr linuxfw.NetfilterRunner) error {
+
+	// Note that the portmap is needed for iptables based firewall only.
+	// Nftables group rules for a service in a chain, so there is no need to
+	// specify individual portmapping based rules.
+	pms := make([]linuxfw.PortMap, 0)
+	for pm := range svc.Ports {
+		pms = append(pms, linuxfw.PortMap{MatchPort: pm.MatchPort, TargetPort: pm.TargetPort, Protocol: pm.Protocol})
+	}
+
+	if err := nfr.DeleteSvc(svcName, tailscaleTunInterface, svc.TailnetTargetIPs, pms); err != nil {
+		return fmt.Errorf("error deleting service %s: %w", svcName, err)
+	}
+	return nil
+}
+
+// ensureRulesAdded ensures that all portmapping rules are added to the firewall
+// configuration. For any rules that already exist, calling this function is a
+// no-op. 
In case of nftables, a service consists of one or two (one per IP
+// family) chains that contain the portmapping rules for the service and the
+// chains as needed when this function is called.
+func ensureRulesAdded(rulesPerSvc map[string][]rule, nfr linuxfw.NetfilterRunner) error {
+	for svc, rules := range rulesPerSvc {
+		for _, rule := range rules {
+			log.Printf("ensureRulesAdded svc %s tailnetTarget %s container port %d tailnet port %d protocol %s", svc, rule.tailnetIP, rule.containerPort, rule.tailnetPort, rule.protocol)
+			if err := nfr.EnsurePortMapRuleForSvc(svc, tailscaleTunInterface, rule.tailnetIP, linuxfw.PortMap{MatchPort: rule.containerPort, TargetPort: rule.tailnetPort, Protocol: rule.protocol}); err != nil {
+				return fmt.Errorf("error ensuring rule: %w", err)
+			}
+		}
+	}
+	return nil
+}
+
+// ensureRulesDeleted ensures that the given rules are deleted from the firewall
+// configuration. For any rules that do not exist, calling this function is a
+// no-op.
+func ensureRulesDeleted(rulesPerSvc map[string][]rule, nfr linuxfw.NetfilterRunner) error {
+	for svc, rules := range rulesPerSvc {
+		for _, rule := range rules {
+			log.Printf("ensureRulesDeleted svc %s tailnetTarget %s container port %d tailnet port %d protocol %s", svc, rule.tailnetIP, rule.containerPort, rule.tailnetPort, rule.protocol)
+			if err := nfr.DeletePortMapRuleForSvc(svc, tailscaleTunInterface, rule.tailnetIP, linuxfw.PortMap{MatchPort: rule.containerPort, TargetPort: rule.tailnetPort, Protocol: rule.protocol}); err != nil {
+				return fmt.Errorf("error deleting rule: %w", err)
+			}
+		}
+	}
+	return nil
+}
+
+func lookupCurrentConfig(svcName string, status *egressservices.Status) (*egressservices.ServiceStatus, bool) {
+	if status == nil || len(status.Services) == 0 {
+		return nil, false
+	}
+	c, ok := status.Services[svcName]
+	return c, ok
+}
+
+func equalFQDNs(s, s1 string) bool {
+	s, _ = strings.CutSuffix(s, ".")
+	s1, _ = strings.CutSuffix(s1, ".")
+	return strings.EqualFold(s, s1) 
+} + +// rule contains configuration for an egress proxy firewall rule. +type rule struct { + containerPort uint16 // port to match incoming traffic + tailnetPort uint16 // tailnet service port + tailnetIP netip.Addr // tailnet service IP + protocol string +} + +func wantsServicesConfigured(cfgs *egressservices.Configs) bool { + return cfgs != nil && len(*cfgs) != 0 +} + +func hasServicesConfigured(status *egressservices.Status) bool { + return status != nil && len(status.Services) != 0 +} + +func servicesStatusIsEqual(st, st1 *egressservices.Status) bool { + if st == nil && st1 == nil { + return true + } + if st == nil || st1 == nil { + return false + } + st.PodIPv4 = "" + st1.PodIPv4 = "" + return reflect.DeepEqual(*st, *st1) +} diff --git a/cmd/containerboot/services_test.go b/cmd/containerboot/services_test.go new file mode 100644 index 0000000000000..46f6db1cf6d0e --- /dev/null +++ b/cmd/containerboot/services_test.go @@ -0,0 +1,175 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package main + +import ( + "net/netip" + "reflect" + "testing" + + "tailscale.com/kube/egressservices" +) + +func Test_updatesForSvc(t *testing.T) { + tailnetIPv4, tailnetIPv6 := netip.MustParseAddr("100.99.99.99"), netip.MustParseAddr("fd7a:115c:a1e0::701:b62a") + tailnetIPv4_1, tailnetIPv6_1 := netip.MustParseAddr("100.88.88.88"), netip.MustParseAddr("fd7a:115c:a1e0::4101:512f") + ports := map[egressservices.PortMap]struct{}{{Protocol: "tcp", MatchPort: 4003, TargetPort: 80}: {}} + ports1 := map[egressservices.PortMap]struct{}{{Protocol: "udp", MatchPort: 4004, TargetPort: 53}: {}} + ports2 := map[egressservices.PortMap]struct{}{{Protocol: "tcp", MatchPort: 4003, TargetPort: 80}: {}, + {Protocol: "tcp", MatchPort: 4005, TargetPort: 443}: {}} + fqdnSpec := egressservices.Config{ + TailnetTarget: egressservices.TailnetTarget{FQDN: "test"}, + Ports: ports, + } + fqdnSpec1 := egressservices.Config{ + TailnetTarget: 
egressservices.TailnetTarget{FQDN: "test"}, + Ports: ports1, + } + fqdnSpec2 := egressservices.Config{ + TailnetTarget: egressservices.TailnetTarget{IP: tailnetIPv4.String()}, + Ports: ports, + } + fqdnSpec3 := egressservices.Config{ + TailnetTarget: egressservices.TailnetTarget{IP: tailnetIPv4.String()}, + Ports: ports2, + } + r := rule{containerPort: 4003, tailnetPort: 80, protocol: "tcp", tailnetIP: tailnetIPv4} + r1 := rule{containerPort: 4003, tailnetPort: 80, protocol: "tcp", tailnetIP: tailnetIPv6} + r2 := rule{tailnetPort: 53, containerPort: 4004, protocol: "udp", tailnetIP: tailnetIPv4} + r3 := rule{tailnetPort: 53, containerPort: 4004, protocol: "udp", tailnetIP: tailnetIPv6} + r4 := rule{containerPort: 4003, tailnetPort: 80, protocol: "tcp", tailnetIP: tailnetIPv4_1} + r5 := rule{containerPort: 4003, tailnetPort: 80, protocol: "tcp", tailnetIP: tailnetIPv6_1} + r6 := rule{containerPort: 4005, tailnetPort: 443, protocol: "tcp", tailnetIP: tailnetIPv4} + + tests := []struct { + name string + svcName string + tailnetTargetIPs []netip.Addr + podIP string + spec egressservices.Config + status *egressservices.Status + wantRulesToAdd []rule + wantRulesToDelete []rule + }{ + { + name: "add_fqdn_svc_that_does_not_yet_exist", + svcName: "test", + tailnetTargetIPs: []netip.Addr{tailnetIPv4, tailnetIPv6}, + spec: fqdnSpec, + status: &egressservices.Status{}, + wantRulesToAdd: []rule{r, r1}, + wantRulesToDelete: []rule{}, + }, + { + name: "fqdn_svc_already_exists", + svcName: "test", + tailnetTargetIPs: []netip.Addr{tailnetIPv4, tailnetIPv6}, + spec: fqdnSpec, + status: &egressservices.Status{ + Services: map[string]*egressservices.ServiceStatus{"test": { + TailnetTargetIPs: []netip.Addr{tailnetIPv4, tailnetIPv6}, + TailnetTarget: egressservices.TailnetTarget{FQDN: "test"}, + Ports: ports, + }}}, + wantRulesToAdd: []rule{}, + wantRulesToDelete: []rule{}, + }, + { + name: "fqdn_svc_already_exists_add_port_remove_port", + svcName: "test", + tailnetTargetIPs: 
[]netip.Addr{tailnetIPv4, tailnetIPv6}, + spec: fqdnSpec1, + status: &egressservices.Status{ + Services: map[string]*egressservices.ServiceStatus{"test": { + TailnetTargetIPs: []netip.Addr{tailnetIPv4, tailnetIPv6}, + TailnetTarget: egressservices.TailnetTarget{FQDN: "test"}, + Ports: ports, + }}}, + wantRulesToAdd: []rule{r2, r3}, + wantRulesToDelete: []rule{r, r1}, + }, + { + name: "fqdn_svc_already_exists_change_fqdn_backend_ips", + svcName: "test", + tailnetTargetIPs: []netip.Addr{tailnetIPv4_1, tailnetIPv6_1}, + spec: fqdnSpec, + status: &egressservices.Status{ + Services: map[string]*egressservices.ServiceStatus{"test": { + TailnetTargetIPs: []netip.Addr{tailnetIPv4, tailnetIPv6}, + TailnetTarget: egressservices.TailnetTarget{FQDN: "test"}, + Ports: ports, + }}}, + wantRulesToAdd: []rule{r4, r5}, + wantRulesToDelete: []rule{r, r1}, + }, + { + name: "add_ip_service", + svcName: "test", + tailnetTargetIPs: []netip.Addr{tailnetIPv4}, + spec: fqdnSpec2, + status: &egressservices.Status{}, + wantRulesToAdd: []rule{r}, + wantRulesToDelete: []rule{}, + }, + { + name: "add_ip_service_already_exists", + svcName: "test", + tailnetTargetIPs: []netip.Addr{tailnetIPv4}, + spec: fqdnSpec2, + status: &egressservices.Status{ + Services: map[string]*egressservices.ServiceStatus{"test": { + TailnetTargetIPs: []netip.Addr{tailnetIPv4}, + TailnetTarget: egressservices.TailnetTarget{IP: tailnetIPv4.String()}, + Ports: ports, + }}}, + wantRulesToAdd: []rule{}, + wantRulesToDelete: []rule{}, + }, + { + name: "ip_service_add_port", + svcName: "test", + tailnetTargetIPs: []netip.Addr{tailnetIPv4}, + spec: fqdnSpec3, + status: &egressservices.Status{ + Services: map[string]*egressservices.ServiceStatus{"test": { + TailnetTargetIPs: []netip.Addr{tailnetIPv4}, + TailnetTarget: egressservices.TailnetTarget{IP: tailnetIPv4.String()}, + Ports: ports, + }}}, + wantRulesToAdd: []rule{r6}, + wantRulesToDelete: []rule{}, + }, + { + name: "ip_service_delete_port", + svcName: "test", + 
tailnetTargetIPs: []netip.Addr{tailnetIPv4}, + spec: fqdnSpec, + status: &egressservices.Status{ + Services: map[string]*egressservices.ServiceStatus{"test": { + TailnetTargetIPs: []netip.Addr{tailnetIPv4}, + TailnetTarget: egressservices.TailnetTarget{IP: tailnetIPv4.String()}, + Ports: ports2, + }}}, + wantRulesToAdd: []rule{}, + wantRulesToDelete: []rule{r6}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotRulesToAdd, gotRulesToDelete, err := updatesForCfg(tt.svcName, tt.spec, tt.status, tt.tailnetTargetIPs) + if err != nil { + t.Errorf("updatesForSvc() unexpected error %v", err) + return + } + if !reflect.DeepEqual(gotRulesToAdd, tt.wantRulesToAdd) { + t.Errorf("updatesForSvc() got rulesToAdd = \n%v\n want rulesToAdd \n%v", gotRulesToAdd, tt.wantRulesToAdd) + } + if !reflect.DeepEqual(gotRulesToDelete, tt.wantRulesToDelete) { + t.Errorf("updatesForSvc() got rulesToDelete = \n%v\n want rulesToDelete \n%v", gotRulesToDelete, tt.wantRulesToDelete) + } + }) + } +} diff --git a/cmd/containerboot/settings.go b/cmd/containerboot/settings.go new file mode 100644 index 0000000000000..742713e7700de --- /dev/null +++ b/cmd/containerboot/settings.go @@ -0,0 +1,324 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package main + +import ( + "context" + "errors" + "fmt" + "log" + "net/netip" + "os" + "path" + "strconv" + "strings" + + "tailscale.com/ipn/conffile" + "tailscale.com/kube/kubeclient" +) + +// settings is all the configuration for containerboot. +type settings struct { + AuthKey string + Hostname string + Routes *string + // ProxyTargetIP is the destination IP to which all incoming + // Tailscale traffic should be proxied. If empty, no proxying + // is done. This is typically a locally reachable IP. + ProxyTargetIP string + // ProxyTargetDNSName is a DNS name to whose backing IP addresses all + // incoming Tailscale traffic should be proxied. 
+ ProxyTargetDNSName string + // TailnetTargetIP is the destination IP to which all incoming + // non-Tailscale traffic should be proxied. This is typically a + // Tailscale IP. + TailnetTargetIP string + // TailnetTargetFQDN is a MagicDNS name to which all incoming + // non-Tailscale traffic should be proxied. This must be a full Tailnet + // node FQDN. + TailnetTargetFQDN string + ServeConfigPath string + DaemonExtraArgs string + ExtraArgs string + InKubernetes bool + UserspaceMode bool + StateDir string + AcceptDNS *bool + KubeSecret string + SOCKSProxyAddr string + HTTPProxyAddr string + Socket string + AuthOnce bool + Root string + KubernetesCanPatch bool + TailscaledConfigFilePath string + EnableForwardingOptimizations bool + // If set to true and this containerboot instance is a Kubernetes + // ingress proxy, set up rules for incoming cluster traffic to be + // forwarded to the ingress target in cluster. + PodIP is the IP of the Pod if running in Kubernetes. This is used + // when setting up rules to proxy cluster traffic to cluster ingress + // target. 
+ // Deprecated: use PodIPv4, PodIPv6 instead to support dual stack clusters + PodIP string + PodIPv4 string + PodIPv6 string + HealthCheckAddrPort string + EgressSvcsCfgPath string +} + +func configFromEnv() (*settings, error) { + cfg := &settings{ + AuthKey: defaultEnvs([]string{"TS_AUTHKEY", "TS_AUTH_KEY"}, ""), + Hostname: defaultEnv("TS_HOSTNAME", ""), + Routes: defaultEnvStringPointer("TS_ROUTES"), + ServeConfigPath: defaultEnv("TS_SERVE_CONFIG", ""), + ProxyTargetIP: defaultEnv("TS_DEST_IP", ""), + ProxyTargetDNSName: defaultEnv("TS_EXPERIMENTAL_DEST_DNS_NAME", ""), + TailnetTargetIP: defaultEnv("TS_TAILNET_TARGET_IP", ""), + TailnetTargetFQDN: defaultEnv("TS_TAILNET_TARGET_FQDN", ""), + DaemonExtraArgs: defaultEnv("TS_TAILSCALED_EXTRA_ARGS", ""), + ExtraArgs: defaultEnv("TS_EXTRA_ARGS", ""), + InKubernetes: os.Getenv("KUBERNETES_SERVICE_HOST") != "", + UserspaceMode: defaultBool("TS_USERSPACE", true), + StateDir: defaultEnv("TS_STATE_DIR", ""), + AcceptDNS: defaultEnvBoolPointer("TS_ACCEPT_DNS"), + KubeSecret: defaultEnv("TS_KUBE_SECRET", "tailscale"), + SOCKSProxyAddr: defaultEnv("TS_SOCKS5_SERVER", ""), + HTTPProxyAddr: defaultEnv("TS_OUTBOUND_HTTP_PROXY_LISTEN", ""), + Socket: defaultEnv("TS_SOCKET", "/tmp/tailscaled.sock"), + AuthOnce: defaultBool("TS_AUTH_ONCE", false), + Root: defaultEnv("TS_TEST_ONLY_ROOT", "/"), + TailscaledConfigFilePath: tailscaledConfigFilePath(), + AllowProxyingClusterTrafficViaIngress: defaultBool("EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS", false), + PodIP: defaultEnv("POD_IP", ""), + EnableForwardingOptimizations: defaultBool("TS_EXPERIMENTAL_ENABLE_FORWARDING_OPTIMIZATIONS", false), + HealthCheckAddrPort: defaultEnv("TS_HEALTHCHECK_ADDR_PORT", ""), + EgressSvcsCfgPath: defaultEnv("TS_EGRESS_SERVICES_CONFIG_PATH", ""), + } + podIPs, ok := os.LookupEnv("POD_IPS") + if ok { + ips := strings.Split(podIPs, ",") + if len(ips) > 2 { + return nil, fmt.Errorf("POD_IPs can contain at most 2 IPs, got %d (%v)", len(ips), 
ips) + } + for _, ip := range ips { + parsed, err := netip.ParseAddr(ip) + if err != nil { + return nil, fmt.Errorf("error parsing IP address %s: %w", ip, err) + } + if parsed.Is4() { + cfg.PodIPv4 = parsed.String() + continue + } + cfg.PodIPv6 = parsed.String() + } + } + if err := cfg.validate(); err != nil { + return nil, fmt.Errorf("invalid configuration: %v", err) + } + return cfg, nil +} + +func (s *settings) validate() error { + if s.TailscaledConfigFilePath != "" { + dir, file := path.Split(s.TailscaledConfigFilePath) + if _, err := os.Stat(dir); err != nil { + return fmt.Errorf("error validating whether directory with tailscaled config file %s exists: %w", dir, err) + } + if _, err := os.Stat(s.TailscaledConfigFilePath); err != nil { + return fmt.Errorf("error validating whether tailscaled config directory %q contains tailscaled config for current capability version %q: %w. If this is a Tailscale Kubernetes operator proxy, please ensure that the version of the operator is not older than the version of the proxy", dir, file, err) + } + if _, err := conffile.Load(s.TailscaledConfigFilePath); err != nil { + return fmt.Errorf("error validating tailscaled configfile contents: %w", err) + } + } + if s.ProxyTargetIP != "" && s.UserspaceMode { + return errors.New("TS_DEST_IP is not supported with TS_USERSPACE") + } + if s.ProxyTargetDNSName != "" && s.UserspaceMode { + return errors.New("TS_EXPERIMENTAL_DEST_DNS_NAME is not supported with TS_USERSPACE") + } + if s.ProxyTargetDNSName != "" && s.ProxyTargetIP != "" { + return errors.New("TS_EXPERIMENTAL_DEST_DNS_NAME and TS_DEST_IP cannot both be set") + } + if s.TailnetTargetIP != "" && s.UserspaceMode { + return errors.New("TS_TAILNET_TARGET_IP is not supported with TS_USERSPACE") + } + if s.TailnetTargetFQDN != "" && s.UserspaceMode { + return errors.New("TS_TAILNET_TARGET_FQDN is not supported with TS_USERSPACE") + } + if s.TailnetTargetFQDN != "" && s.TailnetTargetIP != "" { + return errors.New("Both 
TS_TAILNET_TARGET_IP and TS_TAILNET_FQDN cannot be set") + } + if s.TailscaledConfigFilePath != "" && (s.AcceptDNS != nil || s.AuthKey != "" || s.Routes != nil || s.ExtraArgs != "" || s.Hostname != "") { + return errors.New("TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR cannot be set in combination with TS_HOSTNAME, TS_EXTRA_ARGS, TS_AUTHKEY, TS_ROUTES, TS_ACCEPT_DNS.") + } + if s.AllowProxyingClusterTrafficViaIngress && s.UserspaceMode { + return errors.New("EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS is not supported in userspace mode") + } + if s.AllowProxyingClusterTrafficViaIngress && s.ServeConfigPath == "" { + return errors.New("EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS is set but this is not a cluster ingress proxy") + } + if s.AllowProxyingClusterTrafficViaIngress && s.PodIP == "" { + return errors.New("EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS is set but POD_IP is not set") + } + if s.EnableForwardingOptimizations && s.UserspaceMode { + return errors.New("TS_EXPERIMENTAL_ENABLE_FORWARDING_OPTIMIZATIONS is not supported in userspace mode") + } + if s.HealthCheckAddrPort != "" { + if _, err := netip.ParseAddrPort(s.HealthCheckAddrPort); err != nil { + return fmt.Errorf("error parsing TS_HEALTH_CHECK_ADDR_PORT value %q: %w", s.HealthCheckAddrPort, err) + } + } + return nil +} + +// setupKube is responsible for doing any necessary configuration and checks to +// ensure that tailscale state storage and authentication mechanism will work on +// Kubernetes. 
+func (cfg *settings) setupKube(ctx context.Context) error { + if cfg.KubeSecret == "" { + return nil + } + canPatch, canCreate, err := kc.CheckSecretPermissions(ctx, cfg.KubeSecret) + if err != nil { + return fmt.Errorf("some Kubernetes permissions are missing, please check your RBAC configuration: %v", err) + } + cfg.KubernetesCanPatch = canPatch + + s, err := kc.GetSecret(ctx, cfg.KubeSecret) + if err != nil { + if !kubeclient.IsNotFoundErr(err) { + return fmt.Errorf("getting Tailscale state Secret %s: %v", cfg.KubeSecret, err) + } + + if !canCreate { + return fmt.Errorf("tailscale state Secret %s does not exist and we don't have permissions to create it. "+ + "If you intend to store tailscale state elsewhere than a Kubernetes Secret, "+ + "you can explicitly set TS_KUBE_SECRET env var to an empty string. "+ + "Else ensure that RBAC is set up that allows the service account associated with this installation to create Secrets.", cfg.KubeSecret) + } + } + + // Return early if we already have an auth key. + if cfg.AuthKey != "" || isOneStepConfig(cfg) { + return nil + } + + if s == nil { + log.Print("TS_AUTHKEY not provided and state Secret does not exist, login will be interactive if needed.") + return nil + } + + keyBytes, _ := s.Data["authkey"] + key := string(keyBytes) + + if key != "" { + // Enforce that we must be able to patch out the authkey after + // authenticating if you want to use this feature. This avoids + // us having to deal with the case where we might leave behind + // an unnecessary reusable authkey in a secret, like a rake in + // the grass. 
+ if !cfg.KubernetesCanPatch { + return errors.New("authkey found in TS_KUBE_SECRET, but the pod doesn't have patch permissions on the Secret to manage the authkey.") + } + cfg.AuthKey = key + } + + log.Print("No authkey found in state Secret and TS_AUTHKEY not provided, login will be interactive if needed.") + + return nil +} + +// isTwoStepConfigAuthOnce returns true if the Tailscale node should be configured +// in two steps and login should only happen once. +// Step 1: run 'tailscaled' +// Step 2: +// A) if this is the first time starting this node run 'tailscale up --authkey ' +// B) if this is not the first time starting this node run 'tailscale set '. +func isTwoStepConfigAuthOnce(cfg *settings) bool { + return cfg.AuthOnce && cfg.TailscaledConfigFilePath == "" +} + +// isTwoStepConfigAlwaysAuth returns true if the Tailscale node should be configured +// in two steps and we should log in every time it starts. +// Step 1: run 'tailscaled' +// Step 2: run 'tailscale up --authkey ' +func isTwoStepConfigAlwaysAuth(cfg *settings) bool { + return !cfg.AuthOnce && cfg.TailscaledConfigFilePath == "" +} + +// isOneStepConfig returns true if the Tailscale node should always be run and +// configured in a single step by running 'tailscaled ' +func isOneStepConfig(cfg *settings) bool { + return cfg.TailscaledConfigFilePath != "" +} + +// isL3Proxy returns true if the Tailscale node needs to be configured to act +// as an L3 proxy, proxying to an endpoint provided via one of the config env +// vars. +func isL3Proxy(cfg *settings) bool { + return cfg.ProxyTargetIP != "" || cfg.ProxyTargetDNSName != "" || cfg.TailnetTargetIP != "" || cfg.TailnetTargetFQDN != "" || cfg.AllowProxyingClusterTrafficViaIngress || cfg.EgressSvcsCfgPath != "" +} + +// hasKubeStateStore returns true if the state must be stored in a Kubernetes +// Secret. 
+func hasKubeStateStore(cfg *settings) bool { + return cfg.InKubernetes && cfg.KubernetesCanPatch && cfg.KubeSecret != "" +} + +// defaultEnv returns the value of the given envvar name, or defVal if +// unset. +func defaultEnv(name, defVal string) string { + if v, ok := os.LookupEnv(name); ok { + return v + } + return defVal +} + +// defaultEnvStringPointer returns a pointer to the given envvar value if set, else +// returns nil. This is useful in cases where we need to distinguish between a +// variable being set to empty string vs unset. +func defaultEnvStringPointer(name string) *string { + if v, ok := os.LookupEnv(name); ok { + return &v + } + return nil +} + +// defaultEnvBoolPointer returns a pointer to the given envvar value if set, else +// returns nil. This is useful in cases where we need to distinguish between a +// variable being explicitly set to false vs unset. +func defaultEnvBoolPointer(name string) *bool { + v := os.Getenv(name) + ret, err := strconv.ParseBool(v) + if err != nil { + return nil + } + return &ret +} + +func defaultEnvs(names []string, defVal string) string { + for _, name := range names { + if v, ok := os.LookupEnv(name); ok { + return v + } + } + return defVal +} + +// defaultBool returns the boolean value of the given envvar name, or +// defVal if unset or not a bool. 
+func defaultBool(name string, defVal bool) bool { + v := os.Getenv(name) + ret, err := strconv.ParseBool(v) + if err != nil { + return defVal + } + return ret +} diff --git a/cmd/containerboot/tailscaled.go b/cmd/containerboot/tailscaled.go new file mode 100644 index 0000000000000..53fb7e703be45 --- /dev/null +++ b/cmd/containerboot/tailscaled.go @@ -0,0 +1,162 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package main + +import ( + "context" + "errors" + "fmt" + "io/fs" + "log" + "os" + "os/exec" + "strings" + "syscall" + "time" + + "tailscale.com/client/tailscale" +) + +func startTailscaled(ctx context.Context, cfg *settings) (*tailscale.LocalClient, *os.Process, error) { + args := tailscaledArgs(cfg) + // tailscaled runs without context, since it needs to persist + // beyond the startup timeout in ctx. + cmd := exec.Command("tailscaled", args...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setpgid: true, + } + log.Printf("Starting tailscaled") + if err := cmd.Start(); err != nil { + return nil, nil, fmt.Errorf("starting tailscaled failed: %v", err) + } + + // Wait for the socket file to appear, otherwise API ops will racily fail. + log.Printf("Waiting for tailscaled socket") + for { + if ctx.Err() != nil { + log.Fatalf("Timed out waiting for tailscaled socket") + } + _, err := os.Stat(cfg.Socket) + if errors.Is(err, fs.ErrNotExist) { + time.Sleep(100 * time.Millisecond) + continue + } else if err != nil { + log.Fatalf("Waiting for tailscaled socket: %v", err) + } + break + } + + tsClient := &tailscale.LocalClient{ + Socket: cfg.Socket, + UseSocketOnly: true, + } + + return tsClient, cmd.Process, nil +} + +// tailscaledArgs uses cfg to construct the argv for tailscaled. 
+func tailscaledArgs(cfg *settings) []string { + args := []string{"--socket=" + cfg.Socket} + switch { + case cfg.InKubernetes && cfg.KubeSecret != "": + args = append(args, "--state=kube:"+cfg.KubeSecret) + if cfg.StateDir == "" { + cfg.StateDir = "/tmp" + } + fallthrough + case cfg.StateDir != "": + args = append(args, "--statedir="+cfg.StateDir) + default: + args = append(args, "--state=mem:", "--statedir=/tmp") + } + + if cfg.UserspaceMode { + args = append(args, "--tun=userspace-networking") + } else if err := ensureTunFile(cfg.Root); err != nil { + log.Fatalf("ensuring that /dev/net/tun exists: %v", err) + } + + if cfg.SOCKSProxyAddr != "" { + args = append(args, "--socks5-server="+cfg.SOCKSProxyAddr) + } + if cfg.HTTPProxyAddr != "" { + args = append(args, "--outbound-http-proxy-listen="+cfg.HTTPProxyAddr) + } + if cfg.TailscaledConfigFilePath != "" { + args = append(args, "--config="+cfg.TailscaledConfigFilePath) + } + if cfg.DaemonExtraArgs != "" { + args = append(args, strings.Fields(cfg.DaemonExtraArgs)...) + } + return args +} + +// tailscaleUp uses cfg to run 'tailscale up' every time containerboot starts, or +// if TS_AUTH_ONCE is set, only the first time containerboot starts. +func tailscaleUp(ctx context.Context, cfg *settings) error { + args := []string{"--socket=" + cfg.Socket, "up"} + if cfg.AcceptDNS != nil && *cfg.AcceptDNS { + args = append(args, "--accept-dns=true") + } else { + args = append(args, "--accept-dns=false") + } + if cfg.AuthKey != "" { + args = append(args, "--authkey="+cfg.AuthKey) + } + // --advertise-routes can be passed an empty string to configure a + // device (that might have previously advertised subnet routes) to not + // advertise any routes. Respect an empty string passed by a user and + // use it to explicitly unset the routes. 
+ if cfg.Routes != nil { + args = append(args, "--advertise-routes="+*cfg.Routes) + } + if cfg.Hostname != "" { + args = append(args, "--hostname="+cfg.Hostname) + } + if cfg.ExtraArgs != "" { + args = append(args, strings.Fields(cfg.ExtraArgs)...) + } + log.Printf("Running 'tailscale up'") + cmd := exec.CommandContext(ctx, "tailscale", args...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("tailscale up failed: %v", err) + } + return nil +} + +// tailscaleSet uses cfg to run 'tailscale set' to set any known configuration +// options that are passed in via environment variables. This is run after the +// node is in Running state and only if TS_AUTH_ONCE is set. +func tailscaleSet(ctx context.Context, cfg *settings) error { + args := []string{"--socket=" + cfg.Socket, "set"} + if cfg.AcceptDNS != nil && *cfg.AcceptDNS { + args = append(args, "--accept-dns=true") + } else { + args = append(args, "--accept-dns=false") + } + // --advertise-routes can be passed an empty string to configure a + // device (that might have previously advertised subnet routes) to not + // advertise any routes. Respect an empty string passed by a user and + // use it to explicitly unset the routes. + if cfg.Routes != nil { + args = append(args, "--advertise-routes="+*cfg.Routes) + } + if cfg.Hostname != "" { + args = append(args, "--hostname="+cfg.Hostname) + } + log.Printf("Running 'tailscale set'") + cmd := exec.CommandContext(ctx, "tailscale", args...) 
+ cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("tailscale set failed: %v", err) + } + return nil +} diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 2f6f160c8f960..417dbcfb0deb7 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -128,7 +128,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/tsweb from tailscale.com/cmd/derper tailscale.com/tsweb/promvarz from tailscale.com/tsweb tailscale.com/tsweb/varz from tailscale.com/tsweb+ - tailscale.com/types/dnstype from tailscale.com/tailcfg + tailscale.com/types/dnstype from tailscale.com/tailcfg+ tailscale.com/types/empty from tailscale.com/ipn tailscale.com/types/ipproto from tailscale.com/tailcfg+ tailscale.com/types/key from tailscale.com/client/tailscale+ @@ -162,7 +162,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/singleflight from tailscale.com/net/dnscache tailscale.com/util/slicesx from tailscale.com/cmd/derper+ tailscale.com/util/syspolicy from tailscale.com/ipn - tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting + tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ + tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy tailscale.com/util/usermetric from tailscale.com/health tailscale.com/util/vizerror from tailscale.com/tailcfg+ diff --git a/cmd/k8s-operator/connector_test.go b/cmd/k8s-operator/connector_test.go index 01c60bc9e0fc4..a4ba90d3d6683 100644 --- a/cmd/k8s-operator/connector_test.go +++ b/cmd/k8s-operator/connector_test.go @@ -278,7 +278,7 @@ func TestConnectorWithProxyClass(t *testing.T) { pc.Status = tsapi.ProxyClassStatus{ Conditions: []metav1.Condition{{ Status: metav1.ConditionTrue, - Type: string(tsapi.ProxyClassready), + Type: 
string(tsapi.ProxyClassReady), ObservedGeneration: pc.Generation, }}} }) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 9c0e8dd0eb858..b77ea22ef5297 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -654,7 +654,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ - tailscale.com/clientupdate/distsign from tailscale.com/clientupdate + LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnlocal+ tailscale.com/control/controlhttp from tailscale.com/control/controlclient @@ -690,6 +690,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/k8s-operator/sessionrecording/spdy from tailscale.com/k8s-operator/sessionrecording tailscale.com/k8s-operator/sessionrecording/tsrecorder from tailscale.com/k8s-operator/sessionrecording+ tailscale.com/k8s-operator/sessionrecording/ws from tailscale.com/k8s-operator/sessionrecording + tailscale.com/kube/egressservices from tailscale.com/cmd/k8s-operator tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore tailscale.com/kube/kubetypes from tailscale.com/cmd/k8s-operator+ @@ -808,7 +809,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ tailscale.com/util/syspolicy from tailscale.com/control/controlclient+ - tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting + 
tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ + tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock tailscale.com/util/systemd from tailscale.com/control/controlclient+ diff --git a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml index 16dcae4903515..ede61070b4399 100644 --- a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml @@ -22,7 +22,7 @@ rules: resources: ["ingressclasses"] verbs: ["get", "list", "watch"] - apiGroups: ["tailscale.com"] - resources: ["connectors", "connectors/status", "proxyclasses", "proxyclasses/status"] + resources: ["connectors", "connectors/status", "proxyclasses", "proxyclasses/status", "proxygroups", "proxygroups/status"] verbs: ["get", "list", "watch", "update"] - apiGroups: ["tailscale.com"] resources: ["dnsconfigs", "dnsconfigs/status"] @@ -53,12 +53,15 @@ rules: - apiGroups: [""] resources: ["secrets", "serviceaccounts", "configmaps"] verbs: ["create","delete","deletecollection","get","list","patch","update","watch"] +- apiGroups: [""] + resources: ["pods"] + verbs: ["get","list","watch"] - apiGroups: ["apps"] resources: ["statefulsets", "deployments"] verbs: ["create","delete","deletecollection","get","list","patch","update","watch"] - apiGroups: ["discovery.k8s.io"] resources: ["endpointslices"] - verbs: ["get", "list", "watch"] + verbs: ["get", "list", "watch", "create", "update", "deletecollection"] - apiGroups: ["rbac.authorization.k8s.io"] resources: ["roles", "rolebindings"] verbs: ["get", "create", "patch", "update", "list", "watch"] diff --git a/cmd/k8s-operator/deploy/chart/values.yaml b/cmd/k8s-operator/deploy/chart/values.yaml index bf98a725ba867..e6f4cada44de7 100644 --- 
a/cmd/k8s-operator/deploy/chart/values.yaml +++ b/cmd/k8s-operator/deploy/chart/values.yaml @@ -57,12 +57,12 @@ operatorConfig: # proxyConfig contains configuraton that will be applied to any ingress/egress # proxies created by the operator. -# https://tailscale.com/kb/1236/kubernetes-operator/#cluster-ingress -# https://tailscale.com/kb/1236/kubernetes-operator/#cluster-egress +# https://tailscale.com/kb/1439/kubernetes-operator-cluster-ingress +# https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress # Note that this section contains only a few global configuration options and # will not be updated with more configuration options in the future. # If you need more configuration options, take a look at ProxyClass: -# https://tailscale.com/kb/1236/kubernetes-operator#cluster-resource-customization-using-proxyclass-custom-resource +# https://tailscale.com/kb/1445/kubernetes-operator-customization#cluster-resource-customization-using-proxyclass-custom-resource proxyConfig: image: # Repository defaults to DockerHub, but images are also synced to ghcr.io/tailscale/tailscale. @@ -79,12 +79,13 @@ proxyConfig: defaultTags: "tag:k8s" firewallMode: auto # If defined, this proxy class will be used as the default proxy class for - # service and ingress resources that do not have a proxy class defined. + # service and ingress resources that do not have a proxy class defined. It + # does not apply to Connector resources. defaultProxyClass: "" # apiServerProxyConfig allows to configure whether the operator should expose # Kubernetes API server. 
-# https://tailscale.com/kb/1236/kubernetes-operator/#accessing-the-kubernetes-control-plane-using-an-api-server-proxy +# https://tailscale.com/kb/1437/kubernetes-operator-api-server-proxy apiServerProxyConfig: mode: "false" # "true", "false", "noauth" diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml index 66ff060d40540..9614f74e6b162 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml @@ -37,7 +37,7 @@ spec: exit node. Connector is a cluster-scoped resource. More info: - https://tailscale.com/kb/1236/kubernetes-operator#deploying-exit-nodes-and-subnet-routers-on-kubernetes-using-connector-custom-resource + https://tailscale.com/kb/1441/kubernetes-operator-connector type: object required: - spec @@ -115,7 +115,7 @@ spec: To autoapprove the subnet routes or exit node defined by a Connector, you can configure Tailscale ACLs to give these tags the necessary permissions. - See https://tailscale.com/kb/1018/acls/#auto-approvers-for-routes-and-exit-nodes. + See https://tailscale.com/kb/1337/acl-syntax#autoapprovers. If you specify custom tags here, you must also make the operator an owner of these tags. See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator. Tags cannot be changed once a Connector node has been created. diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml index dc16511ceaee5..0fff30516a132 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml @@ -30,7 +30,7 @@ spec: connector.spec.proxyClass field. ProxyClass is a cluster scoped resource. More info: - https://tailscale.com/kb/1236/kubernetes-operator#cluster-resource-customization-using-proxyclass-custom-resource. 
+ https://tailscale.com/kb/1445/kubernetes-operator-customization#cluster-resource-customization-using-proxyclass-custom-resource type: object required: - spec @@ -1908,7 +1908,7 @@ spec: routes advertized by other nodes on the tailnet, such as subnet routes. This is equivalent of passing --accept-routes flag to a tailscale Linux client. - https://tailscale.com/kb/1019/subnets#use-your-subnet-routes-from-other-machines + https://tailscale.com/kb/1019/subnets#use-your-subnet-routes-from-other-devices Defaults to false. type: boolean status: diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml new file mode 100644 index 0000000000000..66701bdf4afbd --- /dev/null +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -0,0 +1,187 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.1-0.20240618033008-7824932b0cab + name: proxygroups.tailscale.com +spec: + group: tailscale.com + names: + kind: ProxyGroup + listKind: ProxyGroupList + plural: proxygroups + shortNames: + - pg + singular: proxygroup + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Status of the deployed ProxyGroup resources. + jsonPath: .status.conditions[?(@.type == "ProxyGroupReady")].reason + name: Status + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + type: object + required: + - spec + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Spec describes the desired ProxyGroup instances. + type: object + required: + - type + properties: + hostnamePrefix: + description: |- + HostnamePrefix is the hostname prefix to use for tailnet devices created + by the ProxyGroup. Each device will have the integer number from its + StatefulSet pod appended to this prefix to form the full hostname. + HostnamePrefix can contain lower case letters, numbers and dashes, it + must not start with a dash and must be between 1 and 62 characters long. + type: string + pattern: ^[a-z0-9][a-z0-9-]{0,61}$ + proxyClass: + description: |- + ProxyClass is the name of the ProxyClass custom resource that contains + configuration options that should be applied to the resources created + for this ProxyGroup. If unset, and there is no default ProxyClass + configured, the operator will create resources with the default + configuration. + type: string + replicas: + description: |- + Replicas specifies how many replicas to create the StatefulSet with. + Defaults to 2. + type: integer + format: int32 + tags: + description: |- + Tags that the Tailscale devices will be tagged with. Defaults to [tag:k8s]. + If you specify custom tags here, make sure you also make the operator + an owner of these tags. + See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator. + Tags cannot be changed once a ProxyGroup device has been created. + Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. + type: array + items: + type: string + pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ + type: + description: Type of the ProxyGroup proxies. Currently the only supported type is egress. 
+ type: string + enum: + - egress + status: + description: |- + ProxyGroupStatus describes the status of the ProxyGroup resources. This is + set and managed by the Tailscale operator. + type: object + properties: + conditions: + description: |- + List of status conditions to indicate the status of the ProxyGroup + resources. Known condition types are `ProxyGroupReady`. + type: array + items: + description: Condition contains details for one aspect of the current state of this API Resource. + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. 
+ type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + devices: + description: List of tailnet devices associated with the ProxyGroup StatefulSet. + type: array + items: + type: object + required: + - hostname + properties: + hostname: + description: |- + Hostname is the fully qualified domain name of the device. + If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the + node. + type: string + tailnetIPs: + description: |- + TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) + assigned to the device. + type: array + items: + type: string + x-kubernetes-list-map-keys: + - hostname + x-kubernetes-list-type: map + served: true + storage: true + subresources: + status: {} diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml index 2c4cf2f6bb95d..fda8bcebdbe53 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml @@ -1670,7 +1670,7 @@ spec: - type x-kubernetes-list-type: map devices: - description: List of tailnet devices associated with the Recorder statefulset. + description: List of tailnet devices associated with the Recorder StatefulSet. 
type: array items: type: object diff --git a/cmd/k8s-operator/deploy/examples/proxygroup.yaml b/cmd/k8s-operator/deploy/examples/proxygroup.yaml new file mode 100644 index 0000000000000..337d87f0b7e80 --- /dev/null +++ b/cmd/k8s-operator/deploy/examples/proxygroup.yaml @@ -0,0 +1,7 @@ +apiVersion: tailscale.com/v1alpha1 +kind: ProxyGroup +metadata: + name: egress-proxies +spec: + type: egress + replicas: 3 diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 0929aff4c8149..1a812b7362757 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -66,7 +66,7 @@ spec: exit node. Connector is a cluster-scoped resource. More info: - https://tailscale.com/kb/1236/kubernetes-operator#deploying-exit-nodes-and-subnet-routers-on-kubernetes-using-connector-custom-resource + https://tailscale.com/kb/1441/kubernetes-operator-connector properties: apiVersion: description: |- @@ -140,7 +140,7 @@ spec: To autoapprove the subnet routes or exit node defined by a Connector, you can configure Tailscale ACLs to give these tags the necessary permissions. - See https://tailscale.com/kb/1018/acls/#auto-approvers-for-routes-and-exit-nodes. + See https://tailscale.com/kb/1337/acl-syntax#autoapprovers. If you specify custom tags here, you must also make the operator an owner of these tags. See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator. Tags cannot be changed once a Connector node has been created. @@ -463,7 +463,7 @@ spec: connector.spec.proxyClass field. ProxyClass is a cluster scoped resource. More info: - https://tailscale.com/kb/1236/kubernetes-operator#cluster-resource-customization-using-proxyclass-custom-resource. 
+ https://tailscale.com/kb/1445/kubernetes-operator-customization#cluster-resource-customization-using-proxyclass-custom-resource properties: apiVersion: description: |- @@ -2336,7 +2336,7 @@ spec: routes advertized by other nodes on the tailnet, such as subnet routes. This is equivalent of passing --accept-routes flag to a tailscale Linux client. - https://tailscale.com/kb/1019/subnets#use-your-subnet-routes-from-other-machines + https://tailscale.com/kb/1019/subnets#use-your-subnet-routes-from-other-devices Defaults to false. type: boolean type: object @@ -2418,6 +2418,194 @@ spec: --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.1-0.20240618033008-7824932b0cab + name: proxygroups.tailscale.com +spec: + group: tailscale.com + names: + kind: ProxyGroup + listKind: ProxyGroupList + plural: proxygroups + shortNames: + - pg + singular: proxygroup + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Status of the deployed ProxyGroup resources. + jsonPath: .status.conditions[?(@.type == "ProxyGroupReady")].reason + name: Status + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Spec describes the desired ProxyGroup instances. + properties: + hostnamePrefix: + description: |- + HostnamePrefix is the hostname prefix to use for tailnet devices created + by the ProxyGroup. Each device will have the integer number from its + StatefulSet pod appended to this prefix to form the full hostname. + HostnamePrefix can contain lower case letters, numbers and dashes, it + must not start with a dash and must be between 1 and 62 characters long. + pattern: ^[a-z0-9][a-z0-9-]{0,61}$ + type: string + proxyClass: + description: |- + ProxyClass is the name of the ProxyClass custom resource that contains + configuration options that should be applied to the resources created + for this ProxyGroup. If unset, and there is no default ProxyClass + configured, the operator will create resources with the default + configuration. + type: string + replicas: + description: |- + Replicas specifies how many replicas to create the StatefulSet with. + Defaults to 2. + format: int32 + type: integer + tags: + description: |- + Tags that the Tailscale devices will be tagged with. Defaults to [tag:k8s]. + If you specify custom tags here, make sure you also make the operator + an owner of these tags. + See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator. + Tags cannot be changed once a ProxyGroup device has been created. + Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. + items: + pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ + type: string + type: array + type: + description: Type of the ProxyGroup proxies. Currently the only supported type is egress. + enum: + - egress + type: string + required: + - type + type: object + status: + description: |- + ProxyGroupStatus describes the status of the ProxyGroup resources. This is + set and managed by the Tailscale operator. 
+ properties: + conditions: + description: |- + List of status conditions to indicate the status of the ProxyGroup + resources. Known condition types are `ProxyGroupReady`. + items: + description: Condition contains details for one aspect of the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + devices: + description: List of tailnet devices associated with the ProxyGroup StatefulSet. + items: + properties: + hostname: + description: |- + Hostname is the fully qualified domain name of the device. + If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the + node. + type: string + tailnetIPs: + description: |- + TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) + assigned to the device. + items: + type: string + type: array + required: + - hostname + type: object + type: array + x-kubernetes-list-map-keys: + - hostname + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.15.1-0.20240618033008-7824932b0cab @@ -4084,7 +4272,7 @@ spec: - type x-kubernetes-list-type: map devices: - description: List of tailnet devices associated with the Recorder statefulset. + description: List of tailnet devices associated with the Recorder StatefulSet. 
items: properties: hostname: @@ -4171,6 +4359,8 @@ rules: - connectors/status - proxyclasses - proxyclasses/status + - proxygroups + - proxygroups/status verbs: - get - list @@ -4231,6 +4421,14 @@ rules: - patch - update - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch - apiGroups: - apps resources: @@ -4253,6 +4451,9 @@ rules: - get - list - watch + - create + - update + - deletecollection - apiGroups: - rbac.authorization.k8s.io resources: diff --git a/cmd/k8s-operator/egress-eps.go b/cmd/k8s-operator/egress-eps.go new file mode 100644 index 0000000000000..85992abed9e37 --- /dev/null +++ b/cmd/k8s-operator/egress-eps.go @@ -0,0 +1,213 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "encoding/json" + "fmt" + "net/netip" + "reflect" + "strings" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + tsoperator "tailscale.com/k8s-operator" + "tailscale.com/kube/egressservices" + "tailscale.com/types/ptr" +) + +// egressEpsReconciler reconciles EndpointSlices for tailnet services exposed to cluster via egress ProxyGroup proxies. +type egressEpsReconciler struct { + client.Client + logger *zap.SugaredLogger + tsNamespace string +} + +// Reconcile reconciles an EndpointSlice for a tailnet service. It updates the EndpointSlice with the endpoints of +// those ProxyGroup Pods that are ready to route traffic to the tailnet service. +// It compares tailnet service state stored in egress proxy state Secrets by containerboot with the desired +// configuration stored in proxy-cfg ConfigMap to determine if the endpoint is ready. 
+func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
+	l := er.logger.With("Service", req.NamespacedName)
+	l.Debugf("starting reconcile")
+	defer l.Debugf("reconcile finished")
+
+	eps := new(discoveryv1.EndpointSlice)
+	err = er.Get(ctx, req.NamespacedName, eps)
+	if apierrors.IsNotFound(err) {
+		l.Debugf("EndpointSlice not found")
+		return reconcile.Result{}, nil
+	}
+	if err != nil {
+		return reconcile.Result{}, fmt.Errorf("failed to get EndpointSlice: %w", err)
+	}
+	if !eps.DeletionTimestamp.IsZero() {
+		l.Debugf("EndpointSlice is being deleted")
+		return res, nil
+	}
+
+	// Get the user-created ExternalName Service and use its status conditions to determine whether cluster
+	// resources are set up for this tailnet service.
+	svc := &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      eps.Labels[LabelParentName],
+			Namespace: eps.Labels[LabelParentNamespace],
+		},
+	}
+	err = er.Get(ctx, client.ObjectKeyFromObject(svc), svc)
+	if apierrors.IsNotFound(err) {
+		l.Infof("ExternalName Service %s/%s not found, perhaps it was deleted", svc.Namespace, svc.Name)
+		return res, nil
+	}
+	if err != nil {
+		return res, fmt.Errorf("error retrieving ExternalName Service: %w", err)
+	}
+	if !tsoperator.EgressServiceIsValidAndConfigured(svc) {
+		l.Infof("Cluster resources for ExternalName Service %s/%s are not yet configured", svc.Namespace, svc.Name)
+		return res, nil
+	}
+
+	// TODO(irbekrm): currently this reconcile loop runs all the checks every time it's triggered, which is
+	// wasteful. Once we have a Ready condition for ExternalName Services for ProxyGroup, use the condition to
+	// determine if a reconcile is needed.
+
+	oldEps := eps.DeepCopy()
+	proxyGroupName := eps.Labels[labelProxyGroup]
+	tailnetSvc := tailnetSvcName(svc)
+	l = l.With("tailnet-service-name", tailnetSvc)
+
+	// Retrieve the desired tailnet service configuration from the ConfigMap.
+ _, cfgs, err := egressSvcsConfigs(ctx, er.Client, proxyGroupName, er.tsNamespace) + if err != nil { + return res, fmt.Errorf("error retrieving tailnet services configuration: %w", err) + } + cfg, ok := (*cfgs)[tailnetSvc] + if !ok { + l.Infof("[unexpected] configuration for tailnet service %s not found", tailnetSvc) + return res, nil + } + + // Check which Pods in ProxyGroup are ready to route traffic to this + // egress service. + podList := &corev1.PodList{} + if err := er.List(ctx, podList, client.MatchingLabels(pgLabels(proxyGroupName, nil))); err != nil { + return res, fmt.Errorf("error listing Pods for ProxyGroup %s: %w", proxyGroupName, err) + } + newEndpoints := make([]discoveryv1.Endpoint, 0) + for _, pod := range podList.Items { + ready, err := er.podIsReadyToRouteTraffic(ctx, pod, &cfg, tailnetSvc, l) + if err != nil { + return res, fmt.Errorf("error verifying if Pod is ready to route traffic: %w", err) + } + if !ready { + continue // maybe next time + } + podIP, err := podIPv4(&pod) // we currently only support IPv4 + if err != nil { + return res, fmt.Errorf("error determining IPv4 address for Pod: %w", err) + } + newEndpoints = append(newEndpoints, discoveryv1.Endpoint{ + Hostname: (*string)(&pod.UID), + Addresses: []string{podIP}, + Conditions: discoveryv1.EndpointConditions{ + Ready: ptr.To(true), + Serving: ptr.To(true), + Terminating: ptr.To(false), + }, + }) + } + // Note that Endpoints are being overwritten with the currently valid endpoints so we don't need to explicitly + // run a cleanup for deleted Pods etc. 
+ eps.Endpoints = newEndpoints + if !reflect.DeepEqual(eps, oldEps) { + l.Infof("Updating EndpointSlice to ensure traffic is routed to ready proxy Pods") + if err := er.Update(ctx, eps); err != nil { + return res, fmt.Errorf("error updating EndpointSlice: %w", err) + } + } + return res, nil +} + +func podIPv4(pod *corev1.Pod) (string, error) { + for _, ip := range pod.Status.PodIPs { + parsed, err := netip.ParseAddr(ip.IP) + if err != nil { + return "", fmt.Errorf("error parsing IP address %s: %w", ip, err) + } + if parsed.Is4() { + return parsed.String(), nil + } + } + return "", nil +} + +// podIsReadyToRouteTraffic returns true if it appears that the proxy Pod has configured firewall rules to be able to +// route traffic to the given tailnet service. It retrieves the proxy's state Secret and compares the tailnet service +// status written there to the desired service configuration. +func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod corev1.Pod, cfg *egressservices.Config, tailnetSvcName string, l *zap.SugaredLogger) (bool, error) { + l = l.With("proxy_pod", pod.Name) + l.Debugf("checking whether proxy is ready to route to egress service") + if !pod.DeletionTimestamp.IsZero() { + l.Debugf("proxy Pod is being deleted, ignore") + return false, nil + } + podIP, err := podIPv4(&pod) + if err != nil { + return false, fmt.Errorf("error determining Pod IP address: %v", err) + } + if podIP == "" { + l.Infof("[unexpected] Pod does not have an IPv4 address, and IPv6 is not currently supported") + return false, nil + } + stateS := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pod.Name, + Namespace: pod.Namespace, + }, + } + err = er.Get(ctx, client.ObjectKeyFromObject(stateS), stateS) + if apierrors.IsNotFound(err) { + l.Debugf("proxy does not have a state Secret, waiting...") + return false, nil + } + if err != nil { + return false, fmt.Errorf("error getting state Secret: %w", err) + } + svcStatusBS := 
stateS.Data[egressservices.KeyEgressServices] + if len(svcStatusBS) == 0 { + l.Debugf("proxy's state Secret does not contain egress services status, waiting...") + return false, nil + } + svcStatus := &egressservices.Status{} + if err := json.Unmarshal(svcStatusBS, svcStatus); err != nil { + return false, fmt.Errorf("error unmarshalling egress service status: %w", err) + } + if !strings.EqualFold(podIP, svcStatus.PodIPv4) { + l.Infof("proxy's egress service status is for Pod IP %s, current proxy's Pod IP %s, waiting for the proxy to reconfigure...", svcStatus.PodIPv4, podIP) + return false, nil + } + st, ok := (*svcStatus).Services[tailnetSvcName] + if !ok { + l.Infof("proxy's state Secret does not have egress service status, waiting...") + return false, nil + } + if !reflect.DeepEqual(cfg.TailnetTarget, st.TailnetTarget) { + l.Infof("proxy has configured egress service for tailnet target %v, current target is %v, waiting for proxy to reconfigure...", st.TailnetTarget, cfg.TailnetTarget) + return false, nil + } + if !reflect.DeepEqual(cfg.Ports, st.Ports) { + l.Debugf("proxy has configured egress service for ports %#+v, wants ports %#+v, waiting for proxy to reconfigure", st.Ports, cfg.Ports) + return false, nil + } + l.Debugf("proxy is ready to route traffic to egress service") + return true, nil +} diff --git a/cmd/k8s-operator/egress-eps_test.go b/cmd/k8s-operator/egress-eps_test.go new file mode 100644 index 0000000000000..a64f3e4e1bb50 --- /dev/null +++ b/cmd/k8s-operator/egress-eps_test.go @@ -0,0 +1,211 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "encoding/json" + "fmt" + "math/rand/v2" + "testing" + + "github.com/AlekSi/pointer" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + tsapi 
"tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/egressservices" + "tailscale.com/tstest" + "tailscale.com/util/mak" +) + +func TestTailscaleEgressEndpointSlices(t *testing.T) { + clock := tstest.NewClock(tstest.ClockOpts{}) + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + AnnotationTailnetTargetFQDN: "foo.bar.ts.net", + AnnotationProxyGroup: "foo", + }, + }, + Spec: corev1.ServiceSpec{ + ExternalName: "placeholder", + Type: corev1.ServiceTypeExternalName, + Selector: nil, + Ports: []corev1.ServicePort{ + { + Name: "http", + Protocol: "TCP", + Port: 80, + }, + }, + }, + Status: corev1.ServiceStatus{ + Conditions: []metav1.Condition{ + condition(tsapi.EgressSvcConfigured, metav1.ConditionTrue, "", "", clock), + condition(tsapi.EgressSvcValid, metav1.ConditionTrue, "", "", clock), + }, + }, + } + port := randomPort() + cm := configMapForSvc(t, svc, port) + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(svc, cm). + WithStatusSubresource(svc). 
+ Build() + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + er := &egressEpsReconciler{ + Client: fc, + logger: zl.Sugar(), + tsNamespace: "operator-ns", + } + eps := &discoveryv1.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "operator-ns", + Labels: map[string]string{ + LabelParentName: "test", + LabelParentNamespace: "default", + labelSvcType: typeEgress, + labelProxyGroup: "foo"}, + }, + AddressType: discoveryv1.AddressTypeIPv4, + } + mustCreate(t, fc, eps) + + t.Run("no_proxy_group_resources", func(t *testing.T) { + expectReconciled(t, er, "operator-ns", "foo") // should not error + }) + + t.Run("no_pods_ready_to_route_traffic", func(t *testing.T) { + pod, stateS := podAndSecretForProxyGroup("foo") + mustCreate(t, fc, pod) + mustCreate(t, fc, stateS) + expectReconciled(t, er, "operator-ns", "foo") // should not error + }) + + t.Run("pods_are_ready_to_route_traffic", func(t *testing.T) { + pod, stateS := podAndSecretForProxyGroup("foo") + stBs := serviceStatusForPodIP(t, svc, pod.Status.PodIPs[0].IP, port) + mustUpdate(t, fc, "operator-ns", stateS.Name, func(s *corev1.Secret) { + mak.Set(&s.Data, egressservices.KeyEgressServices, stBs) + }) + expectReconciled(t, er, "operator-ns", "foo") + eps.Endpoints = append(eps.Endpoints, discoveryv1.Endpoint{ + Addresses: []string{"10.0.0.1"}, + Hostname: pointer.To("foo"), + Conditions: discoveryv1.EndpointConditions{ + Serving: pointer.ToBool(true), + Ready: pointer.ToBool(true), + Terminating: pointer.ToBool(false), + }, + }) + expectEqual(t, fc, eps, nil) + }) + t.Run("status_does_not_match_pod_ip", func(t *testing.T) { + _, stateS := podAndSecretForProxyGroup("foo") // replica Pod has IP 10.0.0.1 + stBs := serviceStatusForPodIP(t, svc, "10.0.0.2", port) // status is for a Pod with IP 10.0.0.2 + mustUpdate(t, fc, "operator-ns", stateS.Name, func(s *corev1.Secret) { + mak.Set(&s.Data, egressservices.KeyEgressServices, stBs) + }) + expectReconciled(t, er, 
"operator-ns", "foo") + eps.Endpoints = []discoveryv1.Endpoint{} + expectEqual(t, fc, eps, nil) + }) +} + +func configMapForSvc(t *testing.T, svc *corev1.Service, p uint16) *corev1.ConfigMap { + t.Helper() + ports := make(map[egressservices.PortMap]struct{}) + for _, port := range svc.Spec.Ports { + ports[egressservices.PortMap{Protocol: string(port.Protocol), MatchPort: p, TargetPort: uint16(port.Port)}] = struct{}{} + } + cfg := egressservices.Config{ + Ports: ports, + } + if fqdn := svc.Annotations[AnnotationTailnetTargetFQDN]; fqdn != "" { + cfg.TailnetTarget = egressservices.TailnetTarget{FQDN: fqdn} + } + if ip := svc.Annotations[AnnotationTailnetTargetIP]; ip != "" { + cfg.TailnetTarget = egressservices.TailnetTarget{IP: ip} + } + name := tailnetSvcName(svc) + cfgs := egressservices.Configs{name: cfg} + bs, err := json.Marshal(&cfgs) + if err != nil { + t.Fatalf("error marshalling config: %v", err) + } + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgEgressCMName(svc.Annotations[AnnotationProxyGroup]), + Namespace: "operator-ns", + }, + BinaryData: map[string][]byte{egressservices.KeyEgressServices: bs}, + } + return cm +} + +func serviceStatusForPodIP(t *testing.T, svc *corev1.Service, ip string, p uint16) []byte { + t.Helper() + ports := make(map[egressservices.PortMap]struct{}) + for _, port := range svc.Spec.Ports { + ports[egressservices.PortMap{Protocol: string(port.Protocol), MatchPort: p, TargetPort: uint16(port.Port)}] = struct{}{} + } + svcSt := egressservices.ServiceStatus{Ports: ports} + if fqdn := svc.Annotations[AnnotationTailnetTargetFQDN]; fqdn != "" { + svcSt.TailnetTarget = egressservices.TailnetTarget{FQDN: fqdn} + } + if ip := svc.Annotations[AnnotationTailnetTargetIP]; ip != "" { + svcSt.TailnetTarget = egressservices.TailnetTarget{IP: ip} + } + svcName := tailnetSvcName(svc) + st := egressservices.Status{ + PodIPv4: ip, + Services: map[string]*egressservices.ServiceStatus{svcName: &svcSt}, + } + bs, err := 
json.Marshal(st) + if err != nil { + t.Fatalf("error marshalling service status: %v", err) + } + return bs +} + +func podAndSecretForProxyGroup(pg string) (*corev1.Pod, *corev1.Secret) { + p := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-0", pg), + Namespace: "operator-ns", + Labels: pgLabels(pg, nil), + UID: "foo", + }, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + {IP: "10.0.0.1"}, + }, + }, + } + s := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-0", pg), + Namespace: "operator-ns", + Labels: pgSecretLabels(pg, "state"), + }, + } + return p, s +} + +func randomPort() uint16 { + return uint16(rand.Int32N(1000) + 1000) +} diff --git a/cmd/k8s-operator/egress-services-readiness.go b/cmd/k8s-operator/egress-services-readiness.go new file mode 100644 index 0000000000000..f6991145f88fc --- /dev/null +++ b/cmd/k8s-operator/egress-services-readiness.go @@ -0,0 +1,179 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "errors" + "fmt" + "strings" + + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/tstime" +) + +const ( + reasonReadinessCheckFailed = "ReadinessCheckFailed" + reasonClusterResourcesNotReady = "ClusterResourcesNotReady" + reasonNoProxies = "NoProxiesConfigured" + reasonNotReady = "NotReadyToRouteTraffic" + reasonReady = "ReadyToRouteTraffic" + reasonPartiallyReady = "PartiallyReadyToRouteTraffic" + msgReadyToRouteTemplate = "%d out of %d replicas are ready to route traffic" +) + 
+type egressSvcsReadinessReconciler struct { + client.Client + logger *zap.SugaredLogger + clock tstime.Clock + tsNamespace string +} + +// Reconcile reconciles an ExternalName Service that defines a tailnet target to be exposed on a ProxyGroup and sets the +// EgressSvcReady condition on it. The condition gets set to true if at least one of the proxies is currently ready to +// route traffic to the target. It compares proxy Pod IPs with the endpoints set on the EndpointSlice for the egress +// service to determine how many replicas are currently able to route traffic. +func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { + l := esrr.logger.With("Service", req.NamespacedName) + defer l.Info("reconcile finished") + + svc := new(corev1.Service) + if err = esrr.Get(ctx, req.NamespacedName, svc); apierrors.IsNotFound(err) { + l.Info("Service not found") + return res, nil + } else if err != nil { + return res, fmt.Errorf("failed to get Service: %w", err) + } + var ( + reason, msg string + st metav1.ConditionStatus = metav1.ConditionUnknown + ) + oldStatus := svc.Status.DeepCopy() + defer func() { + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, st, reason, msg, esrr.clock, l) + if !apiequality.Semantic.DeepEqual(oldStatus, svc.Status) { + err = errors.Join(err, esrr.Status().Update(ctx, svc)) + } + }() + + crl := egressSvcChildResourceLabels(svc) + eps, err := getSingleObject[discoveryv1.EndpointSlice](ctx, esrr.Client, esrr.tsNamespace, crl) + if err != nil { + err = fmt.Errorf("error getting EndpointSlice: %w", err) + reason = reasonReadinessCheckFailed + msg = err.Error() + return res, err + } + if eps == nil { + l.Infof("EndpointSlice for Service does not yet exist, waiting...") + reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady + st = metav1.ConditionFalse + return res, nil + } + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
svc.Annotations[AnnotationProxyGroup], + }, + } + err = esrr.Get(ctx, client.ObjectKeyFromObject(pg), pg) + if apierrors.IsNotFound(err) { + l.Infof("ProxyGroup for Service does not exist, waiting...") + reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady + st = metav1.ConditionFalse + return res, nil + } + if err != nil { + err = fmt.Errorf("error retrieving ProxyGroup: %w", err) + reason = reasonReadinessCheckFailed + msg = err.Error() + return res, err + } + if !tsoperator.ProxyGroupIsReady(pg) { + l.Infof("ProxyGroup for Service is not ready, waiting...") + reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady + st = metav1.ConditionFalse + return res, nil + } + + replicas := pgReplicas(pg) + if replicas == 0 { + l.Infof("ProxyGroup replicas set to 0") + reason, msg = reasonNoProxies, reasonNoProxies + st = metav1.ConditionFalse + return res, nil + } + podLabels := pgLabels(pg.Name, nil) + var readyReplicas int32 + for i := range replicas { + podLabels[appsv1.PodIndexLabel] = fmt.Sprintf("%d", i) + pod, err := getSingleObject[corev1.Pod](ctx, esrr.Client, esrr.tsNamespace, podLabels) + if err != nil { + err = fmt.Errorf("error retrieving ProxyGroup Pod: %w", err) + reason = reasonReadinessCheckFailed + msg = err.Error() + return res, err + } + if pod == nil { + l.Infof("[unexpected] ProxyGroup is ready, but replica %d was not found", i) + reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady + return res, nil + } + l.Infof("looking at Pod with IPs %v", pod.Status.PodIPs) + ready := false + for _, ep := range eps.Endpoints { + l.Infof("looking at endpoint with addresses %v", ep.Addresses) + if endpointReadyForPod(&ep, pod, l) { + l.Infof("endpoint is ready for Pod") + ready = true + break + } + } + if ready { + readyReplicas++ + } + } + msg = fmt.Sprintf(msgReadyToRouteTemplate, readyReplicas, replicas) + if readyReplicas == 0 { + reason = reasonNotReady + st = metav1.ConditionFalse + return 
res, nil + } + st = metav1.ConditionTrue + if readyReplicas < replicas { + reason = reasonPartiallyReady + } else { + reason = reasonReady + } + return res, nil +} + +// endpointReadyForPod returns true if the endpoint is for the Pod's IPv4 address and is ready to serve traffic. +// Endpoint must not be nil. +func endpointReadyForPod(ep *discoveryv1.Endpoint, pod *corev1.Pod, l *zap.SugaredLogger) bool { + podIP, err := podIPv4(pod) + if err != nil { + l.Infof("[unexpected] error retrieving Pod's IPv4 address: %v", err) + return false + } + // Currently we only ever set a single address on and Endpoint and nothing else is meant to modify this. + if len(ep.Addresses) != 1 { + return false + } + return strings.EqualFold(ep.Addresses[0], podIP) && + *ep.Conditions.Ready && + *ep.Conditions.Serving && + !*ep.Conditions.Terminating +} diff --git a/cmd/k8s-operator/egress-services-readiness_test.go b/cmd/k8s-operator/egress-services-readiness_test.go new file mode 100644 index 0000000000000..052eb1a493801 --- /dev/null +++ b/cmd/k8s-operator/egress-services-readiness_test.go @@ -0,0 +1,169 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "fmt" + "testing" + + "github.com/AlekSi/pointer" + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/tstest" + "tailscale.com/tstime" +) + +func TestEgressServiceReadiness(t *testing.T) { + // We need to pass a ProxyGroup object to WithStatusSubresource because of some quirks in how the fake client + // works. Without this code further down would not be able to update ProxyGroup status. + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). 
+ WithStatusSubresource(&tsapi.ProxyGroup{}). + Build() + zl, _ := zap.NewDevelopment() + cl := tstest.NewClock(tstest.ClockOpts{}) + rec := &egressSvcsReadinessReconciler{ + tsNamespace: "operator-ns", + Client: fc, + logger: zl.Sugar(), + clock: cl, + } + tailnetFQDN := "my-app.tailnetxyz.ts.net" + egressSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-app", + Namespace: "dev", + Annotations: map[string]string{ + AnnotationProxyGroup: "dev", + AnnotationTailnetTargetFQDN: tailnetFQDN, + }, + }, + } + fakeClusterIPSvc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "my-app", Namespace: "operator-ns"}} + l := egressSvcEpsLabels(egressSvc, fakeClusterIPSvc) + eps := &discoveryv1.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-app", + Namespace: "operator-ns", + Labels: l, + }, + AddressType: discoveryv1.AddressTypeIPv4, + } + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dev", + }, + } + mustCreate(t, fc, egressSvc) + setClusterNotReady(egressSvc, cl, zl.Sugar()) + t.Run("endpointslice_does_not_exist", func(t *testing.T) { + expectReconciled(t, rec, "dev", "my-app") + expectEqual(t, fc, egressSvc, nil) // not ready + }) + t.Run("proxy_group_does_not_exist", func(t *testing.T) { + mustCreate(t, fc, eps) + expectReconciled(t, rec, "dev", "my-app") + expectEqual(t, fc, egressSvc, nil) // still not ready + }) + t.Run("proxy_group_not_ready", func(t *testing.T) { + mustCreate(t, fc, pg) + expectReconciled(t, rec, "dev", "my-app") + expectEqual(t, fc, egressSvc, nil) // still not ready + }) + t.Run("no_ready_replicas", func(t *testing.T) { + setPGReady(pg, cl, zl.Sugar()) + mustUpdateStatus(t, fc, pg.Namespace, pg.Name, func(p *tsapi.ProxyGroup) { + p.Status = pg.Status + }) + expectEqual(t, fc, pg, nil) + for i := range pgReplicas(pg) { + p := pod(pg, i) + mustCreate(t, fc, p) + mustUpdateStatus(t, fc, p.Namespace, p.Name, func(existing *corev1.Pod) { + existing.Status.PodIPs = p.Status.PodIPs + }) + } + 
expectReconciled(t, rec, "dev", "my-app") + setNotReady(egressSvc, cl, zl.Sugar(), pgReplicas(pg)) + expectEqual(t, fc, egressSvc, nil) // still not ready + }) + t.Run("one_ready_replica", func(t *testing.T) { + setEndpointForReplica(pg, 0, eps) + mustUpdate(t, fc, eps.Namespace, eps.Name, func(e *discoveryv1.EndpointSlice) { + e.Endpoints = eps.Endpoints + }) + setReady(egressSvc, cl, zl.Sugar(), pgReplicas(pg), 1) + expectReconciled(t, rec, "dev", "my-app") + expectEqual(t, fc, egressSvc, nil) // partially ready + }) + t.Run("all_replicas_ready", func(t *testing.T) { + for i := range pgReplicas(pg) { + setEndpointForReplica(pg, i, eps) + } + mustUpdate(t, fc, eps.Namespace, eps.Name, func(e *discoveryv1.EndpointSlice) { + e.Endpoints = eps.Endpoints + }) + setReady(egressSvc, cl, zl.Sugar(), pgReplicas(pg), pgReplicas(pg)) + expectReconciled(t, rec, "dev", "my-app") + expectEqual(t, fc, egressSvc, nil) // ready + }) +} + +func setClusterNotReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger) { + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonClusterResourcesNotReady, reasonClusterResourcesNotReady, cl, l) +} + +func setNotReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger, replicas int32) { + msg := fmt.Sprintf(msgReadyToRouteTemplate, 0, replicas) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonNotReady, msg, cl, l) +} + +func setReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger, replicas, readyReplicas int32) { + reason := reasonPartiallyReady + if readyReplicas == replicas { + reason = reasonReady + } + msg := fmt.Sprintf(msgReadyToRouteTemplate, readyReplicas, replicas) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionTrue, reason, msg, cl, l) +} + +func setPGReady(pg *tsapi.ProxyGroup, cl tstime.Clock, l *zap.SugaredLogger) { + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, 
"foo", "foo", pg.Generation, cl, l) +} + +func setEndpointForReplica(pg *tsapi.ProxyGroup, ordinal int32, eps *discoveryv1.EndpointSlice) { + p := pod(pg, ordinal) + eps.Endpoints = append(eps.Endpoints, discoveryv1.Endpoint{ + Addresses: []string{p.Status.PodIPs[0].IP}, + Conditions: discoveryv1.EndpointConditions{ + Ready: pointer.ToBool(true), + Serving: pointer.ToBool(true), + Terminating: pointer.ToBool(false), + }, + }) +} + +func pod(pg *tsapi.ProxyGroup, ordinal int32) *corev1.Pod { + l := pgLabels(pg.Name, nil) + l[appsv1.PodIndexLabel] = fmt.Sprintf("%d", ordinal) + ip := fmt.Sprintf("10.0.0.%d", ordinal) + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", pg.Name, ordinal), + Namespace: "operator-ns", + Labels: l, + }, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{{IP: ip}}, + }, + } +} diff --git a/cmd/k8s-operator/egress-services.go b/cmd/k8s-operator/egress-services.go new file mode 100644 index 0000000000000..98ed943669cd0 --- /dev/null +++ b/cmd/k8s-operator/egress-services.go @@ -0,0 +1,716 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + "math/rand/v2" + "reflect" + "slices" + "strings" + "sync" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/storage/names" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/egressservices" + 
"tailscale.com/kube/kubetypes" + "tailscale.com/tstime" + "tailscale.com/util/clientmetric" + "tailscale.com/util/mak" + "tailscale.com/util/set" +) + +const ( + reasonEgressSvcInvalid = "EgressSvcInvalid" + reasonEgressSvcValid = "EgressSvcValid" + reasonEgressSvcCreationFailed = "EgressSvcCreationFailed" + reasonProxyGroupNotReady = "ProxyGroupNotReady" + + labelProxyGroup = "tailscale.com/proxy-group" + + labelSvcType = "tailscale.com/svc-type" // ingress or egress + typeEgress = "egress" + // maxPorts is the maximum number of ports that can be exposed on a + // container. In practice this will be ports in range [3000 - 4000). The + // high range should make it easier to distinguish container ports from + // the tailnet target ports for debugging purposes (i.e when reading + // netfilter rules). The limit of 10000 is somewhat arbitrary, the + // assumption is that this would not be hit in practice. + maxPorts = 10000 + + indexEgressProxyGroup = ".metadata.annotations.egress-proxy-group" +) + +var gaugeEgressServices = clientmetric.NewGauge(kubetypes.MetricEgressServiceCount) + +// egressSvcsReconciler reconciles user created ExternalName Services that specify a tailnet +// endpoint that should be exposed to cluster workloads and an egress ProxyGroup +// on whose proxies it should be exposed. +type egressSvcsReconciler struct { + client.Client + logger *zap.SugaredLogger + recorder record.EventRecorder + clock tstime.Clock + tsNamespace string + + mu sync.Mutex // protects following + svcs set.Slice[types.UID] // UIDs of all currently managed egress Services for ProxyGroup +} + +// Reconcile reconciles an ExternalName Service that specifies a tailnet target and a ProxyGroup on whose proxies should +// forward cluster traffic to the target. +// For an ExternalName Service the reconciler: +// +// - for each port N defined on the ExternalName Service, allocates a port X in range [3000- 4000), unique for the +// ProxyGroup proxies. 
Proxies will forward cluster traffic received on port N to port M on the tailnet target +// +// - creates a ClusterIP Service in the operator's namespace with portmappings for all M->N port pairs. This will allow +// cluster workloads to send traffic on the user-defined tailnet target port and get it transparently mapped to the +// randomly selected port on proxy Pods. +// +// - creates an EndpointSlice in the operator's namespace with kubernetes.io/service-name label pointing to the +// ClusterIP Service. The endpoints will get dynamically updates to proxy Pod IPs as the Pods become ready to route +// traffic to the tailnet target. kubernetes.io/service-name label ensures that kube-proxy sets up routing rules to +// forward cluster traffic received on ClusterIP Service's IP address to the endpoints (Pod IPs). +// +// - updates the egress service config in a ConfigMap mounted to the ProxyGroup proxies with the tailnet target and the +// portmappings. +func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { + l := esr.logger.With("Service", req.NamespacedName) + defer l.Info("reconcile finished") + + svc := new(corev1.Service) + if err = esr.Get(ctx, req.NamespacedName, svc); apierrors.IsNotFound(err) { + l.Info("Service not found") + return res, nil + } else if err != nil { + return res, fmt.Errorf("failed to get Service: %w", err) + } + + // Name of the 'egress service', meaning the tailnet target. + tailnetSvc := tailnetSvcName(svc) + l = l.With("tailnet-service", tailnetSvc) + + // Note that resources for egress Services are only cleaned up when the + // Service is actually deleted (and not if, for example, user decides to + // remove the Tailscale annotation from it). This should be fine- we + // assume that the egress ExternalName Services are always created for + // Tailscale operator specifically. 
+ if !svc.DeletionTimestamp.IsZero() { + l.Info("Service is being deleted, ensuring resource cleanup") + return res, esr.maybeCleanup(ctx, svc, l) + } + + oldStatus := svc.Status.DeepCopy() + defer func() { + if !apiequality.Semantic.DeepEqual(oldStatus, svc.Status) { + err = errors.Join(err, esr.Status().Update(ctx, svc)) + } + }() + + // Validate the user-created ExternalName Service and the associated ProxyGroup. + if ok, err := esr.validateClusterResources(ctx, svc, l); err != nil { + return res, fmt.Errorf("error validating cluster resources: %w", err) + } else if !ok { + return res, nil + } + + if !slices.Contains(svc.Finalizers, FinalizerName) { + l.Infof("configuring tailnet service") // logged exactly once + svc.Finalizers = append(svc.Finalizers, FinalizerName) + if err := esr.Update(ctx, svc); err != nil { + err := fmt.Errorf("failed to add finalizer: %w", err) + r := svcConfiguredReason(svc, false, l) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, l) + return res, err + } + esr.mu.Lock() + esr.svcs.Add(svc.UID) + gaugeEgressServices.Set(int64(esr.svcs.Len())) + esr.mu.Unlock() + } + + if err := esr.maybeCleanupProxyGroupConfig(ctx, svc, l); err != nil { + err = fmt.Errorf("cleaning up resources for previous ProxyGroup failed: %w", err) + r := svcConfiguredReason(svc, false, l) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, l) + return res, err + } + + return res, esr.maybeProvision(ctx, svc, l) +} + +func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) (err error) { + r := svcConfiguredReason(svc, false, l) + st := metav1.ConditionFalse + defer func() { + msg := r + if st != metav1.ConditionTrue && err != nil { + msg = err.Error() + } + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, st, r, msg, esr.clock, l) + }() + + crl := 
egressSvcChildResourceLabels(svc) + clusterIPSvc, err := getSingleObject[corev1.Service](ctx, esr.Client, esr.tsNamespace, crl) + if err != nil { + err = fmt.Errorf("error retrieving ClusterIP Service: %w", err) + return err + } + if clusterIPSvc == nil { + clusterIPSvc = esr.clusterIPSvcForEgress(crl) + } + upToDate := svcConfigurationUpToDate(svc, l) + provisioned := true + if !upToDate { + if clusterIPSvc, provisioned, err = esr.provision(ctx, svc.Annotations[AnnotationProxyGroup], svc, clusterIPSvc, l); err != nil { + return err + } + } + if !provisioned { + l.Infof("unable to provision cluster resources") + return nil + } + + // Update ExternalName Service to point at the ClusterIP Service. + clusterDomain := retrieveClusterDomain(esr.tsNamespace, l) + clusterIPSvcFQDN := fmt.Sprintf("%s.%s.svc.%s", clusterIPSvc.Name, clusterIPSvc.Namespace, clusterDomain) + if svc.Spec.ExternalName != clusterIPSvcFQDN { + l.Infof("Configuring ExternalName Service to point to ClusterIP Service %s", clusterIPSvcFQDN) + svc.Spec.ExternalName = clusterIPSvcFQDN + if err = esr.Update(ctx, svc); err != nil { + err = fmt.Errorf("error updating ExternalName Service: %w", err) + return err + } + } + r = svcConfiguredReason(svc, true, l) + st = metav1.ConditionTrue + return nil +} + +func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName string, svc, clusterIPSvc *corev1.Service, l *zap.SugaredLogger) (*corev1.Service, bool, error) { + l.Infof("updating configuration...") + usedPorts, err := esr.usedPortsForPG(ctx, proxyGroupName) + if err != nil { + return nil, false, fmt.Errorf("error calculating used ports for ProxyGroup %s: %w", proxyGroupName, err) + } + + oldClusterIPSvc := clusterIPSvc.DeepCopy() + // loop over ClusterIP Service ports, remove any that are not needed. 
+ for i := len(clusterIPSvc.Spec.Ports) - 1; i >= 0; i-- { + pm := clusterIPSvc.Spec.Ports[i] + found := false + for _, wantsPM := range svc.Spec.Ports { + if wantsPM.Port == pm.Port && strings.EqualFold(string(wantsPM.Protocol), string(pm.Protocol)) { + found = true + break + } + } + if !found { + l.Debugf("portmapping %s:%d -> %s:%d is no longer required, removing", pm.Protocol, pm.TargetPort.IntVal, pm.Protocol, pm.Port) + clusterIPSvc.Spec.Ports = slices.Delete(clusterIPSvc.Spec.Ports, i, i+1) + } + } + + // loop over ExternalName Service ports, for each one not found on + // ClusterIP Service produce new target port and add a portmapping to + // the ClusterIP Service. + for _, wantsPM := range svc.Spec.Ports { + found := false + for _, gotPM := range clusterIPSvc.Spec.Ports { + if wantsPM.Port == gotPM.Port && strings.EqualFold(string(wantsPM.Protocol), string(gotPM.Protocol)) { + found = true + break + } + } + if !found { + // Calculate a free port to expose on container and add + // a new PortMap to the ClusterIP Service. + if usedPorts.Len() == maxPorts { + // TODO(irbekrm): refactor to avoid extra reconciles here. Low priority as in practice, + // the limit should not be hit. + return nil, false, fmt.Errorf("unable to allocate additional ports on ProxyGroup %s, %d ports already used. 
Create another ProxyGroup or open an issue if you believe this is unexpected.", proxyGroupName, maxPorts) + } + p := unusedPort(usedPorts) + l.Debugf("mapping tailnet target port %d to container port %d", wantsPM.Port, p) + usedPorts.Insert(p) + clusterIPSvc.Spec.Ports = append(clusterIPSvc.Spec.Ports, corev1.ServicePort{ + Name: wantsPM.Name, + Protocol: wantsPM.Protocol, + Port: wantsPM.Port, + TargetPort: intstr.FromInt32(p), + }) + } + } + if !reflect.DeepEqual(clusterIPSvc, oldClusterIPSvc) { + if clusterIPSvc, err = createOrUpdate(ctx, esr.Client, esr.tsNamespace, clusterIPSvc, func(svc *corev1.Service) { + svc.Labels = clusterIPSvc.Labels + svc.Spec = clusterIPSvc.Spec + }); err != nil { + return nil, false, fmt.Errorf("error ensuring ClusterIP Service: %v", err) + } + } + + crl := egressSvcEpsLabels(svc, clusterIPSvc) + // TODO(irbekrm): support IPv6, but need to investigate how kube proxy + // sets up Service -> Pod routing when IPv6 is involved. + eps := &discoveryv1.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-ipv4", clusterIPSvc.Name), + Namespace: esr.tsNamespace, + Labels: crl, + }, + AddressType: discoveryv1.AddressTypeIPv4, + Ports: epsPortsFromSvc(clusterIPSvc), + } + if eps, err = createOrUpdate(ctx, esr.Client, esr.tsNamespace, eps, func(e *discoveryv1.EndpointSlice) { + e.Labels = eps.Labels + e.AddressType = eps.AddressType + e.Ports = eps.Ports + for _, p := range e.Endpoints { + p.Conditions.Ready = nil + } + }); err != nil { + return nil, false, fmt.Errorf("error ensuring EndpointSlice: %w", err) + } + + cm, cfgs, err := egressSvcsConfigs(ctx, esr.Client, proxyGroupName, esr.tsNamespace) + if err != nil { + return nil, false, fmt.Errorf("error retrieving egress services configuration: %w", err) + } + if cm == nil { + l.Info("ConfigMap not yet created, waiting..") + return nil, false, nil + } + tailnetSvc := tailnetSvcName(svc) + gotCfg := (*cfgs)[tailnetSvc] + wantsCfg := egressSvcCfg(svc, clusterIPSvc) + if 
!reflect.DeepEqual(gotCfg, wantsCfg) { + l.Debugf("updating egress services ConfigMap %s", cm.Name) + mak.Set(cfgs, tailnetSvc, wantsCfg) + bs, err := json.Marshal(cfgs) + if err != nil { + return nil, false, fmt.Errorf("error marshalling egress services configs: %w", err) + } + mak.Set(&cm.BinaryData, egressservices.KeyEgressServices, bs) + if err := esr.Update(ctx, cm); err != nil { + return nil, false, fmt.Errorf("error updating egress services ConfigMap: %w", err) + } + } + l.Infof("egress service configuration has been updated") + return clusterIPSvc, true, nil +} + +func (esr *egressSvcsReconciler) maybeCleanup(ctx context.Context, svc *corev1.Service, logger *zap.SugaredLogger) error { + logger.Info("ensuring that resources created for egress service are deleted") + + // Delete egress service config from the ConfigMap mounted by the proxies. + if err := esr.ensureEgressSvcCfgDeleted(ctx, svc, logger); err != nil { + return fmt.Errorf("error deleting egress service config: %w", err) + } + + // Delete the ClusterIP Service and EndpointSlice for the egress + // service. + types := []client.Object{ + &corev1.Service{}, + &discoveryv1.EndpointSlice{}, + } + crl := egressSvcChildResourceLabels(svc) + for _, typ := range types { + if err := esr.DeleteAllOf(ctx, typ, client.InNamespace(esr.tsNamespace), client.MatchingLabels(crl)); err != nil { + return fmt.Errorf("error deleting %s: %w", typ, err) + } + } + + ix := slices.Index(svc.Finalizers, FinalizerName) + if ix != -1 { + logger.Debug("Removing Tailscale finalizer from Service") + svc.Finalizers = append(svc.Finalizers[:ix], svc.Finalizers[ix+1:]...) 
+ if err := esr.Update(ctx, svc); err != nil { + return fmt.Errorf("failed to remove finalizer: %w", err) + } + } + esr.mu.Lock() + esr.svcs.Remove(svc.UID) + gaugeEgressServices.Set(int64(esr.svcs.Len())) + esr.mu.Unlock() + logger.Info("successfully cleaned up resources for egress Service") + return nil +} + +func (esr *egressSvcsReconciler) maybeCleanupProxyGroupConfig(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) error { + wantsProxyGroup := svc.Annotations[AnnotationProxyGroup] + cond := tsoperator.GetServiceCondition(svc, tsapi.EgressSvcConfigured) + if cond == nil { + return nil + } + ss := strings.Split(cond.Reason, ":") + if len(ss) < 3 { + return nil + } + if strings.EqualFold(wantsProxyGroup, ss[2]) { + return nil + } + esr.logger.Infof("egress Service configured on ProxyGroup %s, wants ProxyGroup %s, cleaning up...", ss[2], wantsProxyGroup) + if err := esr.ensureEgressSvcCfgDeleted(ctx, svc, l); err != nil { + return fmt.Errorf("error deleting egress service config: %w", err) + } + return nil +} + +// usedPortsForPG calculates the currently used match ports for ProxyGroup +// containers. It does that by looking by retrieving all target ports of all +// ClusterIP Services created for egress services exposed on this ProxyGroup's +// proxies. +// TODO(irbekrm): this is currently good enough because we only have a single worker and +// because these Services are created by us, so we can always expect to get the +// latest ClusterIP Services via the controller cache. It will not work as well +// once we split into multiple workers- at that point we probably want to set +// used ports on ProxyGroup's status. 
+func (esr *egressSvcsReconciler) usedPortsForPG(ctx context.Context, pg string) (sets.Set[int32], error) { + svcList := &corev1.ServiceList{} + if err := esr.List(ctx, svcList, client.InNamespace(esr.tsNamespace), client.MatchingLabels(map[string]string{labelProxyGroup: pg})); err != nil { + return nil, fmt.Errorf("error listing Services: %w", err) + } + usedPorts := sets.New[int32]() + for _, s := range svcList.Items { + for _, p := range s.Spec.Ports { + usedPorts.Insert(p.TargetPort.IntVal) + } + } + return usedPorts, nil +} + +// clusterIPSvcForEgress returns a template for the ClusterIP Service created +// for an egress service exposed on ProxyGroup proxies. The ClusterIP Service +// has no selector. Traffic sent to it will be routed to the endpoints defined +// by an EndpointSlice created for this egress service. +func (esr *egressSvcsReconciler) clusterIPSvcForEgress(crl map[string]string) *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: svcNameBase(crl[LabelParentName]), + Namespace: esr.tsNamespace, + Labels: crl, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + }, + } +} + +func (esr *egressSvcsReconciler) ensureEgressSvcCfgDeleted(ctx context.Context, svc *corev1.Service, logger *zap.SugaredLogger) error { + crl := egressSvcChildResourceLabels(svc) + cmName := pgEgressCMName(crl[labelProxyGroup]) + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: cmName, + Namespace: esr.tsNamespace, + }, + } + l := logger.With("ConfigMap", client.ObjectKeyFromObject(cm)) + l.Debug("ensuring that egress service configuration is removed from proxy config") + if err := esr.Get(ctx, client.ObjectKeyFromObject(cm), cm); apierrors.IsNotFound(err) { + l.Debugf("ConfigMap not found") + return nil + } else if err != nil { + return fmt.Errorf("error retrieving ConfigMap: %w", err) + } + bs := cm.BinaryData[egressservices.KeyEgressServices] + if len(bs) == 0 { + l.Debugf("ConfigMap does not 
contain egress service configs") + return nil + } + cfgs := &egressservices.Configs{} + if err := json.Unmarshal(bs, cfgs); err != nil { + return fmt.Errorf("error unmarshalling egress services configs") + } + tailnetSvc := tailnetSvcName(svc) + _, ok := (*cfgs)[tailnetSvc] + if !ok { + l.Debugf("ConfigMap does not contain egress service config, likely because it was already deleted") + return nil + } + l.Infof("before deleting config %+#v", *cfgs) + delete(*cfgs, tailnetSvc) + l.Infof("after deleting config %+#v", *cfgs) + bs, err := json.Marshal(cfgs) + if err != nil { + return fmt.Errorf("error marshalling egress services configs: %w", err) + } + mak.Set(&cm.BinaryData, egressservices.KeyEgressServices, bs) + return esr.Update(ctx, cm) +} + +func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) (bool, error) { + proxyGroupName := svc.Annotations[AnnotationProxyGroup] + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: proxyGroupName, + }, + } + if err := esr.Get(ctx, client.ObjectKeyFromObject(pg), pg); apierrors.IsNotFound(err) { + l.Infof("ProxyGroup %q not found, waiting...", proxyGroupName) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l) + tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) + return false, nil + } else if err != nil { + err := fmt.Errorf("unable to retrieve ProxyGroup %s: %w", proxyGroupName, err) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, err.Error(), esr.clock, l) + tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) + return false, err + } + if !tsoperator.ProxyGroupIsReady(pg) { + l.Infof("ProxyGroup %s is not ready, waiting...", proxyGroupName) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, 
reasonProxyGroupNotReady, esr.clock, l) + tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) + return false, nil + } + + if violations := validateEgressService(svc, pg); len(violations) > 0 { + msg := fmt.Sprintf("invalid egress Service: %s", strings.Join(violations, ", ")) + esr.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDSERVICE", msg) + l.Info(msg) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionFalse, reasonEgressSvcInvalid, msg, esr.clock, l) + tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) + return false, nil + } + l.Debugf("egress service is valid") + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionTrue, reasonEgressSvcValid, reasonEgressSvcValid, esr.clock, l) + return true, nil +} + +func validateEgressService(svc *corev1.Service, pg *tsapi.ProxyGroup) []string { + violations := validateService(svc) + + // We check that only one of these two is set in the earlier validateService function. + if svc.Annotations[AnnotationTailnetTargetFQDN] == "" && svc.Annotations[AnnotationTailnetTargetIP] == "" { + violations = append(violations, fmt.Sprintf("egress Service for ProxyGroup must have one of %s, %s annotations set", AnnotationTailnetTargetFQDN, AnnotationTailnetTargetIP)) + } + if len(svc.Spec.Ports) == 0 { + violations = append(violations, "egress Service for ProxyGroup must have at least one target Port specified") + } + if svc.Spec.Type != corev1.ServiceTypeExternalName { + violations = append(violations, fmt.Sprintf("unexpected egress Service type %s. 
The only supported type is ExternalName.", svc.Spec.Type)) + } + if pg.Spec.Type != tsapi.ProxyGroupTypeEgress { + violations = append(violations, fmt.Sprintf("egress Service references ProxyGroup of type %s, must be type %s", pg.Spec.Type, tsapi.ProxyGroupTypeEgress)) + } + return violations +} + +// egressSvcNameBase returns a name base that can be passed to +// ObjectMeta.GenerateName to generate a name for the ClusterIP Service. +// The generated name needs to be short enough so that it can later be used to +// generate a valid Kubernetes resource name for the EndpointSlice in form +// 'ipv4-|ipv6-. +// A valid Kubernetes resource name must not be longer than 253 chars. +func svcNameBase(s string) string { + // -ipv4 - ipv6 + const maxClusterIPSvcNameLength = 253 - 5 + base := fmt.Sprintf("ts-%s-", s) + generator := names.SimpleNameGenerator + for { + generatedName := generator.GenerateName(base) + excess := len(generatedName) - maxClusterIPSvcNameLength + if excess <= 0 { + return base + } + base = base[:len(base)-1-excess] // cut off the excess chars + base = base + "-" // re-instate the dash + } +} + +// unusedPort returns a port in range [3000 - 4000). The caller must ensure that +// usedPorts does not contain all ports in range [3000 - 4000). +func unusedPort(usedPorts sets.Set[int32]) int32 { + foundFreePort := false + var suggestPort int32 + for !foundFreePort { + suggestPort = rand.Int32N(maxPorts) + 3000 + if !usedPorts.Has(suggestPort) { + foundFreePort = true + } + } + return suggestPort +} + +// tailnetTargetFromSvc returns a tailnet target for the given egress Service. +// Service must contain exactly one of tailscale.com/tailnet-ip, +// tailscale.com/tailnet-fqdn annotations. 
+func tailnetTargetFromSvc(svc *corev1.Service) egressservices.TailnetTarget { + if fqdn := svc.Annotations[AnnotationTailnetTargetFQDN]; fqdn != "" { + return egressservices.TailnetTarget{ + FQDN: fqdn, + } + } + return egressservices.TailnetTarget{ + IP: svc.Annotations[AnnotationTailnetTargetIP], + } +} + +func egressSvcCfg(externalNameSvc, clusterIPSvc *corev1.Service) egressservices.Config { + tt := tailnetTargetFromSvc(externalNameSvc) + cfg := egressservices.Config{TailnetTarget: tt} + for _, svcPort := range clusterIPSvc.Spec.Ports { + pm := portMap(svcPort) + mak.Set(&cfg.Ports, pm, struct{}{}) + } + return cfg +} + +func portMap(p corev1.ServicePort) egressservices.PortMap { + // TODO (irbekrm): out of bounds check? + return egressservices.PortMap{Protocol: string(p.Protocol), MatchPort: uint16(p.TargetPort.IntVal), TargetPort: uint16(p.Port)} +} + +func isEgressSvcForProxyGroup(obj client.Object) bool { + s, ok := obj.(*corev1.Service) + if !ok { + return false + } + annots := s.ObjectMeta.Annotations + return annots[AnnotationProxyGroup] != "" && (annots[AnnotationTailnetTargetFQDN] != "" || annots[AnnotationTailnetTargetIP] != "") +} + +// egressSvcsConfigs returns a ConfigMap that contains egress services configuration for the provided ProxyGroup as well +// as unmarshalled configuration from the ConfigMap. 
+func egressSvcsConfigs(ctx context.Context, cl client.Client, proxyGroupName, tsNamespace string) (cm *corev1.ConfigMap, cfgs *egressservices.Configs, err error) { + name := pgEgressCMName(proxyGroupName) + cm = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: tsNamespace, + }, + } + if err := cl.Get(ctx, client.ObjectKeyFromObject(cm), cm); err != nil { + return nil, nil, fmt.Errorf("error retrieving egress services ConfigMap %s: %v", name, err) + } + cfgs = &egressservices.Configs{} + if len(cm.BinaryData[egressservices.KeyEgressServices]) != 0 { + if err := json.Unmarshal(cm.BinaryData[egressservices.KeyEgressServices], cfgs); err != nil { + return nil, nil, fmt.Errorf("error unmarshaling egress services config %v: %w", cm.BinaryData[egressservices.KeyEgressServices], err) + } + } + return cm, cfgs, nil +} + +// egressSvcChildResourceLabels returns labels that should be applied to the +// ClusterIP Service and the EndpointSlice created for the egress service. +// TODO(irbekrm): we currently set a bunch of labels based on Kubernetes +// resource names (ProxyGroup, Service). Maximum allowed label length is 63 +// chars whilst the maximum allowed resource name length is 253 chars, so we +// should probably validate and truncate (?) the names if they are too long. +func egressSvcChildResourceLabels(svc *corev1.Service) map[string]string { + return map[string]string{ + LabelManaged: "true", + LabelParentType: "svc", + LabelParentName: svc.Name, + LabelParentNamespace: svc.Namespace, + labelProxyGroup: svc.Annotations[AnnotationProxyGroup], + labelSvcType: typeEgress, + } +} + +// egressSvcEpsLabels returns labels to be added to an EndpointSlice created for an egress service. 
+func egressSvcEpsLabels(extNSvc, clusterIPSvc *corev1.Service) map[string]string { + l := egressSvcChildResourceLabels(extNSvc) + // Adding this label is what makes kube proxy set up rules to route traffic sent to the clusterIP Service to the + // endpoints defined on this EndpointSlice. + // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership + l[discoveryv1.LabelServiceName] = clusterIPSvc.Name + // Kubernetes recommends setting this label. + // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#management + l[discoveryv1.LabelManagedBy] = "tailscale.com" + return l +} + +func svcConfigurationUpToDate(svc *corev1.Service, l *zap.SugaredLogger) bool { + cond := tsoperator.GetServiceCondition(svc, tsapi.EgressSvcConfigured) + if cond == nil { + return false + } + if cond.Status != metav1.ConditionTrue { + return false + } + wantsReadyReason := svcConfiguredReason(svc, true, l) + return strings.EqualFold(wantsReadyReason, cond.Reason) +} + +func cfgHash(c cfg, l *zap.SugaredLogger) string { + bs, err := json.Marshal(c) + if err != nil { + // Don't use l.Error as that messes up component logs with, in this case, unnecessary stack trace. + l.Infof("error marhsalling Config: %v", err) + return "" + } + h := sha256.New() + if _, err := h.Write(bs); err != nil { + // Don't use l.Error as that messes up component logs with, in this case, unnecessary stack trace. 
+ l.Infof("error producing Config hash: %v", err) + return "" + } + return fmt.Sprintf("%x", h.Sum(nil)) +} + +type cfg struct { + Ports []corev1.ServicePort `json:"ports"` + TailnetTarget egressservices.TailnetTarget `json:"tailnetTarget"` + ProxyGroup string `json:"proxyGroup"` +} + +func svcConfiguredReason(svc *corev1.Service, configured bool, l *zap.SugaredLogger) string { + var r string + if configured { + r = "ConfiguredFor:" + } else { + r = fmt.Sprintf("ConfigurationFailed:%s", r) + } + r += fmt.Sprintf("ProxyGroup:%s", svc.Annotations[AnnotationProxyGroup]) + tt := tailnetTargetFromSvc(svc) + s := cfg{ + Ports: svc.Spec.Ports, + TailnetTarget: tt, + ProxyGroup: svc.Annotations[AnnotationProxyGroup], + } + r += fmt.Sprintf(":Config:%s", cfgHash(s, l)) + return r +} + +// tailnetSvcName accepts an ExternalName Service name and returns a name that will be used to distinguish this tailnet +// service from other tailnet services exposed to cluster workloads. +func tailnetSvcName(extNSvc *corev1.Service) string { + return fmt.Sprintf("%s-%s", extNSvc.Namespace, extNSvc.Name) +} + +// epsPortsFromSvc takes the ClusterIP Service created for an egress service and +// returns its Port array in a form that can be used for an EndpointSlice. 
+func epsPortsFromSvc(svc *corev1.Service) (ep []discoveryv1.EndpointPort) { + for _, p := range svc.Spec.Ports { + ep = append(ep, discoveryv1.EndpointPort{ + Protocol: &p.Protocol, + Port: &p.TargetPort.IntVal, + Name: &p.Name, + }) + } + return ep +} diff --git a/cmd/k8s-operator/egress-services_test.go b/cmd/k8s-operator/egress-services_test.go new file mode 100644 index 0000000000000..ac77339853ebe --- /dev/null +++ b/cmd/k8s-operator/egress-services_test.go @@ -0,0 +1,268 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "encoding/json" + "fmt" + "testing" + + "github.com/AlekSi/pointer" + "github.com/google/go-cmp/cmp" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/egressservices" + "tailscale.com/tstest" + "tailscale.com/tstime" +) + +func TestTailscaleEgressServices(t *testing.T) { + pg := &tsapi.ProxyGroup{ + TypeMeta: metav1.TypeMeta{Kind: "ProxyGroup", APIVersion: "tailscale.com/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + UID: types.UID("1234-UID"), + }, + Spec: tsapi.ProxyGroupSpec{ + Replicas: pointer.To[int32](3), + Type: tsapi.ProxyGroupTypeEgress, + }, + } + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgEgressCMName("foo"), + Namespace: "operator-ns", + }, + } + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(pg, cm). + WithStatusSubresource(pg). 
+ Build() + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + clock := tstest.NewClock(tstest.ClockOpts{}) + + esr := &egressSvcsReconciler{ + Client: fc, + logger: zl.Sugar(), + clock: clock, + tsNamespace: "operator-ns", + } + tailnetTargetFQDN := "foo.bar.ts.net." + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + AnnotationTailnetTargetFQDN: tailnetTargetFQDN, + AnnotationProxyGroup: "foo", + }, + }, + Spec: corev1.ServiceSpec{ + ExternalName: "placeholder", + Type: corev1.ServiceTypeExternalName, + Selector: nil, + Ports: []corev1.ServicePort{ + { + Name: "http", + Protocol: "TCP", + Port: 80, + }, + { + Name: "https", + Protocol: "TCP", + Port: 443, + }, + }, + }, + } + + t.Run("proxy_group_not_ready", func(t *testing.T) { + mustCreate(t, fc, svc) + expectReconciled(t, esr, "default", "test") + // Service should have EgressSvcValid condition set to Unknown. + svc.Status.Conditions = []metav1.Condition{condition(tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, clock)} + expectEqual(t, fc, svc, nil) + }) + + t.Run("proxy_group_ready", func(t *testing.T) { + mustUpdateStatus(t, fc, "", "foo", func(pg *tsapi.ProxyGroup) { + pg.Status.Conditions = []metav1.Condition{ + condition(tsapi.ProxyGroupReady, metav1.ConditionTrue, "", "", clock), + } + }) + // Quirks of the fake client. + mustUpdateStatus(t, fc, "default", "test", func(svc *corev1.Service) { + svc.Status.Conditions = []metav1.Condition{} + }) + expectReconciled(t, esr, "default", "test") + // Verify that a ClusterIP Service has been created. + name := findGenNameForEgressSvcResources(t, fc, svc) + expectEqual(t, fc, clusterIPSvc(name, svc), removeTargetPortsFromSvc) + clusterSvc := mustGetClusterIPSvc(t, fc, name) + // Verify that an EndpointSlice has been created. 
+ expectEqual(t, fc, endpointSlice(name, svc, clusterSvc), nil) + // Verify that ConfigMap contains configuration for the new egress service. + mustHaveConfigForSvc(t, fc, svc, clusterSvc, cm) + r := svcConfiguredReason(svc, true, zl.Sugar()) + // Verify that the user-created ExternalName Service has Configured set to true and ExternalName pointing to the + // CluterIP Service. + svc.Status.Conditions = []metav1.Condition{ + condition(tsapi.EgressSvcConfigured, metav1.ConditionTrue, r, r, clock), + } + svc.ObjectMeta.Finalizers = []string{"tailscale.com/finalizer"} + svc.Spec.ExternalName = fmt.Sprintf("%s.operator-ns.svc.cluster.local", name) + expectEqual(t, fc, svc, nil) + }) + + t.Run("delete_external_name_service", func(t *testing.T) { + name := findGenNameForEgressSvcResources(t, fc, svc) + if err := fc.Delete(context.Background(), svc); err != nil { + t.Fatalf("error deleting ExternalName Service: %v", err) + } + expectReconciled(t, esr, "default", "test") + // Verify that ClusterIP Service and EndpointSlice have been deleted. + expectMissing[corev1.Service](t, fc, "operator-ns", name) + expectMissing[discoveryv1.EndpointSlice](t, fc, "operator-ns", fmt.Sprintf("%s-ipv4", name)) + // Verify that service config has been deleted from the ConfigMap. 
+ mustNotHaveConfigForSvc(t, fc, svc, cm) + }) +} + +func condition(typ tsapi.ConditionType, st metav1.ConditionStatus, r, msg string, clock tstime.Clock) metav1.Condition { + return metav1.Condition{ + Type: string(typ), + Status: st, + LastTransitionTime: conditionTime(clock), + Reason: r, + Message: msg, + } +} + +func findGenNameForEgressSvcResources(t *testing.T, client client.Client, svc *corev1.Service) string { + t.Helper() + labels := egressSvcChildResourceLabels(svc) + s, err := getSingleObject[corev1.Service](context.Background(), client, "operator-ns", labels) + if err != nil { + t.Fatalf("finding ClusterIP Service for ExternalName Service %s: %v", svc.Name, err) + } + if s == nil { + t.Fatalf("no ClusterIP Service found for ExternalName Service %q", svc.Name) + } + return s.GetName() +} + +func clusterIPSvc(name string, extNSvc *corev1.Service) *corev1.Service { + labels := egressSvcChildResourceLabels(extNSvc) + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "operator-ns", + GenerateName: fmt.Sprintf("ts-%s-", extNSvc.Name), + Labels: labels, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Ports: extNSvc.Spec.Ports, + }, + } +} + +func mustGetClusterIPSvc(t *testing.T, cl client.Client, name string) *corev1.Service { + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "operator-ns", + }, + } + if err := cl.Get(context.Background(), client.ObjectKeyFromObject(svc), svc); err != nil { + t.Fatalf("error retrieving Service") + } + return svc +} + +func endpointSlice(name string, extNSvc, clusterIPSvc *corev1.Service) *discoveryv1.EndpointSlice { + labels := egressSvcChildResourceLabels(extNSvc) + labels[discoveryv1.LabelManagedBy] = "tailscale.com" + labels[discoveryv1.LabelServiceName] = name + return &discoveryv1.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-ipv4", name), + Namespace: "operator-ns", + Labels: labels, + }, + Ports: 
portsForEndpointSlice(clusterIPSvc), + AddressType: discoveryv1.AddressTypeIPv4, + } +} + +func portsForEndpointSlice(svc *corev1.Service) []discoveryv1.EndpointPort { + ports := make([]discoveryv1.EndpointPort, 0) + for _, p := range svc.Spec.Ports { + ports = append(ports, discoveryv1.EndpointPort{ + Name: &p.Name, + Protocol: &p.Protocol, + Port: pointer.ToInt32(p.TargetPort.IntVal), + }) + } + return ports +} + +func mustHaveConfigForSvc(t *testing.T, cl client.Client, extNSvc, clusterIPSvc *corev1.Service, cm *corev1.ConfigMap) { + t.Helper() + wantsCfg := egressSvcCfg(extNSvc, clusterIPSvc) + if err := cl.Get(context.Background(), client.ObjectKeyFromObject(cm), cm); err != nil { + t.Fatalf("Error retrieving ConfigMap: %v", err) + } + name := tailnetSvcName(extNSvc) + gotCfg := configFromCM(t, cm, name) + if gotCfg == nil { + t.Fatalf("No config found for service %q", name) + } + if diff := cmp.Diff(*gotCfg, wantsCfg); diff != "" { + t.Fatalf("unexpected config for service %q (-got +want):\n%s", name, diff) + } +} + +func mustNotHaveConfigForSvc(t *testing.T, cl client.Client, extNSvc *corev1.Service, cm *corev1.ConfigMap) { + t.Helper() + if err := cl.Get(context.Background(), client.ObjectKeyFromObject(cm), cm); err != nil { + t.Fatalf("Error retrieving ConfigMap: %v", err) + } + name := tailnetSvcName(extNSvc) + gotCfg := configFromCM(t, cm, name) + if gotCfg != nil { + t.Fatalf("Config %#+v for service %q found when it should not be present", gotCfg, name) + } +} + +func configFromCM(t *testing.T, cm *corev1.ConfigMap, svcName string) *egressservices.Config { + t.Helper() + cfgBs, ok := cm.BinaryData[egressservices.KeyEgressServices] + if !ok { + return nil + } + cfgs := &egressservices.Configs{} + if err := json.Unmarshal(cfgBs, cfgs); err != nil { + t.Fatalf("error unmarshalling config: %v", err) + } + cfg, ok := (*cfgs)[svcName] + if ok { + return &cfg + } + return nil +} diff --git a/cmd/k8s-operator/generate/main.go 
b/cmd/k8s-operator/generate/main.go index 539dad275d448..25435a47cf14a 100644 --- a/cmd/k8s-operator/generate/main.go +++ b/cmd/k8s-operator/generate/main.go @@ -25,11 +25,13 @@ const ( proxyClassCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_proxyclasses.yaml" dnsConfigCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_dnsconfigs.yaml" recorderCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_recorders.yaml" + proxyGroupCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_proxygroups.yaml" helmTemplatesPath = operatorDeploymentFilesPath + "/chart/templates" connectorCRDHelmTemplatePath = helmTemplatesPath + "/connector.yaml" proxyClassCRDHelmTemplatePath = helmTemplatesPath + "/proxyclass.yaml" dnsConfigCRDHelmTemplatePath = helmTemplatesPath + "/dnsconfig.yaml" recorderCRDHelmTemplatePath = helmTemplatesPath + "/recorder.yaml" + proxyGroupCRDHelmTemplatePath = helmTemplatesPath + "/proxygroup.yaml" helmConditionalStart = "{{ if .Values.installCRDs -}}\n" helmConditionalEnd = "{{- end -}}" @@ -146,6 +148,7 @@ func generate(baseDir string) error { {proxyClassCRDPath, proxyClassCRDHelmTemplatePath}, {dnsConfigCRDPath, dnsConfigCRDHelmTemplatePath}, {recorderCRDPath, recorderCRDHelmTemplatePath}, + {proxyGroupCRDPath, proxyGroupCRDHelmTemplatePath}, } { if err := addCRDToHelm(crd.crdPath, crd.templatePath); err != nil { return fmt.Errorf("error adding %s CRD to Helm templates: %w", crd.crdPath, err) @@ -161,6 +164,7 @@ func cleanup(baseDir string) error { proxyClassCRDHelmTemplatePath, dnsConfigCRDHelmTemplatePath, recorderCRDHelmTemplatePath, + proxyGroupCRDHelmTemplatePath, } { if err := os.Remove(filepath.Join(baseDir, path)); err != nil && !os.IsNotExist(err) { return fmt.Errorf("error cleaning up %s: %w", path, err) diff --git a/cmd/k8s-operator/generate/main_test.go b/cmd/k8s-operator/generate/main_test.go index d465cde7bfba1..c7956dcdbef8f 100644 --- a/cmd/k8s-operator/generate/main_test.go +++ 
b/cmd/k8s-operator/generate/main_test.go @@ -62,6 +62,9 @@ func Test_generate(t *testing.T) { if !strings.Contains(installContentsWithCRD.String(), "name: recorders.tailscale.com") { t.Errorf("Recorder CRD not found in default chart install") } + if !strings.Contains(installContentsWithCRD.String(), "name: proxygroups.tailscale.com") { + t.Errorf("ProxyGroup CRD not found in default chart install") + } // Test that CRDs can be excluded from Helm chart install installContentsWithoutCRD := bytes.NewBuffer([]byte{}) @@ -83,4 +86,7 @@ func Test_generate(t *testing.T) { if strings.Contains(installContentsWithoutCRD.String(), "name: recorders.tailscale.com") { t.Errorf("Recorder CRD found in chart install that should not contain a CRD") } + if strings.Contains(installContentsWithoutCRD.String(), "name: proxygroups.tailscale.com") { + t.Errorf("ProxyGroup CRD found in chart install that should not contain a CRD") + } } diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index 700cf4be8a84f..acc90d465093a 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -48,7 +48,7 @@ type IngressReconciler struct { // managing. This is only used for metrics. 
managedIngresses set.Slice[types.UID] - proxyDefaultClass string + defaultProxyClass string } var ( @@ -136,7 +136,7 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga } } - proxyClass := proxyClassForObject(ing, a.proxyDefaultClass) + proxyClass := proxyClassForObject(ing, a.defaultProxyClass) if proxyClass != "" { if ready, err := proxyClassIsReady(ctx, proxyClass, a.Client); err != nil { return fmt.Errorf("error verifying ProxyClass for Ingress: %w", err) diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index 8b18776b43c7b..38a041dde07f9 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -253,7 +253,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { pc.Status = tsapi.ProxyClassStatus{ Conditions: []metav1.Condition{{ Status: metav1.ConditionTrue, - Type: string(tsapi.ProxyClassready), + Type: string(tsapi.ProxyClassReady), ObservedGeneration: pc.Generation, }}} }) diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 6fa1366cd38bf..bd9c0f7bcd5b0 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -109,7 +109,7 @@ func main() { proxyActAsDefaultLoadBalancer: isDefaultLoadBalancer, proxyTags: tags, proxyFirewallMode: tsFirewallMode, - proxyDefaultClass: defaultProxyClass, + defaultProxyClass: defaultProxyClass, } runReconcilers(rOpts) } @@ -238,6 +238,7 @@ func runReconcilers(opts reconcilerOpts) { ByObject: map[client.Object]cache.ByObject{ &corev1.Secret{}: nsFilter, &corev1.ServiceAccount{}: nsFilter, + &corev1.Pod{}: nsFilter, &corev1.ConfigMap{}: nsFilter, &appsv1.StatefulSet{}: nsFilter, &appsv1.Deployment{}: nsFilter, @@ -285,7 +286,7 @@ func runReconcilers(opts reconcilerOpts) { recorder: eventRecorder, tsNamespace: opts.tailscaleNamespace, clock: tstime.DefaultClock{}, - proxyDefaultClass: opts.proxyDefaultClass, + defaultProxyClass: opts.defaultProxyClass, }) if err != nil { 
startlog.Fatalf("could not create service reconciler: %v", err) @@ -308,7 +309,7 @@ func runReconcilers(opts reconcilerOpts) { recorder: eventRecorder, Client: mgr.GetClient(), logger: opts.log.Named("ingress-reconciler"), - proxyDefaultClass: opts.proxyDefaultClass, + defaultProxyClass: opts.defaultProxyClass, }) if err != nil { startlog.Fatalf("could not create ingress reconciler: %v", err) @@ -353,6 +354,65 @@ func runReconcilers(opts reconcilerOpts) { if err != nil { startlog.Fatalf("could not create nameserver reconciler: %v", err) } + + egressSvcFilter := handler.EnqueueRequestsFromMapFunc(egressSvcsHandler) + egressProxyGroupFilter := handler.EnqueueRequestsFromMapFunc(egressSvcsFromEgressProxyGroup(mgr.GetClient(), opts.log)) + err = builder. + ControllerManagedBy(mgr). + Named("egress-svcs-reconciler"). + Watches(&corev1.Service{}, egressSvcFilter). + Watches(&tsapi.ProxyGroup{}, egressProxyGroupFilter). + Complete(&egressSvcsReconciler{ + Client: mgr.GetClient(), + tsNamespace: opts.tailscaleNamespace, + recorder: eventRecorder, + clock: tstime.DefaultClock{}, + logger: opts.log.Named("egress-svcs-reconciler"), + }) + if err != nil { + startlog.Fatalf("could not create egress Services reconciler: %v", err) + } + if err := mgr.GetFieldIndexer().IndexField(context.Background(), new(corev1.Service), indexEgressProxyGroup, indexEgressServices); err != nil { + startlog.Fatalf("failed setting up indexer for egress Services: %v", err) + } + + egressSvcFromEpsFilter := handler.EnqueueRequestsFromMapFunc(egressSvcFromEps) + err = builder. + ControllerManagedBy(mgr). + Named("egress-svcs-readiness-reconciler"). + Watches(&corev1.Service{}, egressSvcFilter). + Watches(&discoveryv1.EndpointSlice{}, egressSvcFromEpsFilter). 
+ Complete(&egressSvcsReadinessReconciler{ + Client: mgr.GetClient(), + tsNamespace: opts.tailscaleNamespace, + clock: tstime.DefaultClock{}, + logger: opts.log.Named("egress-svcs-readiness-reconciler"), + }) + if err != nil { + startlog.Fatalf("could not create egress Services readiness reconciler: %v", err) + } + + epsFilter := handler.EnqueueRequestsFromMapFunc(egressEpsHandler) + podsFilter := handler.EnqueueRequestsFromMapFunc(egressEpsFromPGPods(mgr.GetClient(), opts.tailscaleNamespace)) + secretsFilter := handler.EnqueueRequestsFromMapFunc(egressEpsFromPGStateSecrets(mgr.GetClient(), opts.tailscaleNamespace)) + epsFromExtNSvcFilter := handler.EnqueueRequestsFromMapFunc(epsFromExternalNameService(mgr.GetClient(), opts.log, opts.tailscaleNamespace)) + + err = builder. + ControllerManagedBy(mgr). + Named("egress-eps-reconciler"). + Watches(&discoveryv1.EndpointSlice{}, epsFilter). + Watches(&corev1.Pod{}, podsFilter). + Watches(&corev1.Secret{}, secretsFilter). + Watches(&corev1.Service{}, epsFromExtNSvcFilter). + Complete(&egressEpsReconciler{ + Client: mgr.GetClient(), + tsNamespace: opts.tailscaleNamespace, + logger: opts.log.Named("egress-eps-reconciler"), + }) + if err != nil { + startlog.Fatalf("could not create egress EndpointSlices reconciler: %v", err) + } + err = builder.ControllerManagedBy(mgr). For(&tsapi.ProxyClass{}). Complete(&ProxyClassReconciler{ @@ -414,6 +474,34 @@ func runReconcilers(opts reconcilerOpts) { startlog.Fatalf("could not create Recorder reconciler: %v", err) } + // Recorder reconciler. + ownedByProxyGroupFilter := handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &tsapi.ProxyGroup{}) + proxyClassFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForProxyGroup(mgr.GetClient(), startlog)) + err = builder.ControllerManagedBy(mgr). + For(&tsapi.ProxyGroup{}). + Watches(&appsv1.StatefulSet{}, ownedByProxyGroupFilter). + Watches(&corev1.ServiceAccount{}, ownedByProxyGroupFilter). 
+ Watches(&corev1.Secret{}, ownedByProxyGroupFilter). + Watches(&rbacv1.Role{}, ownedByProxyGroupFilter). + Watches(&rbacv1.RoleBinding{}, ownedByProxyGroupFilter). + Watches(&tsapi.ProxyClass{}, proxyClassFilterForProxyGroup). + Complete(&ProxyGroupReconciler{ + recorder: eventRecorder, + Client: mgr.GetClient(), + l: opts.log.Named("proxygroup-reconciler"), + clock: tstime.DefaultClock{}, + tsClient: opts.tsClient, + + tsNamespace: opts.tailscaleNamespace, + proxyImage: opts.proxyImage, + defaultTags: strings.Split(opts.proxyTags, ","), + tsFirewallMode: opts.proxyFirewallMode, + defaultProxyClass: opts.defaultProxyClass, + }) + if err != nil { + startlog.Fatalf("could not create ProxyGroup reconciler: %v", err) + } + startlog.Infof("Startup complete, operator running, version: %s", version.Long()) if err := mgr.Start(signals.SetupSignalHandler()); err != nil { startlog.Fatalf("could not start manager: %v", err) @@ -454,10 +542,10 @@ type reconcilerOpts struct { // Auto is usually the best choice, unless you want to explicitly set // specific mode for debugging purposes. proxyFirewallMode string - // proxyDefaultClass is the name of the ProxyClass to use as the default + // defaultProxyClass is the name of the ProxyClass to use as the default // class for proxies that do not have a ProxyClass set. // this is defined by an operator env variable. - proxyDefaultClass string + defaultProxyClass string } // enqueueAllIngressEgressProxySvcsinNS returns a reconcile request for each @@ -646,6 +734,27 @@ func proxyClassHandlerForConnector(cl client.Client, logger *zap.SugaredLogger) } } +// proxyClassHandlerForConnector returns a handler that, for a given ProxyClass, +// returns a list of reconcile requests for all Connectors that have +// .spec.proxyClass set. 
+func proxyClassHandlerForProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + pgList := new(tsapi.ProxyGroupList) + if err := cl.List(ctx, pgList); err != nil { + logger.Debugf("error listing ProxyGroups for ProxyClass: %v", err) + return nil + } + reqs := make([]reconcile.Request, 0) + proxyClassName := o.GetName() + for _, pg := range pgList.Items { + if pg.Spec.ProxyClass == proxyClassName { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&pg)}) + } + } + return reqs + } +} + // serviceHandlerForIngress returns a handler for Service events for ingress // reconciler that ensures that if the Service associated with an event is of // interest to the reconciler, the associated Ingress(es) gets be reconciled. @@ -687,6 +796,10 @@ func serviceHandlerForIngress(cl client.Client, logger *zap.SugaredLogger) handl } func serviceHandler(_ context.Context, o client.Object) []reconcile.Request { + if _, ok := o.GetAnnotations()[AnnotationProxyGroup]; ok { + // Do not reconcile Services for ProxyGroup. + return nil + } if isManagedByType(o, "svc") { // If this is a Service managed by a Service we want to enqueue its parent return []reconcile.Request{{NamespacedName: parentFromObjectLabels(o)}} @@ -712,3 +825,195 @@ func isMagicDNSName(name string) bool { validMagicDNSName := regexp.MustCompile(`^[a-zA-Z0-9-]+\.[a-zA-Z0-9-]+\.ts\.net\.?$`) return validMagicDNSName.MatchString(name) } + +// egressSvcsHandler returns accepts a Kubernetes object and returns a reconcile +// request for it , if the object is a Tailscale egress Service meant to be +// exposed on a ProxyGroup. 
+func egressSvcsHandler(_ context.Context, o client.Object) []reconcile.Request { + if !isEgressSvcForProxyGroup(o) { + return nil + } + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Namespace: o.GetNamespace(), + Name: o.GetName(), + }, + }, + } +} + +// egressEpsHandler returns accepts an EndpointSlice and, if the EndpointSlice +// is for an egress service, returns a reconcile request for it. +func egressEpsHandler(_ context.Context, o client.Object) []reconcile.Request { + if typ := o.GetLabels()[labelSvcType]; typ != typeEgress { + return nil + } + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Namespace: o.GetNamespace(), + Name: o.GetName(), + }, + }, + } +} + +// egressEpsFromEgressPods returns a Pod event handler that checks if Pod is a replica for a ProxyGroup and if it is, +// returns reconciler requests for all egress EndpointSlices for that ProxyGroup. +func egressEpsFromPGPods(cl client.Client, ns string) handler.MapFunc { + return func(_ context.Context, o client.Object) []reconcile.Request { + if v, ok := o.GetLabels()[LabelManaged]; !ok || v != "true" { + return nil + } + // TODO(irbekrm): for now this is good enough as all ProxyGroups are egress. Add a type check once we + // have ingress ProxyGroups. + if typ := o.GetLabels()[LabelParentType]; typ != "proxygroup" { + return nil + } + pg, ok := o.GetLabels()[LabelParentName] + if !ok { + return nil + } + return reconcileRequestsForPG(pg, cl, ns) + } +} + +// egressEpsFromPGStateSecrets returns a Secret event handler that checks if Secret is a state Secret for a ProxyGroup and if it is, +// returns reconciler requests for all egress EndpointSlices for that ProxyGroup. 
+func egressEpsFromPGStateSecrets(cl client.Client, ns string) handler.MapFunc { + return func(_ context.Context, o client.Object) []reconcile.Request { + if v, ok := o.GetLabels()[LabelManaged]; !ok || v != "true" { + return nil + } + // TODO(irbekrm): for now this is good enough as all ProxyGroups are egress. Add a type check once we + // have ingress ProxyGroups. + if parentType := o.GetLabels()[LabelParentType]; parentType != "proxygroup" { + return nil + } + if secretType := o.GetLabels()[labelSecretType]; secretType != "state" { + return nil + } + pg, ok := o.GetLabels()[LabelParentName] + if !ok { + return nil + } + return reconcileRequestsForPG(pg, cl, ns) + } +} + +// egressSvcFromEps is an event handler for EndpointSlices. If an EndpointSlice is for an egress ExternalName Service +// meant to be exposed on a ProxyGroup, returns a reconcile request for the Service. +func egressSvcFromEps(_ context.Context, o client.Object) []reconcile.Request { + if typ := o.GetLabels()[labelSvcType]; typ != typeEgress { + return nil + } + if v, ok := o.GetLabels()[LabelManaged]; !ok || v != "true" { + return nil + } + svcName, ok := o.GetLabels()[LabelParentName] + if !ok { + return nil + } + svcNs, ok := o.GetLabels()[LabelParentNamespace] + if !ok { + return nil + } + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Namespace: svcNs, + Name: svcName, + }, + }, + } +} + +func reconcileRequestsForPG(pg string, cl client.Client, ns string) []reconcile.Request { + epsList := discoveryv1.EndpointSliceList{} + if err := cl.List(context.Background(), &epsList, + client.InNamespace(ns), + client.MatchingLabels(map[string]string{labelProxyGroup: pg})); err != nil { + return nil + } + reqs := make([]reconcile.Request, 0) + for _, ep := range epsList.Items { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: ep.Namespace, + Name: ep.Name, + }, + }) + } + return reqs +} + +// egressSvcsFromEgressProxyGroup is an 
event handler for egress ProxyGroups. It returns reconcile requests for all +// user-created ExternalName Services that should be exposed on this ProxyGroup. +func egressSvcsFromEgressProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(_ context.Context, o client.Object) []reconcile.Request { + pg, ok := o.(*tsapi.ProxyGroup) + if !ok { + logger.Infof("[unexpected] ProxyGroup handler triggered for an object that is not a ProxyGroup") + return nil + } + if pg.Spec.Type != tsapi.ProxyGroupTypeEgress { + return nil + } + svcList := &corev1.ServiceList{} + if err := cl.List(context.Background(), svcList, client.MatchingFields{indexEgressProxyGroup: pg.Name}); err != nil { + logger.Infof("error listing Services: %v, skipping a reconcile for event on ProxyGroup %s", err, pg.Name) + return nil + } + reqs := make([]reconcile.Request, 0) + for _, svc := range svcList.Items { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: svc.Namespace, + Name: svc.Name, + }, + }) + } + return reqs + } +} + +// epsFromExternalNameService is an event handler for ExternalName Services that define a Tailscale egress service that +// should be exposed on a ProxyGroup. It returns reconcile requests for EndpointSlices created for this Service. 
+func epsFromExternalNameService(cl client.Client, logger *zap.SugaredLogger, ns string) handler.MapFunc { + return func(_ context.Context, o client.Object) []reconcile.Request { + svc, ok := o.(*corev1.Service) + if !ok { + logger.Infof("[unexpected] Service handler triggered for an object that is not a Service") + return nil + } + if !isEgressSvcForProxyGroup(svc) { + return nil + } + epsList := &discoveryv1.EndpointSliceList{} + if err := cl.List(context.Background(), epsList, client.InNamespace(ns), + client.MatchingLabels(egressSvcChildResourceLabels(svc))); err != nil { + logger.Infof("error listing EndpointSlices: %v, skipping a reconcile for event on Service %s", err, svc.Name) + return nil + } + reqs := make([]reconcile.Request, 0) + for _, eps := range epsList.Items { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: eps.Namespace, + Name: eps.Name, + }, + }) + } + return reqs + } +} + +// indexEgressServices adds a local index to a cached Tailscale egress Services meant to be exposed on a ProxyGroup. The +// index is used a list filter. 
+func indexEgressServices(o client.Object) []string { + if !isEgressSvcForProxyGroup(o) { + return nil + } + return []string{o.GetAnnotations()[AnnotationProxyGroup]} +} diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 8b08e9ffadec9..21e1d4313749e 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1064,7 +1064,7 @@ func TestProxyClassForService(t *testing.T) { pc.Status = tsapi.ProxyClassStatus{ Conditions: []metav1.Condition{{ Status: metav1.ConditionTrue, - Type: string(tsapi.ProxyClassready), + Type: string(tsapi.ProxyClassReady), ObservedGeneration: pc.Generation, }}} }) @@ -1487,6 +1487,72 @@ func Test_clusterDomainFromResolverConf(t *testing.T) { }) } } +func Test_authKeyRemoval(t *testing.T) { + fc := fake.NewFakeClient() + ft := &fakeTSClient{} + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + + // 1. A new Service that should be exposed via Tailscale gets created, a Secret with a config that contains auth + // key is generated. 
+ clock := tstest.NewClock(tstest.ClockOpts{}) + sr := &ServiceReconciler{ + Client: fc, + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: ft, + defaultTags: []string{"tag:k8s"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale", + }, + logger: zl.Sugar(), + clock: clock, + } + + mustCreate(t, fc, &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + UID: types.UID("1234-UID"), + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.20.30.40", + Type: corev1.ServiceTypeLoadBalancer, + LoadBalancerClass: ptr.To("tailscale"), + }, + }) + + expectReconciled(t, sr, "default", "test") + + fullName, shortName := findGenName(t, fc, "default", "test", "svc") + opts := configOpts{ + stsName: shortName, + secretName: fullName, + namespace: "default", + parentType: "svc", + hostname: "default-test", + clusterTargetIP: "10.20.30.40", + app: kubetypes.AppIngressProxy, + } + + expectEqual(t, fc, expectedSecret(t, fc, opts), nil) + expectEqual(t, fc, expectedHeadlessService(shortName, "svc"), nil) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation) + + // 2. Apply update to the Secret that imitates the proxy setting device_id. + s := expectedSecret(t, fc, opts) + mustUpdate(t, fc, s.Namespace, s.Name, func(s *corev1.Secret) { + mak.Set(&s.Data, "device_id", []byte("dkkdi4CNTRL")) + }) + + // 3. 
Config should no longer contain auth key + expectReconciled(t, sr, "default", "test") + opts.shouldRemoveAuthKey = true + opts.secretExtraData = map[string][]byte{"device_id": []byte("dkkdi4CNTRL")} + expectEqual(t, fc, expectedSecret(t, fc, opts), nil) +} func Test_externalNameService(t *testing.T) { fc := fake.NewFakeClient() diff --git a/cmd/k8s-operator/proxyclass.go b/cmd/k8s-operator/proxyclass.go index b5d213746c0c9..882a9030fa75d 100644 --- a/cmd/k8s-operator/proxyclass.go +++ b/cmd/k8s-operator/proxyclass.go @@ -98,9 +98,9 @@ func (pcr *ProxyClassReconciler) Reconcile(ctx context.Context, req reconcile.Re if errs := pcr.validate(pc); errs != nil { msg := fmt.Sprintf(messageProxyClassInvalid, errs.ToAggregate().Error()) pcr.recorder.Event(pc, corev1.EventTypeWarning, reasonProxyClassInvalid, msg) - tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassready, metav1.ConditionFalse, reasonProxyClassInvalid, msg, pc.Generation, pcr.clock, logger) + tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, pc.Generation, pcr.clock, logger) } else { - tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassready, metav1.ConditionTrue, reasonProxyClassValid, reasonProxyClassValid, pc.Generation, pcr.clock, logger) + tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionTrue, reasonProxyClassValid, reasonProxyClassValid, pc.Generation, pcr.clock, logger) } if !apiequality.Semantic.DeepEqual(oldPCStatus, pc.Status) { if err := pcr.Client.Status().Update(ctx, pc); err != nil { diff --git a/cmd/k8s-operator/proxyclass_test.go b/cmd/k8s-operator/proxyclass_test.go index c52fbb187169c..eb68811fc6b94 100644 --- a/cmd/k8s-operator/proxyclass_test.go +++ b/cmd/k8s-operator/proxyclass_test.go @@ -69,7 +69,7 @@ func TestProxyClass(t *testing.T) { // 1. A valid ProxyClass resource gets its status updated to Ready. 
expectReconciled(t, pcr, "", "test") pc.Status.Conditions = append(pc.Status.Conditions, metav1.Condition{ - Type: string(tsapi.ProxyClassready), + Type: string(tsapi.ProxyClassReady), Status: metav1.ConditionTrue, Reason: reasonProxyClassValid, Message: reasonProxyClassValid, @@ -85,7 +85,7 @@ func TestProxyClass(t *testing.T) { }) expectReconciled(t, pcr, "", "test") msg := `ProxyClass is not valid: .spec.statefulSet.labels: Invalid value: "?!someVal": a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')` - tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassready, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar()) + tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar()) expectEqual(t, fc, pc, nil) expectedEvent := "Warning ProxyClassInvalid ProxyClass is not valid: .spec.statefulSet.labels: Invalid value: \"?!someVal\": a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 
'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')" expectEvents(t, fr, []string{expectedEvent}) @@ -99,7 +99,7 @@ func TestProxyClass(t *testing.T) { }) expectReconciled(t, pcr, "", "test") msg = `ProxyClass is not valid: spec.statefulSet.pod.tailscaleContainer.image: Invalid value: "FOO bar": invalid reference format: repository name (library/FOO bar) must be lowercase` - tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassready, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar()) + tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar()) expectEqual(t, fc, pc, nil) expectedEvent = `Warning ProxyClassInvalid ProxyClass is not valid: spec.statefulSet.pod.tailscaleContainer.image: Invalid value: "FOO bar": invalid reference format: repository name (library/FOO bar) must be lowercase` expectEvents(t, fr, []string{expectedEvent}) @@ -118,7 +118,7 @@ func TestProxyClass(t *testing.T) { }) expectReconciled(t, pcr, "", "test") msg = `ProxyClass is not valid: spec.statefulSet.pod.tailscaleInitContainer.image: Invalid value: "FOO bar": invalid reference format: repository name (library/FOO bar) must be lowercase` - tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassready, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar()) + tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, 0, cl, zl.Sugar()) expectEqual(t, fc, pc, nil) expectedEvent = `Warning ProxyClassInvalid ProxyClass is not valid: spec.statefulSet.pod.tailscaleInitContainer.image: Invalid value: "FOO bar": invalid reference format: repository name (library/FOO bar) must be lowercase` expectEvents(t, fr, []string{expectedEvent}) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go new file mode 100644 index 0000000000000..1f9983aa98962 --- /dev/null 
+++ b/cmd/k8s-operator/proxygroup.go @@ -0,0 +1,533 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "net/http" + "slices" + "sync" + + "github.com/pkg/errors" + "go.uber.org/zap" + xslices "golang.org/x/exp/slices" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/tailscale" + "tailscale.com/ipn" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" + "tailscale.com/tstime" + "tailscale.com/types/ptr" + "tailscale.com/util/clientmetric" + "tailscale.com/util/mak" + "tailscale.com/util/set" +) + +const ( + reasonProxyGroupCreationFailed = "ProxyGroupCreationFailed" + reasonProxyGroupReady = "ProxyGroupReady" + reasonProxyGroupCreating = "ProxyGroupCreating" + reasonProxyGroupInvalid = "ProxyGroupInvalid" +) + +var gaugeProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupCount) + +// ProxyGroupReconciler ensures cluster resources for a ProxyGroup definition. +type ProxyGroupReconciler struct { + client.Client + l *zap.SugaredLogger + recorder record.EventRecorder + clock tstime.Clock + tsClient tsClient + + // User-specified defaults from the helm installation. 
+ tsNamespace string + proxyImage string + defaultTags []string + tsFirewallMode string + defaultProxyClass string + + mu sync.Mutex // protects following + proxyGroups set.Slice[types.UID] // for proxygroups gauge +} + +func (r *ProxyGroupReconciler) logger(name string) *zap.SugaredLogger { + return r.l.With("ProxyGroup", name) +} + +func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) { + logger := r.logger(req.Name) + logger.Debugf("starting reconcile") + defer logger.Debugf("reconcile finished") + + pg := new(tsapi.ProxyGroup) + err = r.Get(ctx, req.NamespacedName, pg) + if apierrors.IsNotFound(err) { + logger.Debugf("ProxyGroup not found, assuming it was deleted") + return reconcile.Result{}, nil + } else if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to get tailscale.com ProxyGroup: %w", err) + } + if markedForDeletion(pg) { + logger.Debugf("ProxyGroup is being deleted, cleaning up resources") + ix := xslices.Index(pg.Finalizers, FinalizerName) + if ix < 0 { + logger.Debugf("no finalizer, nothing to do") + return reconcile.Result{}, nil + } + + if done, err := r.maybeCleanup(ctx, pg); err != nil { + return reconcile.Result{}, err + } else if !done { + logger.Debugf("ProxyGroup resource cleanup not yet finished, will retry...") + return reconcile.Result{RequeueAfter: shortRequeue}, nil + } + + pg.Finalizers = slices.Delete(pg.Finalizers, ix, ix+1) + if err := r.Update(ctx, pg); err != nil { + return reconcile.Result{}, err + } + return reconcile.Result{}, nil + } + + oldPGStatus := pg.Status.DeepCopy() + setStatusReady := func(pg *tsapi.ProxyGroup, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) { + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, status, reason, message, pg.Generation, r.clock, logger) + if !apiequality.Semantic.DeepEqual(oldPGStatus, pg.Status) { + // An error encountered here should get returned by the Reconcile 
function. + if updateErr := r.Client.Status().Update(ctx, pg); updateErr != nil { + err = errors.Wrap(err, updateErr.Error()) + } + } + return reconcile.Result{}, err + } + + if !slices.Contains(pg.Finalizers, FinalizerName) { + // This log line is printed exactly once during initial provisioning, + // because once the finalizer is in place this block gets skipped. So, + // this is a nice place to log that the high level, multi-reconcile + // operation is underway. + logger.Infof("ensuring ProxyGroup is set up") + pg.Finalizers = append(pg.Finalizers, FinalizerName) + if err = r.Update(ctx, pg); err != nil { + err = fmt.Errorf("error adding finalizer: %w", err) + return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreationFailed, reasonProxyGroupCreationFailed) + } + } + + if err = r.validate(pg); err != nil { + message := fmt.Sprintf("ProxyGroup is invalid: %s", err) + r.recorder.Eventf(pg, corev1.EventTypeWarning, reasonProxyGroupInvalid, message) + return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupInvalid, message) + } + + proxyClassName := r.defaultProxyClass + if pg.Spec.ProxyClass != "" { + proxyClassName = pg.Spec.ProxyClass + } + + var proxyClass *tsapi.ProxyClass + if proxyClassName != "" { + proxyClass = new(tsapi.ProxyClass) + err := r.Get(ctx, types.NamespacedName{Name: proxyClassName}, proxyClass) + if apierrors.IsNotFound(err) { + err = nil + message := fmt.Sprintf("the ProxyGroup's ProxyClass %s does not (yet) exist", proxyClassName) + logger.Info(message) + return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) + } + if err != nil { + err = fmt.Errorf("error getting ProxyGroup's ProxyClass %s: %s", proxyClassName, err) + r.recorder.Eventf(pg, corev1.EventTypeWarning, reasonProxyGroupCreationFailed, err.Error()) + return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreationFailed, err.Error()) + } + if !tsoperator.ProxyClassIsReady(proxyClass) { + message := fmt.Sprintf("the 
ProxyGroup's ProxyClass %s is not yet in a ready state, waiting...", proxyClassName) + logger.Info(message) + return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) + } + } + + if err = r.maybeProvision(ctx, pg, proxyClass); err != nil { + err = fmt.Errorf("error provisioning ProxyGroup resources: %w", err) + r.recorder.Eventf(pg, corev1.EventTypeWarning, reasonProxyGroupCreationFailed, err.Error()) + return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreationFailed, err.Error()) + } + + desiredReplicas := int(pgReplicas(pg)) + if len(pg.Status.Devices) < desiredReplicas { + message := fmt.Sprintf("%d/%d ProxyGroup pods running", len(pg.Status.Devices), desiredReplicas) + logger.Debug(message) + return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) + } + + if len(pg.Status.Devices) > desiredReplicas { + message := fmt.Sprintf("waiting for %d ProxyGroup pods to shut down", len(pg.Status.Devices)-desiredReplicas) + logger.Debug(message) + return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) + } + + logger.Info("ProxyGroup resources synced") + return setStatusReady(pg, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady) +} + +func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) error { + logger := r.logger(pg.Name) + r.mu.Lock() + r.proxyGroups.Add(pg.UID) + gaugeProxyGroupResources.Set(int64(r.proxyGroups.Len())) + r.mu.Unlock() + + cfgHash, err := r.ensureConfigSecretsCreated(ctx, pg, proxyClass) + if err != nil { + return fmt.Errorf("error provisioning config Secrets: %w", err) + } + // State secrets are precreated so we can use the ProxyGroup CR as their owner ref. 
+ stateSecrets := pgStateSecrets(pg, r.tsNamespace) + for _, sec := range stateSecrets { + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sec, func(s *corev1.Secret) { + s.ObjectMeta.Labels = sec.ObjectMeta.Labels + s.ObjectMeta.Annotations = sec.ObjectMeta.Annotations + s.ObjectMeta.OwnerReferences = sec.ObjectMeta.OwnerReferences + }); err != nil { + return fmt.Errorf("error provisioning state Secrets: %w", err) + } + } + sa := pgServiceAccount(pg, r.tsNamespace) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) { + s.ObjectMeta.Labels = sa.ObjectMeta.Labels + s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations + s.ObjectMeta.OwnerReferences = sa.ObjectMeta.OwnerReferences + }); err != nil { + return fmt.Errorf("error provisioning ServiceAccount: %w", err) + } + role := pgRole(pg, r.tsNamespace) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { + r.ObjectMeta.Labels = role.ObjectMeta.Labels + r.ObjectMeta.Annotations = role.ObjectMeta.Annotations + r.ObjectMeta.OwnerReferences = role.ObjectMeta.OwnerReferences + r.Rules = role.Rules + }); err != nil { + return fmt.Errorf("error provisioning Role: %w", err) + } + roleBinding := pgRoleBinding(pg, r.tsNamespace) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) { + r.ObjectMeta.Labels = roleBinding.ObjectMeta.Labels + r.ObjectMeta.Annotations = roleBinding.ObjectMeta.Annotations + r.ObjectMeta.OwnerReferences = roleBinding.ObjectMeta.OwnerReferences + r.RoleRef = roleBinding.RoleRef + r.Subjects = roleBinding.Subjects + }); err != nil { + return fmt.Errorf("error provisioning RoleBinding: %w", err) + } + if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { + cm := pgEgressCM(pg, r.tsNamespace) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, cm, func(existing *corev1.ConfigMap) { + existing.ObjectMeta.Labels = cm.ObjectMeta.Labels + 
existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences + }); err != nil { + return fmt.Errorf("error provisioning ConfigMap: %w", err) + } + } + ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode, cfgHash) + if err != nil { + return fmt.Errorf("error generating StatefulSet spec: %w", err) + } + ss = applyProxyClassToStatefulSet(proxyClass, ss, nil, logger) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) { + s.ObjectMeta.Labels = ss.ObjectMeta.Labels + s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations + s.ObjectMeta.OwnerReferences = ss.ObjectMeta.OwnerReferences + s.Spec = ss.Spec + }); err != nil { + return fmt.Errorf("error provisioning StatefulSet: %w", err) + } + + if err := r.cleanupDanglingResources(ctx, pg); err != nil { + return fmt.Errorf("error cleaning up dangling resources: %w", err) + } + + devices, err := r.getDeviceInfo(ctx, pg) + if err != nil { + return fmt.Errorf("failed to get device info: %w", err) + } + + pg.Status.Devices = devices + + return nil +} + +// cleanupDanglingResources ensures we don't leak config secrets, state secrets, and +// tailnet devices when the number of replicas specified is reduced. +func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, pg *tsapi.ProxyGroup) error { + logger := r.logger(pg.Name) + metadata, err := r.getNodeMetadata(ctx, pg) + if err != nil { + return err + } + + for _, m := range metadata { + if m.ordinal+1 <= int(pgReplicas(pg)) { + continue + } + + // Dangling resource, delete the config + state Secrets, as well as + // deleting the device from the tailnet. 
+ if err := r.deleteTailnetDevice(ctx, m.tsID, logger); err != nil { + return err + } + if err := r.Delete(ctx, m.stateSecret); err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("error deleting state Secret %s: %w", m.stateSecret.Name, err) + } + } + configSecret := m.stateSecret.DeepCopy() + configSecret.Name += "-config" + if err := r.Delete(ctx, configSecret); err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("error deleting config Secret %s: %w", configSecret.Name, err) + } + } + } + + return nil +} + +// maybeCleanup just deletes the device from the tailnet. All the kubernetes +// resources linked to a ProxyGroup will get cleaned up via owner references +// (which we can use because they are all in the same namespace). +func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, pg *tsapi.ProxyGroup) (bool, error) { + logger := r.logger(pg.Name) + + metadata, err := r.getNodeMetadata(ctx, pg) + if err != nil { + return false, err + } + + for _, m := range metadata { + if err := r.deleteTailnetDevice(ctx, m.tsID, logger); err != nil { + return false, err + } + } + + logger.Infof("cleaned up ProxyGroup resources") + r.mu.Lock() + r.proxyGroups.Remove(pg.UID) + gaugeProxyGroupResources.Set(int64(r.proxyGroups.Len())) + r.mu.Unlock() + return true, nil +} + +func (r *ProxyGroupReconciler) deleteTailnetDevice(ctx context.Context, id tailcfg.StableNodeID, logger *zap.SugaredLogger) error { + logger.Debugf("deleting device %s from control", string(id)) + if err := r.tsClient.DeleteDevice(ctx, string(id)); err != nil { + errResp := &tailscale.ErrResponse{} + if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { + logger.Debugf("device %s not found, likely because it has already been deleted from control", string(id)) + } else { + return fmt.Errorf("error deleting device: %w", err) + } + } else { + logger.Debugf("device %s deleted from control", string(id)) + } + + return nil +} + +func (r 
*ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (hash string, err error) { + logger := r.logger(pg.Name) + var allConfigs []tailscaledConfigs + for i := range pgReplicas(pg) { + cfgSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d-config", pg.Name, i), + Namespace: r.tsNamespace, + Labels: pgSecretLabels(pg.Name, "config"), + OwnerReferences: pgOwnerReference(pg), + }, + } + + var existingCfgSecret *corev1.Secret // unmodified copy of secret + if err := r.Get(ctx, client.ObjectKeyFromObject(cfgSecret), cfgSecret); err == nil { + logger.Debugf("secret %s/%s already exists", cfgSecret.GetNamespace(), cfgSecret.GetName()) + existingCfgSecret = cfgSecret.DeepCopy() + } else if !apierrors.IsNotFound(err) { + return "", err + } + + var authKey string + if existingCfgSecret == nil { + logger.Debugf("creating authkey for new ProxyGroup proxy") + tags := pg.Spec.Tags.Stringify() + if len(tags) == 0 { + tags = r.defaultTags + } + authKey, err = newAuthKey(ctx, r.tsClient, tags) + if err != nil { + return "", err + } + } + + configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, existingCfgSecret) + if err != nil { + return "", fmt.Errorf("error creating tailscaled config: %w", err) + } + allConfigs = append(allConfigs, configs) + + for cap, cfg := range configs { + cfgJSON, err := json.Marshal(cfg) + if err != nil { + return "", fmt.Errorf("error marshalling tailscaled config: %w", err) + } + mak.Set(&cfgSecret.StringData, tsoperator.TailscaledConfigFileName(cap), string(cfgJSON)) + } + + if existingCfgSecret != nil { + logger.Debugf("patching the existing ProxyGroup config Secret %s", cfgSecret.Name) + if err := r.Patch(ctx, cfgSecret, client.MergeFrom(existingCfgSecret)); err != nil { + return "", err + } + } else { + logger.Debugf("creating a new config Secret %s for the ProxyGroup", cfgSecret.Name) + if err := r.Create(ctx, cfgSecret); err != nil { + 
return "", err + } + } + } + + sum := sha256.New() + b, err := json.Marshal(allConfigs) + if err != nil { + return "", err + } + if _, err := sum.Write(b); err != nil { + return "", err + } + + return fmt.Sprintf("%x", sum.Sum(nil)), nil +} + +func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32, authKey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) { + conf := &ipn.ConfigVAlpha{ + Version: "alpha0", + AcceptDNS: "false", + AcceptRoutes: "false", // AcceptRoutes defaults to true + Locked: "false", + Hostname: ptr.To(fmt.Sprintf("%s-%d", pg.Name, idx)), + } + + if pg.Spec.HostnamePrefix != "" { + conf.Hostname = ptr.To(fmt.Sprintf("%s%d", pg.Spec.HostnamePrefix, idx)) + } + + if shouldAcceptRoutes(class) { + conf.AcceptRoutes = "true" + } + + deviceAuthed := false + for _, d := range pg.Status.Devices { + if d.Hostname == *conf.Hostname { + deviceAuthed = true + break + } + } + + if authKey != "" { + conf.AuthKey = &authKey + } else if !deviceAuthed { + key, err := authKeyFromSecret(oldSecret) + if err != nil { + return nil, fmt.Errorf("error retrieving auth key from Secret: %w", err) + } + conf.AuthKey = key + } + capVerConfigs := make(map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha) + capVerConfigs[106] = *conf + return capVerConfigs, nil +} + +func (r *ProxyGroupReconciler) validate(_ *tsapi.ProxyGroup) error { + return nil +} + +// getNodeMetadata gets metadata for all the pods owned by this ProxyGroup by +// querying their state Secrets. It may not return the same number of items as +// specified in the ProxyGroup spec if e.g. it is getting scaled up or down, or +// some pods have failed to write state. +func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.ProxyGroup) (metadata []nodeMetadata, _ error) { + // List all state secrets owned by this ProxyGroup. 
+ secrets := &corev1.SecretList{} + if err := r.List(ctx, secrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pg.Name, "state"))); err != nil { + return nil, fmt.Errorf("failed to list state Secrets: %w", err) + } + for _, secret := range secrets.Items { + var ordinal int + if _, err := fmt.Sscanf(secret.Name, pg.Name+"-%d", &ordinal); err != nil { + return nil, fmt.Errorf("unexpected secret %s was labelled as owned by the ProxyGroup %s: %w", secret.Name, pg.Name, err) + } + + id, dnsName, ok, err := getNodeMetadata(ctx, &secret) + if err != nil { + return nil, err + } + if !ok { + continue + } + + metadata = append(metadata, nodeMetadata{ + ordinal: ordinal, + stateSecret: &secret, + tsID: id, + dnsName: dnsName, + }) + } + + return metadata, nil +} + +func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, pg *tsapi.ProxyGroup) (devices []tsapi.TailnetDevice, _ error) { + metadata, err := r.getNodeMetadata(ctx, pg) + if err != nil { + return nil, err + } + + for _, m := range metadata { + device, ok, err := getDeviceInfo(ctx, r.tsClient, m.stateSecret) + if err != nil { + return nil, err + } + if !ok { + continue + } + devices = append(devices, tsapi.TailnetDevice{ + Hostname: device.Hostname, + TailnetIPs: device.TailnetIPs, + }) + } + + return devices, nil +} + +type nodeMetadata struct { + ordinal int + stateSecret *corev1.Secret + tsID tailcfg.StableNodeID + dnsName string +} diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go new file mode 100644 index 0000000000000..9aa7ac3b008a3 --- /dev/null +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -0,0 +1,294 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "fmt" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" + tsapi 
"tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/egressservices" + "tailscale.com/types/ptr" +) + +// Returns the base StatefulSet definition for a ProxyGroup. A ProxyClass may be +// applied over the top after. +func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode, cfgHash string) (*appsv1.StatefulSet, error) { + ss := new(appsv1.StatefulSet) + if err := yaml.Unmarshal(proxyYaml, &ss); err != nil { + return nil, fmt.Errorf("failed to unmarshal proxy spec: %w", err) + } + // Validate some base assumptions. + if len(ss.Spec.Template.Spec.InitContainers) != 1 { + return nil, fmt.Errorf("[unexpected] base proxy config had %d init containers instead of 1", len(ss.Spec.Template.Spec.InitContainers)) + } + if len(ss.Spec.Template.Spec.Containers) != 1 { + return nil, fmt.Errorf("[unexpected] base proxy config had %d containers instead of 1", len(ss.Spec.Template.Spec.Containers)) + } + + // StatefulSet config. + ss.ObjectMeta = metav1.ObjectMeta{ + Name: pg.Name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + OwnerReferences: pgOwnerReference(pg), + } + ss.Spec.Replicas = ptr.To(pgReplicas(pg)) + ss.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: pgLabels(pg.Name, nil), + } + + // Template config. 
+ tmpl := &ss.Spec.Template + tmpl.ObjectMeta = metav1.ObjectMeta{ + Name: pg.Name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + DeletionGracePeriodSeconds: ptr.To[int64](10), + Annotations: map[string]string{ + podAnnotationLastSetConfigFileHash: cfgHash, + }, + } + tmpl.Spec.ServiceAccountName = pg.Name + tmpl.Spec.InitContainers[0].Image = image + tmpl.Spec.Volumes = func() []corev1.Volume { + var volumes []corev1.Volume + for i := range pgReplicas(pg) { + volumes = append(volumes, corev1.Volume{ + Name: fmt.Sprintf("tailscaledconfig-%d", i), + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("%s-%d-config", pg.Name, i), + }, + }, + }) + } + + if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { + volumes = append(volumes, corev1.Volume{ + Name: pgEgressCMName(pg.Name), + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: pgEgressCMName(pg.Name), + }, + }, + }, + }) + } + + return volumes + }() + + // Main container config. + c := &ss.Spec.Template.Spec.Containers[0] + c.Image = image + c.VolumeMounts = func() []corev1.VolumeMount { + var mounts []corev1.VolumeMount + for i := range pgReplicas(pg) { + mounts = append(mounts, corev1.VolumeMount{ + Name: fmt.Sprintf("tailscaledconfig-%d", i), + ReadOnly: true, + MountPath: fmt.Sprintf("/etc/tsconfig/%s-%d", pg.Name, i), + }) + } + + if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { + mounts = append(mounts, corev1.VolumeMount{ + Name: pgEgressCMName(pg.Name), + MountPath: "/etc/proxies", + ReadOnly: true, + }) + } + + return mounts + }() + c.Env = func() []corev1.EnvVar { + envs := []corev1.EnvVar{ + { + // TODO(irbekrm): verify that .status.podIPs are always set, else read in .status.podIP as well. 
+ Name: "POD_IPS", // this will be a comma separate list i.e 10.136.0.6,2600:1900:4011:161:0:e:0:6 + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "status.podIPs", + }, + }, + }, + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + // Secret is named after the pod. + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "TS_KUBE_SECRET", + Value: "$(POD_NAME)", + }, + { + Name: "TS_STATE", + Value: "kube:$(POD_NAME)", + }, + { + Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", + Value: "/etc/tsconfig/$(POD_NAME)", + }, + { + Name: "TS_USERSPACE", + Value: "false", + }, + } + + if tsFirewallMode != "" { + envs = append(envs, corev1.EnvVar{ + Name: "TS_DEBUG_FIREWALL_MODE", + Value: tsFirewallMode, + }) + } + + if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { + envs = append(envs, corev1.EnvVar{ + Name: "TS_EGRESS_SERVICES_CONFIG_PATH", + Value: fmt.Sprintf("/etc/proxies/%s", egressservices.KeyEgressServices), + }) + } + + return envs + }() + + return ss, nil +} + +func pgServiceAccount(pg *tsapi.ProxyGroup, namespace string) *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: pg.Name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + OwnerReferences: pgOwnerReference(pg), + }, + } +} + +func pgRole(pg *tsapi.ProxyGroup, namespace string) *rbacv1.Role { + return &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: pg.Name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + OwnerReferences: pgOwnerReference(pg), + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"secrets"}, + Verbs: []string{ + "get", + "patch", + "update", + }, + ResourceNames: func() (secrets []string) { + for i := range pgReplicas(pg) { + secrets = append(secrets, + fmt.Sprintf("%s-%d-config", pg.Name, i), // Config with auth key. + fmt.Sprintf("%s-%d", pg.Name, i), // State. 
+ ) + } + return secrets + }(), + }, + }, + } +} + +func pgRoleBinding(pg *tsapi.ProxyGroup, namespace string) *rbacv1.RoleBinding { + return &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: pg.Name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + OwnerReferences: pgOwnerReference(pg), + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: pg.Name, + Namespace: namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "Role", + Name: pg.Name, + }, + } +} + +func pgStateSecrets(pg *tsapi.ProxyGroup, namespace string) (secrets []*corev1.Secret) { + for i := range pgReplicas(pg) { + secrets = append(secrets, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", pg.Name, i), + Namespace: namespace, + Labels: pgSecretLabels(pg.Name, "state"), + OwnerReferences: pgOwnerReference(pg), + }, + }) + } + + return secrets +} + +func pgEgressCM(pg *tsapi.ProxyGroup, namespace string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgEgressCMName(pg.Name), + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + OwnerReferences: pgOwnerReference(pg), + }, + } +} + +func pgSecretLabels(pgName, typ string) map[string]string { + return pgLabels(pgName, map[string]string{ + labelSecretType: typ, // "config" or "state". 
+ }) +} + +func pgLabels(pgName string, customLabels map[string]string) map[string]string { + l := make(map[string]string, len(customLabels)+3) + for k, v := range customLabels { + l[k] = v + } + + l[LabelManaged] = "true" + l[LabelParentType] = "proxygroup" + l[LabelParentName] = pgName + + return l +} + +func pgOwnerReference(owner *tsapi.ProxyGroup) []metav1.OwnerReference { + return []metav1.OwnerReference{*metav1.NewControllerRef(owner, tsapi.SchemeGroupVersion.WithKind("ProxyGroup"))} +} + +func pgReplicas(pg *tsapi.ProxyGroup) int32 { + if pg.Spec.Replicas != nil { + return *pg.Spec.Replicas + } + + return 2 +} + +func pgEgressCMName(pg string) string { + return fmt.Sprintf("%s-egress-config", pg) +} diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go new file mode 100644 index 0000000000000..445db7537ddb6 --- /dev/null +++ b/cmd/k8s-operator/proxygroup_test.go @@ -0,0 +1,267 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "encoding/json" + "fmt" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "tailscale.com/client/tailscale" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/tstest" + "tailscale.com/types/ptr" +) + +const testProxyImage = "tailscale/tailscale:test" + +var defaultProxyClassAnnotations = map[string]string{ + "some-annotation": "from-the-proxy-class", +} + +func TestProxyGroup(t *testing.T) { + pc := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default-pc", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Annotations: 
defaultProxyClassAnnotations, + }, + }, + } + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Finalizers: []string{"tailscale.com/finalizer"}, + }, + } + + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(pg, pc). + WithStatusSubresource(pg, pc). + Build() + tsClient := &fakeTSClient{} + zl, _ := zap.NewDevelopment() + fr := record.NewFakeRecorder(1) + cl := tstest.NewClock(tstest.ClockOpts{}) + reconciler := &ProxyGroupReconciler{ + tsNamespace: tsNamespace, + proxyImage: testProxyImage, + defaultTags: []string{"tag:test-tag"}, + tsFirewallMode: "auto", + defaultProxyClass: "default-pc", + + Client: fc, + tsClient: tsClient, + recorder: fr, + l: zl.Sugar(), + clock: cl, + } + + t.Run("proxyclass_not_ready", func(t *testing.T) { + expectReconciled(t, reconciler, "", pg.Name) + + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "the ProxyGroup's ProxyClass default-pc is not yet in a ready state, waiting...", 0, cl, zl.Sugar()) + expectEqual(t, fc, pg, nil) + }) + + t.Run("observe_ProxyGroupCreating_status_reason", func(t *testing.T) { + pc.Status = tsapi.ProxyClassStatus{ + Conditions: []metav1.Condition{{ + Type: string(tsapi.ProxyClassReady), + Status: metav1.ConditionTrue, + Reason: reasonProxyClassValid, + Message: reasonProxyClassValid, + LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, + }}, + } + if err := fc.Status().Update(context.Background(), pc); err != nil { + t.Fatal(err) + } + + expectReconciled(t, reconciler, "", pg.Name) + + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) + expectEqual(t, fc, pg, nil) + if expected := 1; reconciler.proxyGroups.Len() != expected { + t.Fatalf("expected %d recorders, got %d", expected, reconciler.proxyGroups.Len()) + } + expectProxyGroupResources(t, fc, pg, true) + 
keyReq := tailscale.KeyCapabilities{ + Devices: tailscale.KeyDeviceCapabilities{ + Create: tailscale.KeyDeviceCreateCapabilities{ + Reusable: false, + Ephemeral: false, + Preauthorized: true, + Tags: []string{"tag:test-tag"}, + }, + }, + } + if diff := cmp.Diff(tsClient.KeyRequests(), []tailscale.KeyCapabilities{keyReq, keyReq}); diff != "" { + t.Fatalf("unexpected secrets (-got +want):\n%s", diff) + } + }) + + t.Run("simulate_successful_device_auth", func(t *testing.T) { + addNodeIDToStateSecrets(t, fc, pg) + expectReconciled(t, reconciler, "", pg.Name) + + pg.Status.Devices = []tsapi.TailnetDevice{ + { + Hostname: "hostname-nodeid-0", + TailnetIPs: []string{"1.2.3.4", "::1"}, + }, + { + Hostname: "hostname-nodeid-1", + TailnetIPs: []string{"1.2.3.4", "::1"}, + }, + } + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) + expectEqual(t, fc, pg, nil) + expectProxyGroupResources(t, fc, pg, true) + }) + + t.Run("scale_up_to_3", func(t *testing.T) { + pg.Spec.Replicas = ptr.To[int32](3) + mustUpdate(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { + p.Spec = pg.Spec + }) + expectReconciled(t, reconciler, "", pg.Name) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar()) + expectEqual(t, fc, pg, nil) + + addNodeIDToStateSecrets(t, fc, pg) + expectReconciled(t, reconciler, "", pg.Name) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) + pg.Status.Devices = append(pg.Status.Devices, tsapi.TailnetDevice{ + Hostname: "hostname-nodeid-2", + TailnetIPs: []string{"1.2.3.4", "::1"}, + }) + expectEqual(t, fc, pg, nil) + expectProxyGroupResources(t, fc, pg, true) + }) + + t.Run("scale_down_to_1", func(t *testing.T) { + pg.Spec.Replicas = ptr.To[int32](1) + mustUpdate(t, fc, "", 
pg.Name, func(p *tsapi.ProxyGroup) { + p.Spec = pg.Spec + }) + expectReconciled(t, reconciler, "", pg.Name) + pg.Status.Devices = pg.Status.Devices[:1] // truncate to only the first device. + expectEqual(t, fc, pg, nil) + + expectProxyGroupResources(t, fc, pg, true) + }) + + t.Run("delete_and_cleanup", func(t *testing.T) { + if err := fc.Delete(context.Background(), pg); err != nil { + t.Fatal(err) + } + + expectReconciled(t, reconciler, "", pg.Name) + + expectMissing[tsapi.Recorder](t, fc, "", pg.Name) + if expected := 0; reconciler.proxyGroups.Len() != expected { + t.Fatalf("expected %d ProxyGroups, got %d", expected, reconciler.proxyGroups.Len()) + } + // 2 nodes should get deleted as part of the scale down, and then finally + // the first node gets deleted with the ProxyGroup cleanup. + if diff := cmp.Diff(tsClient.deleted, []string{"nodeid-1", "nodeid-2", "nodeid-0"}); diff != "" { + t.Fatalf("unexpected deleted devices (-got +want):\n%s", diff) + } + // The fake client does not clean up objects whose owner has been + // deleted, so we can't test for the owned resources getting deleted. 
+ }) +} + +func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup, shouldExist bool) { + t.Helper() + + role := pgRole(pg, tsNamespace) + roleBinding := pgRoleBinding(pg, tsNamespace) + serviceAccount := pgServiceAccount(pg, tsNamespace) + statefulSet, err := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto", "") + if err != nil { + t.Fatal(err) + } + statefulSet.Annotations = defaultProxyClassAnnotations + + if shouldExist { + expectEqual(t, fc, role, nil) + expectEqual(t, fc, roleBinding, nil) + expectEqual(t, fc, serviceAccount, nil) + expectEqual(t, fc, statefulSet, func(ss *appsv1.StatefulSet) { + ss.Spec.Template.Annotations[podAnnotationLastSetConfigFileHash] = "" + }) + } else { + expectMissing[rbacv1.Role](t, fc, role.Namespace, role.Name) + expectMissing[rbacv1.RoleBinding](t, fc, roleBinding.Namespace, roleBinding.Name) + expectMissing[corev1.ServiceAccount](t, fc, serviceAccount.Namespace, serviceAccount.Name) + expectMissing[appsv1.StatefulSet](t, fc, statefulSet.Namespace, statefulSet.Name) + } + + var expectedSecrets []string + for i := range pgReplicas(pg) { + expectedSecrets = append(expectedSecrets, + fmt.Sprintf("%s-%d", pg.Name, i), + fmt.Sprintf("%s-%d-config", pg.Name, i), + ) + } + expectSecrets(t, fc, expectedSecrets) +} + +func expectSecrets(t *testing.T, fc client.WithWatch, expected []string) { + t.Helper() + + secrets := &corev1.SecretList{} + if err := fc.List(context.Background(), secrets); err != nil { + t.Fatal(err) + } + + var actual []string + for _, secret := range secrets.Items { + actual = append(actual, secret.Name) + } + + if diff := cmp.Diff(actual, expected); diff != "" { + t.Fatalf("unexpected secrets (-got +want):\n%s", diff) + } +} + +func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup) { + const key = "profile-abc" + for i := range pgReplicas(pg) { + bytes, err := json.Marshal(map[string]any{ + "Config": map[string]any{ + "NodeID": 
fmt.Sprintf("nodeid-%d", i), + }, + }) + if err != nil { + t.Fatal(err) + } + + mustUpdate(t, fc, tsNamespace, fmt.Sprintf("test-%d", i), func(s *corev1.Secret) { + s.Data = map[string][]byte{ + currentProfileKey: []byte(key), + key: bytes, + } + }) + } +} diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 1d87d6c5cef88..6378a82636939 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -47,11 +47,11 @@ const ( LabelParentType = "tailscale.com/parent-resource-type" LabelParentName = "tailscale.com/parent-resource" LabelParentNamespace = "tailscale.com/parent-resource-ns" + labelSecretType = "tailscale.com/secret-type" // "config" or "state". - // LabelProxyClass can be set by users on Connectors, tailscale - // Ingresses and Services that define cluster ingress or cluster egress, - // to specify that configuration in this ProxyClass should be applied to - // resources created for the Connector, Ingress or Service. + // LabelProxyClass can be set by users on tailscale Ingresses and Services that define cluster ingress or + // cluster egress, to specify that configuration in this ProxyClass should be applied to resources created for + // the Ingress or Service. LabelProxyClass = "tailscale.com/proxy-class" FinalizerName = "tailscale.com/finalizer" @@ -65,6 +65,8 @@ const ( //MagicDNS name of tailnet node. AnnotationTailnetTargetFQDN = "tailscale.com/tailnet-fqdn" + AnnotationProxyGroup = "tailscale.com/proxy-group" + // Annotations settable by users on ingresses. 
AnnotationFunnel = "tailscale.com/funnel" @@ -302,7 +304,7 @@ func (a *tailscaleSTSReconciler) reconcileHeadlessService(ctx context.Context, l return createOrUpdate(ctx, a.Client, a.operatorNamespace, hsvc, func(svc *corev1.Service) { svc.Spec = hsvc.Spec }) } -func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) (secretName, hash string, configs tailscaleConfigs, _ error) { +func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) (secretName, hash string, configs tailscaledConfigs, _ error) { secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ // Hardcode a -0 suffix so that in future, if we support @@ -360,7 +362,7 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger * latest := tailcfg.CapabilityVersion(-1) var latestConfig ipn.ConfigVAlpha for key, val := range configs { - fn := tsoperator.TailscaledConfigFileNameForCap(key) + fn := tsoperator.TailscaledConfigFileName(key) b, err := json.Marshal(val) if err != nil { return "", "", nil, fmt.Errorf("error marshalling tailscaled config: %w", err) @@ -670,7 +672,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, if pc == nil || ss == nil { return ss } - if pc.Spec.Metrics != nil && pc.Spec.Metrics.Enable { + if stsCfg != nil && pc.Spec.Metrics != nil && pc.Spec.Metrics.Enable { if stsCfg.TailnetTargetFQDN == "" && stsCfg.TailnetTargetIP == "" && !stsCfg.ForwardClusterTrafficViaL7IngressProxy { enableMetrics(ss, pc) } else if stsCfg.ForwardClusterTrafficViaL7IngressProxy { @@ -792,7 +794,7 @@ func readAuthKey(secret *corev1.Secret, key string) (*string, error) { // TODO (irbekrm): remove the legacy config once we no longer need to support // versions older than cap94, // https://tailscale.com/kb/1236/kubernetes-operator#operator-and-proxies -func tailscaledConfig(stsC 
*tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaleConfigs, error) { +func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) { conf := &ipn.ConfigVAlpha{ Version: "alpha0", AcceptDNS: "false", @@ -821,33 +823,12 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co if newAuthkey != "" { conf.AuthKey = &newAuthkey - } else if oldSecret != nil { - var err error - latest := tailcfg.CapabilityVersion(-1) - latestStr := "" - for k, data := range oldSecret.Data { - // write to StringData, read from Data as StringData is write-only - if len(data) == 0 { - continue - } - v, err := tsoperator.CapVerFromFileName(k) - if err != nil { - continue - } - if v > latest { - latestStr = k - latest = v - } - } - // Allow for configs that don't contain an auth key. Perhaps - // users have some mechanisms to delete them. Auth key is - // normally not needed after the initial login. - if latestStr != "" { - conf.AuthKey, err = readAuthKey(oldSecret, latestStr) - if err != nil { - return nil, err - } + } else if shouldRetainAuthKey(oldSecret) { + key, err := authKeyFromSecret(oldSecret) + if err != nil { + return nil, fmt.Errorf("error retrieving auth key from Secret: %w", err) } + conf.AuthKey = key } capVerConfigs := make(map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha) capVerConfigs[95] = *conf @@ -857,6 +838,41 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co return capVerConfigs, nil } +func authKeyFromSecret(s *corev1.Secret) (key *string, err error) { + latest := tailcfg.CapabilityVersion(-1) + latestStr := "" + for k, data := range s.Data { + // write to StringData, read from Data as StringData is write-only + if len(data) == 0 { + continue + } + v, err := tsoperator.CapVerFromFileName(k) + if err != nil { + continue + } + if v > latest { + latestStr = k + latest = v + } + } + // Allow for configs that don't contain an auth key. 
Perhaps + // users have some mechanisms to delete them. Auth key is + // normally not needed after the initial login. + if latestStr != "" { + return readAuthKey(s, latestStr) + } + return key, nil +} + +// shouldRetainAuthKey returns true if the state stored in a proxy's state Secret suggests that auth key should be +// retained (because the proxy has not yet successfully authenticated). +func shouldRetainAuthKey(s *corev1.Secret) bool { + if s == nil { + return false // nothing to retain here + } + return len(s.Data["device_id"]) == 0 // proxy has not authed yet +} + func shouldAcceptRoutes(pc *tsapi.ProxyClass) bool { return pc != nil && pc.Spec.TailscaleConfig != nil && pc.Spec.TailscaleConfig.AcceptRoutes } @@ -868,7 +884,7 @@ type ptrObject[T any] interface { *T } -type tailscaleConfigs map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha +type tailscaledConfigs map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha // hashBytes produces a hash for the provided tailscaled config that is the same across // different invocations of this code. We do not use the @@ -879,7 +895,7 @@ type tailscaleConfigs map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha // thing that changed is operator version (the hash is also exposed to users via // an annotation and might be confusing if it changes without the config having // changed). 
-func tailscaledConfigHash(c tailscaleConfigs) (string, error) { +func tailscaledConfigHash(c tailscaledConfigs) (string, error) { b, err := json.Marshal(c) if err != nil { return "", fmt.Errorf("error marshalling tailscaled configs: %w", err) diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index 6b2ca3514bed6..f45f922463113 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -64,7 +64,7 @@ type ServiceReconciler struct { clock tstime.Clock - proxyDefaultClass string + defaultProxyClass string } var ( @@ -112,6 +112,10 @@ func (a *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request return reconcile.Result{}, fmt.Errorf("failed to get svc: %w", err) } + if _, ok := svc.Annotations[AnnotationProxyGroup]; ok { + return reconcile.Result{}, nil // this reconciler should not look at Services for ProxyGroup + } + if !svc.DeletionTimestamp.IsZero() || !a.isTailscaleService(svc) { logger.Debugf("service is being deleted or is (no longer) referring to Tailscale ingress/egress, ensuring any created resources are cleaned up") return reconcile.Result{}, a.maybeCleanup(ctx, logger, svc) @@ -211,7 +215,7 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga return nil } - proxyClass := proxyClassForObject(svc, a.proxyDefaultClass) + proxyClass := proxyClassForObject(svc, a.defaultProxyClass) if proxyClass != "" { if ready, err := proxyClassIsReady(ctx, proxyClass, a.Client); err != nil { errMsg := fmt.Errorf("error verifying ProxyClass for Service: %w", err) @@ -354,6 +358,10 @@ func validateService(svc *corev1.Service) []string { violations = append(violations, fmt.Sprintf("invalid value of annotation %s: %q does not appear to be a valid MagicDNS name", AnnotationTailnetTargetFQDN, fqdn)) } } + + // TODO(irbekrm): validate that tailscale.com/tailnet-ip annotation is a + // valid IP address (tailscale/tailscale#13671). 
+ svcName := nameForService(svc) if err := dnsname.ValidLabel(svcName); err != nil { if _, ok := svc.Annotations[AnnotationHostname]; ok { diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 9e37d32a92cfb..6b6297cbdd4fe 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -53,6 +53,8 @@ type configOpts struct { shouldEnableForwardingClusterTrafficViaIngress bool proxyClass string // configuration from the named ProxyClass should be applied to proxy resources app string + shouldRemoveAuthKey bool + secretExtraData map[string][]byte } func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.StatefulSet { @@ -365,6 +367,9 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec conf.AcceptRoutes = "true" } } + if opts.shouldRemoveAuthKey { + conf.AuthKey = nil + } var routes []netip.Prefix if opts.subnetRoutes != "" || opts.isExitNode { r := opts.subnetRoutes @@ -405,6 +410,9 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec labels["tailscale.com/parent-resource-ns"] = "" // Connector is cluster scoped } s.Labels = labels + for key, val := range opts.secretExtraData { + mak.Set(&s.Data, key, val) + } return s } @@ -596,7 +604,7 @@ func (c *fakeTSClient) CreateKey(ctx context.Context, caps tailscale.KeyCapabili func (c *fakeTSClient) Device(ctx context.Context, deviceID string, fields *tailscale.DeviceFieldsOpts) (*tailscale.Device, error) { return &tailscale.Device{ DeviceID: deviceID, - Hostname: "test-device", + Hostname: "hostname-" + deviceID, Addresses: []string{ "1.2.3.4", "::1", @@ -631,6 +639,14 @@ func removeHashAnnotation(sts *appsv1.StatefulSet) { delete(sts.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash) } +func removeTargetPortsFromSvc(svc *corev1.Service) { + newPorts := make([]corev1.ServicePort, 0) + for _, p := range svc.Spec.Ports { + newPorts = append(newPorts, 
corev1.ServicePort{Protocol: p.Protocol, Port: p.Port}) + } + svc.Spec.Ports = newPorts +} + func removeAuthKeyIfExistsModifier(t *testing.T) func(s *corev1.Secret) { return func(secret *corev1.Secret) { t.Helper() diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go index 8c9ab236f62f9..cfe38c50af311 100644 --- a/cmd/k8s-operator/tsrecorder.go +++ b/cmd/k8s-operator/tsrecorder.go @@ -199,7 +199,7 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Reco return fmt.Errorf("error creating StatefulSet: %w", err) } - var devices []tsapi.TailnetDevice + var devices []tsapi.RecorderTailnetDevice device, ok, err := r.getDeviceInfo(ctx, tsr.Name) if err != nil { @@ -302,9 +302,7 @@ func (r *RecorderReconciler) validate(tsr *tsapi.Recorder) error { return nil } -// getNodeMetadata returns 'ok == true' iff the node ID is found. The dnsName -// is expected to always be non-empty if the node ID is, but not required. -func (r *RecorderReconciler) getNodeMetadata(ctx context.Context, tsrName string) (id tailcfg.StableNodeID, dnsName string, ok bool, err error) { +func (r *RecorderReconciler) getStateSecret(ctx context.Context, tsrName string) (*corev1.Secret, error) { secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Namespace: r.tsNamespace, @@ -313,12 +311,27 @@ func (r *RecorderReconciler) getNodeMetadata(ctx context.Context, tsrName string } if err := r.Get(ctx, client.ObjectKeyFromObject(secret), secret); err != nil { if apierrors.IsNotFound(err) { - return "", "", false, nil + return nil, nil } + return nil, fmt.Errorf("error getting state Secret: %w", err) + } + + return secret, nil +} + +func (r *RecorderReconciler) getNodeMetadata(ctx context.Context, tsrName string) (id tailcfg.StableNodeID, dnsName string, ok bool, err error) { + secret, err := r.getStateSecret(ctx, tsrName) + if err != nil || secret == nil { return "", "", false, err } + return getNodeMetadata(ctx, secret) +} + +// getNodeMetadata returns 'ok 
== true' iff the node ID is found. The dnsName +// is expected to always be non-empty if the node ID is, but not required. +func getNodeMetadata(ctx context.Context, secret *corev1.Secret) (id tailcfg.StableNodeID, dnsName string, ok bool, err error) { // TODO(tomhjp): Should maybe use ipn to parse the following info instead. currentProfile, ok := secret.Data[currentProfileKey] if !ok { @@ -337,20 +350,29 @@ func (r *RecorderReconciler) getNodeMetadata(ctx context.Context, tsrName string return tailcfg.StableNodeID(profile.Config.NodeID), profile.Config.UserProfile.LoginName, ok, nil } -func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsrName string) (d tsapi.TailnetDevice, ok bool, err error) { - nodeID, dnsName, ok, err := r.getNodeMetadata(ctx, tsrName) +func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsrName string) (d tsapi.RecorderTailnetDevice, ok bool, err error) { + secret, err := r.getStateSecret(ctx, tsrName) + if err != nil || secret == nil { + return tsapi.RecorderTailnetDevice{}, false, err + } + + return getDeviceInfo(ctx, r.tsClient, secret) +} + +func getDeviceInfo(ctx context.Context, tsClient tsClient, secret *corev1.Secret) (d tsapi.RecorderTailnetDevice, ok bool, err error) { + nodeID, dnsName, ok, err := getNodeMetadata(ctx, secret) if !ok || err != nil { - return tsapi.TailnetDevice{}, false, err + return tsapi.RecorderTailnetDevice{}, false, err } // TODO(tomhjp): The profile info doesn't include addresses, which is why we // need the API. Should we instead update the profile to include addresses? 
- device, err := r.tsClient.Device(ctx, string(nodeID), nil) + device, err := tsClient.Device(ctx, string(nodeID), nil) if err != nil { - return tsapi.TailnetDevice{}, false, fmt.Errorf("failed to get device info from API: %w", err) + return tsapi.RecorderTailnetDevice{}, false, fmt.Errorf("failed to get device info from API: %w", err) } - d = tsapi.TailnetDevice{ + d = tsapi.RecorderTailnetDevice{ Hostname: device.Hostname, TailnetIPs: device.Addresses, } @@ -370,6 +392,6 @@ type profile struct { } `json:"Config"` } -func markedForDeletion(tsr *tsapi.Recorder) bool { - return !tsr.DeletionTimestamp.IsZero() +func markedForDeletion(obj metav1.Object) bool { + return !obj.GetDeletionTimestamp().IsZero() } diff --git a/cmd/k8s-operator/tsrecorder_test.go b/cmd/k8s-operator/tsrecorder_test.go index cff7021051bdf..bd73e8fb9ec26 100644 --- a/cmd/k8s-operator/tsrecorder_test.go +++ b/cmd/k8s-operator/tsrecorder_test.go @@ -105,9 +105,9 @@ func TestRecorder(t *testing.T) { }) expectReconciled(t, reconciler, "", tsr.Name) - tsr.Status.Devices = []tsapi.TailnetDevice{ + tsr.Status.Devices = []tsapi.RecorderTailnetDevice{ { - Hostname: "test-device", + Hostname: "hostname-nodeid-123", TailnetIPs: []string{"1.2.3.4", "::1"}, URL: "https://test-0.example.ts.net", }, diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index df458755c42bc..d94523c6e4161 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -456,6 +456,11 @@ func (c *connector) ignoreDestination(dstAddrs []netip.Addr) bool { } func proxyTCPConn(c net.Conn, dest string) { + if c.RemoteAddr() == nil { + log.Printf("proxyTCPConn: nil RemoteAddr") + c.Close() + return + } addrPortStr := c.LocalAddr().String() _, port, err := net.SplitHostPort(addrPortStr) if err != nil { diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 09540c833a115..a35f59516ee32 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -91,7 +91,7 @@ tailscale.com/cmd/stund dependencies: (generated by 
github.com/tailscale/depawar golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/crypto/sha3 from crypto/internal/mlkem768+ - golang.org/x/net/dns/dnsmessage from net + golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from net/http golang.org/x/net/http/httpproxy from net/http golang.org/x/net/http2/hpack from net/http diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index b0658fd953036..d103c8f7e9f5c 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -1448,7 +1448,7 @@ func TestParseNLArgs(t *testing.T) { name: "disablements not allowed", input: []string{"disablement:" + strings.Repeat("02", 32)}, parseKeys: true, - wantErr: fmt.Errorf("parsing key 1: key hex string doesn't have expected type prefix nlpub:"), + wantErr: fmt.Errorf("parsing key 1: key hex string doesn't have expected type prefix tlpub:"), }, { name: "keys not allowed", diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index e98a9e0789657..fdde9ef096ae3 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -844,7 +844,8 @@ func runTS2021(ctx context.Context, args []string) error { if ts2021Args.verbose { logf = log.Printf } - conn, err := (&controlhttp.Dialer{ + + noiseDialer := &controlhttp.Dialer{ Hostname: ts2021Args.host, HTTPPort: "80", HTTPSPort: "443", @@ -853,7 +854,21 @@ func runTS2021(ctx context.Context, args []string) error { ProtocolVersion: uint16(ts2021Args.version), Dialer: dialFunc, Logf: logf, - }).Dial(ctx) + } + const tries = 2 + for i := range tries { + err := tryConnect(ctx, keys.PublicKey, noiseDialer) + if err != nil { + log.Printf("error on attempt %d/%d: %v", i+1, tries, err) + continue + } + break + } + return nil +} + +func tryConnect(ctx context.Context, controlPublic key.MachinePublic, noiseDialer *controlhttp.Dialer) error { + conn, err := 
noiseDialer.Dial(ctx) log.Printf("controlhttp.Dial = %p, %v", conn, err) if err != nil { return err @@ -861,8 +876,8 @@ func runTS2021(ctx context.Context, args []string) error { log.Printf("did noise handshake") gotPeer := conn.Peer() - if gotPeer != keys.PublicKey { - log.Printf("peer = %v, want %v", gotPeer, keys.PublicKey) + if gotPeer != controlPublic { + log.Printf("peer = %v, want %v", gotPeer, controlPublic) return errors.New("key mismatch") } @@ -894,7 +909,7 @@ func runTS2021(ctx context.Context, args []string) error { // Make a /whoami request to the server to verify that we can actually // communicate over the newly-established connection. whoamiURL := "http://" + ts2021Args.host + "/machine/whoami" - req, err = http.NewRequestWithContext(ctx, "GET", whoamiURL, nil) + req, err := http.NewRequestWithContext(ctx, "GET", whoamiURL, nil) if err != nil { return err } diff --git a/cmd/tailscale/cli/dns-query.go b/cmd/tailscale/cli/dns-query.go new file mode 100644 index 0000000000000..da2d9d2a56d77 --- /dev/null +++ b/cmd/tailscale/cli/dns-query.go @@ -0,0 +1,163 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import ( + "context" + "flag" + "fmt" + "net/netip" + "os" + "text/tabwriter" + + "golang.org/x/net/dns/dnsmessage" + "tailscale.com/types/dnstype" +) + +func runDNSQuery(ctx context.Context, args []string) error { + if len(args) < 1 { + return flag.ErrHelp + } + name := args[0] + queryType := "A" + if len(args) >= 2 { + queryType = args[1] + } + fmt.Printf("DNS query for %q (%s) using internal resolver:\n", name, queryType) + fmt.Println() + bytes, resolvers, err := localClient.QueryDNS(ctx, name, queryType) + if err != nil { + fmt.Printf("failed to query DNS: %v\n", err) + return nil + } + + if len(resolvers) == 1 { + fmt.Printf("Forwarding to resolver: %v\n", makeResolverString(*resolvers[0])) + } else { + fmt.Println("Multiple resolvers available:") + for _, r := range resolvers { + 
fmt.Printf(" - %v\n", makeResolverString(*r)) + } + } + fmt.Println() + var p dnsmessage.Parser + header, err := p.Start(bytes) + if err != nil { + fmt.Printf("failed to parse DNS response: %v\n", err) + return err + } + fmt.Printf("Response code: %v\n", header.RCode.String()) + fmt.Println() + p.SkipAllQuestions() + if header.RCode != dnsmessage.RCodeSuccess { + fmt.Println("No answers were returned.") + return nil + } + answers, err := p.AllAnswers() + if err != nil { + fmt.Printf("failed to parse DNS answers: %v\n", err) + return err + } + if len(answers) == 0 { + fmt.Println(" (no answers found)") + } + + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + fmt.Fprintln(w, "Name\tTTL\tClass\tType\tBody") + fmt.Fprintln(w, "----\t---\t-----\t----\t----") + for _, a := range answers { + fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\n", a.Header.Name.String(), a.Header.TTL, a.Header.Class.String(), a.Header.Type.String(), makeAnswerBody(a)) + } + w.Flush() + + fmt.Println() + return nil +} + +// makeAnswerBody returns a string with the DNS answer body in a human-readable format. 
+func makeAnswerBody(a dnsmessage.Resource) string { + switch a.Header.Type { + case dnsmessage.TypeA: + return makeABody(a.Body) + case dnsmessage.TypeAAAA: + return makeAAAABody(a.Body) + case dnsmessage.TypeCNAME: + return makeCNAMEBody(a.Body) + case dnsmessage.TypeMX: + return makeMXBody(a.Body) + case dnsmessage.TypeNS: + return makeNSBody(a.Body) + case dnsmessage.TypeOPT: + return makeOPTBody(a.Body) + case dnsmessage.TypePTR: + return makePTRBody(a.Body) + case dnsmessage.TypeSRV: + return makeSRVBody(a.Body) + case dnsmessage.TypeTXT: + return makeTXTBody(a.Body) + default: + return a.Body.GoString() + } +} + +func makeABody(a dnsmessage.ResourceBody) string { + if a, ok := a.(*dnsmessage.AResource); ok { + return netip.AddrFrom4(a.A).String() + } + return "" +} +func makeAAAABody(aaaa dnsmessage.ResourceBody) string { + if a, ok := aaaa.(*dnsmessage.AAAAResource); ok { + return netip.AddrFrom16(a.AAAA).String() + } + return "" +} +func makeCNAMEBody(cname dnsmessage.ResourceBody) string { + if c, ok := cname.(*dnsmessage.CNAMEResource); ok { + return c.CNAME.String() + } + return "" +} +func makeMXBody(mx dnsmessage.ResourceBody) string { + if m, ok := mx.(*dnsmessage.MXResource); ok { + return fmt.Sprintf("%s (Priority=%d)", m.MX, m.Pref) + } + return "" +} +func makeNSBody(ns dnsmessage.ResourceBody) string { + if n, ok := ns.(*dnsmessage.NSResource); ok { + return n.NS.String() + } + return "" +} +func makeOPTBody(opt dnsmessage.ResourceBody) string { + if o, ok := opt.(*dnsmessage.OPTResource); ok { + return o.GoString() + } + return "" +} +func makePTRBody(ptr dnsmessage.ResourceBody) string { + if p, ok := ptr.(*dnsmessage.PTRResource); ok { + return p.PTR.String() + } + return "" +} +func makeSRVBody(srv dnsmessage.ResourceBody) string { + if s, ok := srv.(*dnsmessage.SRVResource); ok { + return fmt.Sprintf("Target=%s, Port=%d, Priority=%d, Weight=%d", s.Target.String(), s.Port, s.Priority, s.Weight) + } + return "" +} +func makeTXTBody(txt 
dnsmessage.ResourceBody) string { + if t, ok := txt.(*dnsmessage.TXTResource); ok { + return fmt.Sprintf("%q", t.TXT) + } + return "" +} +func makeResolverString(r dnstype.Resolver) string { + if len(r.BootstrapResolution) > 0 { + return fmt.Sprintf("%s (bootstrap: %v)", r.Addr, r.BootstrapResolution) + } + return fmt.Sprintf("%s", r.Addr) +} diff --git a/cmd/tailscale/cli/dns-status.go b/cmd/tailscale/cli/dns-status.go index 0d59e4b9157f0..e487c66bc331c 100644 --- a/cmd/tailscale/cli/dns-status.go +++ b/cmd/tailscale/cli/dns-status.go @@ -75,7 +75,7 @@ func runDNSStatus(ctx context.Context, args []string) error { fmt.Print("\n") fmt.Println("Split DNS Routes:") if len(dnsConfig.Routes) == 0 { - fmt.Println(" (no routes configured: split DNS might not be in use)") + fmt.Println(" (no routes configured: split DNS disabled)") } for _, k := range slices.Sorted(maps.Keys(dnsConfig.Routes)) { v := dnsConfig.Routes[k] diff --git a/cmd/tailscale/cli/dns.go b/cmd/tailscale/cli/dns.go index 2825556952521..042ce1a94161a 100644 --- a/cmd/tailscale/cli/dns.go +++ b/cmd/tailscale/cli/dns.go @@ -28,8 +28,13 @@ var dnsCmd = &ffcli.Command{ return fs })(), }, - - // TODO: implement `tailscale query` here + { + Name: "query", + ShortUsage: "tailscale dns query [a|aaaa|cname|mx|ns|opt|ptr|srv|txt]", + Exec: runDNSQuery, + ShortHelp: "Perform a DNS query", + LongHelp: "The 'tailscale dns query' subcommand performs a DNS query for the specified name using the internal DNS forwarder (100.100.100.100).\n\nIt also provides information about the resolver(s) used to resolve the query.", + }, // TODO: implement `tailscale log` here diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index 7bea1f7249e19..45f989f1057a7 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -151,13 +151,15 @@ func runNetworkLockInit(ctx context.Context, args []string) error { return nil } - fmt.Printf("%d disablement secrets have been generated 
and are printed below. Take note of them now, they WILL NOT be shown again.\n", nlInitArgs.numDisablements) + var successMsg strings.Builder + + fmt.Fprintf(&successMsg, "%d disablement secrets have been generated and are printed below. Take note of them now, they WILL NOT be shown again.\n", nlInitArgs.numDisablements) for range nlInitArgs.numDisablements { var secret [32]byte if _, err := rand.Read(secret[:]); err != nil { return err } - fmt.Printf("\tdisablement-secret:%X\n", secret[:]) + fmt.Fprintf(&successMsg, "\tdisablement-secret:%X\n", secret[:]) disablementValues = append(disablementValues, tka.DisablementKDF(secret[:])) } @@ -168,7 +170,7 @@ func runNetworkLockInit(ctx context.Context, args []string) error { return err } disablementValues = append(disablementValues, tka.DisablementKDF(supportDisablement)) - fmt.Println("A disablement secret for Tailscale support has been generated and will be transmitted to Tailscale upon initialization.") + fmt.Fprintln(&successMsg, "A disablement secret for Tailscale support has been generated and transmitted to Tailscale.") } // The state returned by NetworkLockInit likely doesn't contain the initialized state, @@ -177,6 +179,7 @@ func runNetworkLockInit(ctx context.Context, args []string) error { return err } + fmt.Print(successMsg.String()) fmt.Println("Initialization complete.") return nil } diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index c0bc3776d16df..e1b828105b8dd 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -32,10 +32,12 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/net/netutil" + "tailscale.com/net/tsaddr" "tailscale.com/safesocket" "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/types/preftype" + "tailscale.com/types/views" "tailscale.com/util/dnsname" "tailscale.com/version" "tailscale.com/version/distro" @@ -1015,7 +1017,7 @@ func prefsToFlags(env upCheckEnv, prefs *ipn.Prefs) (flagVal map[string]any) { 
set(prefs.OperatorUser) case "advertise-routes": var sb strings.Builder - for i, r := range withoutExitNodes(prefs.AdvertiseRoutes) { + for i, r := range tsaddr.WithoutExitRoutes(views.SliceOf(prefs.AdvertiseRoutes)).All() { if i > 0 { sb.WriteByte(',') } @@ -1023,7 +1025,7 @@ func prefsToFlags(env upCheckEnv, prefs *ipn.Prefs) (flagVal map[string]any) { } set(sb.String()) case "advertise-exit-node": - set(hasExitNodeRoutes(prefs.AdvertiseRoutes)) + set(tsaddr.ContainsExitRoutes(views.SliceOf(prefs.AdvertiseRoutes))) case "advertise-connector": set(prefs.AppConnector.Advertise) case "snat-subnet-routes": @@ -1057,36 +1059,6 @@ func fmtFlagValueArg(flagName string, val any) string { return fmt.Sprintf("--%s=%v", flagName, shellquote.Join(fmt.Sprint(val))) } -func hasExitNodeRoutes(rr []netip.Prefix) bool { - var v4, v6 bool - for _, r := range rr { - if r.Bits() == 0 { - if r.Addr().Is4() { - v4 = true - } else if r.Addr().Is6() { - v6 = true - } - } - } - return v4 && v6 -} - -// withoutExitNodes returns rr unchanged if it has only 1 or 0 /0 -// routes. If it has both IPv4 and IPv6 /0 routes, then it returns -// a copy with all /0 routes removed. -func withoutExitNodes(rr []netip.Prefix) []netip.Prefix { - if !hasExitNodeRoutes(rr) { - return rr - } - var out []netip.Prefix - for _, r := range rr { - if r.Bits() > 0 { - out = append(out, r) - } - } - return out -} - // exitNodeIP returns the exit node IP from p, using st to map // it from its ID form to an IP address if needed. 
func exitNodeIP(p *ipn.Prefs, st *ipnstate.Status) (ip netip.Addr) { diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index d9432614fa96e..2c644d1be7d79 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -26,7 +26,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L github.com/google/nftables/expr from github.com/google/nftables+ L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ L github.com/google/nftables/xt from github.com/google/nftables/expr+ - github.com/google/uuid from tailscale.com/clientupdate+ + DW github.com/google/uuid from tailscale.com/clientupdate+ github.com/gorilla/csrf from tailscale.com/client/web github.com/gorilla/securecookie from github.com/gorilla/csrf github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ @@ -80,7 +80,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/cmd/tailscale/cli tailscale.com/clientupdate from tailscale.com/client/web+ - tailscale.com/clientupdate/distsign from tailscale.com/clientupdate + LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/cmd/tailscale/cli from tailscale.com/cmd/tailscale tailscale.com/cmd/tailscale/cli/ffcomplete from tailscale.com/cmd/tailscale/cli tailscale.com/cmd/tailscale/cli/ffcomplete/internal from tailscale.com/cmd/tailscale/cli/ffcomplete @@ -134,7 +134,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/tstime/mono from tailscale.com/tstime/rate tailscale.com/tstime/rate from tailscale.com/cmd/tailscale/cli+ tailscale.com/tsweb/varz from tailscale.com/util/usermetric - tailscale.com/types/dnstype from tailscale.com/tailcfg + tailscale.com/types/dnstype from tailscale.com/tailcfg+ tailscale.com/types/empty from 
tailscale.com/ipn tailscale.com/types/ipproto from tailscale.com/net/flowtrack+ tailscale.com/types/key from tailscale.com/client/tailscale+ @@ -171,13 +171,14 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/singleflight from tailscale.com/net/dnscache+ tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ tailscale.com/util/syspolicy from tailscale.com/ipn - tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting + tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ + tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy tailscale.com/util/testenv from tailscale.com/cmd/tailscale/cli tailscale.com/util/truncate from tailscale.com/cmd/tailscale/cli tailscale.com/util/usermetric from tailscale.com/health tailscale.com/util/vizerror from tailscale.com/tailcfg+ - 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ + W 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ tailscale.com/version from tailscale.com/client/web+ @@ -257,7 +258,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep crypto/tls from github.com/miekg/dns+ crypto/x509 from crypto/tls+ crypto/x509/pkix from crypto/x509+ - database/sql/driver from github.com/google/uuid + DW database/sql/driver from github.com/google/uuid W debug/dwarf from debug/pe W debug/pe from github.com/dblohm7/wingoes/pe embed from crypto/internal/nistec+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 018e74fac7bae..6f71a88a93217 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -111,7 +111,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by 
github.com/tailscale/de L github.com/google/nftables/expr from github.com/google/nftables+ L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ L github.com/google/nftables/xt from github.com/google/nftables/expr+ - github.com/google/uuid from tailscale.com/clientupdate+ + DW github.com/google/uuid from tailscale.com/clientupdate+ github.com/gorilla/csrf from tailscale.com/client/web github.com/gorilla/securecookie from github.com/gorilla/csrf github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ @@ -244,7 +244,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ - tailscale.com/clientupdate/distsign from tailscale.com/clientupdate + LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled+ tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ @@ -398,7 +398,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ tailscale.com/util/syspolicy from tailscale.com/cmd/tailscaled+ - tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting + tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ + tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock tailscale.com/util/systemd from tailscale.com/control/controlclient+ @@ -507,7 +508,7 @@ 
tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ crypto/x509 from crypto/tls+ crypto/x509/pkix from crypto/x509+ - database/sql/driver from github.com/google/uuid + DW database/sql/driver from github.com/google/uuid W debug/dwarf from debug/pe W debug/pe from github.com/dblohm7/wingoes/pe embed from crypto/internal/nistec+ diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index eb53f4f15e157..2831b4061973d 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -680,12 +680,15 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo ListenPort: args.port, NetMon: sys.NetMon.Get(), HealthTracker: sys.HealthTracker(), + Metrics: sys.UserMetricsRegistry(), Dialer: sys.Dialer.Get(), SetSubsystem: sys.Set, ControlKnobs: sys.ControlKnobs(), DriveForLocal: driveimpl.NewFileSystemForLocal(logf), } + sys.HealthTracker().SetMetricsRegistry(sys.UserMetricsRegistry()) + onlyNetstack = name == "userspace-networking" netstackSubnetRouter := onlyNetstack // but mutated later on some platforms netns.SetEnabled(!onlyNetstack) diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 3e077f9fdf782..1bdca8919a085 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -64,6 +64,7 @@ var ( flagLocalPort = flag.Int("local-port", -1, "allow requests from localhost") flagUseLocalTailscaled = flag.Bool("use-local-tailscaled", false, "use local tailscaled instead of tsnet") flagFunnel = flag.Bool("funnel", false, "use Tailscale Funnel to make tsidp available on the public internet") + flagDir = flag.String("dir", "", "tsnet state directory; a default one will be created if not provided") ) func main() { @@ -120,6 +121,7 @@ func main() { } else { ts := &tsnet.Server{ Hostname: "idp", + Dir: *flagDir, } if *flagVerbose { ts.Logf = log.Printf diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go 
index 072b43447dc3a..9cbd0e14ead52 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -82,6 +82,8 @@ type Direct struct { onControlTime func(time.Time) // or nil onTailnetDefaultAutoUpdate func(bool) // or nil panicOnUse bool // if true, panic if client is used (for testing) + closedCtx context.Context // alive until Direct.Close is called + closeCtx context.CancelFunc // cancels closedCtx dialPlan ControlDialPlanner // can be nil @@ -303,6 +305,8 @@ func NewDirect(opts Options) (*Direct, error) { dnsCache: dnsCache, dialPlan: opts.DialPlan, } + c.closedCtx, c.closeCtx = context.WithCancel(context.Background()) + if opts.Hostinfo == nil { c.SetHostinfo(hostinfo.New()) } else { @@ -325,6 +329,8 @@ func NewDirect(opts Options) (*Direct, error) { // Close closes the underlying Noise connection(s). func (c *Direct) Close() error { + c.closeCtx() + c.mu.Lock() defer c.mu.Unlock() if c.noiseClient != nil { @@ -1223,7 +1229,7 @@ func loadServerPubKeys(ctx context.Context, httpc *http.Client, serverURL string return nil, fmt.Errorf("fetch control key response: %v", err) } if res.StatusCode != 200 { - return nil, fmt.Errorf("fetch control key: %d", res.StatusCode) + return nil, fmt.Errorf("fetch control key: %v", res.Status) } var out tailcfg.OverTLSPublicKeyResponse jsonErr := json.Unmarshal(b, &out) @@ -1628,7 +1634,7 @@ func (c *Direct) ReportHealthChange(w *health.Warnable, us *health.UnhealthyStat } // Best effort, no logging: - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := context.WithTimeout(c.closedCtx, 5*time.Second) defer cancel() res, err := np.post(ctx, "/machine/update-health", nodeKey, req) if err != nil { diff --git a/control/controlclient/noise.go b/control/controlclient/noise.go index 44437e2f34837..3994af056fc3b 100644 --- a/control/controlclient/noise.go +++ b/control/controlclient/noise.go @@ -5,6 +5,7 @@ package controlclient import ( "bytes" + "cmp" "context" "encoding/json" 
"errors" @@ -16,6 +17,7 @@ import ( "golang.org/x/net/http2" "tailscale.com/control/controlhttp" + "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/internal/noiseconn" "tailscale.com/net/dnscache" @@ -28,6 +30,7 @@ import ( "tailscale.com/util/mak" "tailscale.com/util/multierr" "tailscale.com/util/singleflight" + "tailscale.com/util/testenv" ) // NoiseClient provides a http.Client to connect to tailcontrol over @@ -56,8 +59,8 @@ type NoiseClient struct { privKey key.MachinePrivate serverPubKey key.MachinePublic host string // the host part of serverURL - httpPort string // the default port to call - httpsPort string // the fallback Noise-over-https port + httpPort string // the default port to dial + httpsPort string // the fallback Noise-over-https port or empty if none // dialPlan optionally returns a ControlDialPlan previously received // from the control server; either the function or the return value can @@ -104,6 +107,11 @@ type NoiseOpts struct { DialPlan func() *tailcfg.ControlDialPlan } +// controlIsPlaintext is whether we should assume that the controlplane is only accessible +// over plaintext HTTP (as the first hop, before the ts2021 encryption begins). +// This is used by some tests which don't have a real TLS certificate. +var controlIsPlaintext = envknob.RegisterBool("TS_CONTROL_IS_PLAINTEXT_HTTP") + // NewNoiseClient returns a new noiseClient for the provided server and machine key. // serverURL is of the form https://: (no trailing slash). 
// @@ -116,14 +124,17 @@ func NewNoiseClient(opts NoiseOpts) (*NoiseClient, error) { } var httpPort string var httpsPort string - if u.Port() != "" { + if port := u.Port(); port != "" { // If there is an explicit port specified, trust the scheme and hope for the best if u.Scheme == "http" { - httpPort = u.Port() + httpPort = port httpsPort = "443" + if (testenv.InTest() || controlIsPlaintext()) && (u.Hostname() == "127.0.0.1" || u.Hostname() == "localhost") { + httpsPort = "" + } } else { httpPort = "80" - httpsPort = u.Port() + httpsPort = port } } else { // Otherwise, use the standard ports @@ -340,7 +351,7 @@ func (nc *NoiseClient) dial(ctx context.Context) (*noiseconn.Conn, error) { clientConn, err := (&controlhttp.Dialer{ Hostname: nc.host, HTTPPort: nc.httpPort, - HTTPSPort: nc.httpsPort, + HTTPSPort: cmp.Or(nc.httpsPort, controlhttp.NoPort), MachineKey: nc.privKey, ControlKey: nc.serverPubKey, ProtocolVersion: uint16(tailcfg.CurrentCapabilityVersion), diff --git a/control/controlhttp/client.go b/control/controlhttp/client.go index e01cb1f9a4cc5..7e5263e3317fe 100644 --- a/control/controlhttp/client.go +++ b/control/controlhttp/client.go @@ -86,9 +86,6 @@ func (a *Dialer) getProxyFunc() func(*http.Request) (*url.URL, error) { // httpsFallbackDelay is how long we'll wait for a.HTTPPort to work before // starting to try a.HTTPSPort. func (a *Dialer) httpsFallbackDelay() time.Duration { - if forceNoise443() { - return time.Nanosecond - } if v := a.testFallbackDelay; v != 0 { return v } @@ -151,10 +148,7 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { // before we do anything. 
if c.DialStartDelaySec > 0 { a.logf("[v2] controlhttp: waiting %.2f seconds before dialing %q @ %v", c.DialStartDelaySec, a.Hostname, c.IP) - if a.Clock == nil { - a.Clock = tstime.StdClock{} - } - tmr, tmrChannel := a.Clock.NewTimer(time.Duration(c.DialStartDelaySec * float64(time.Second))) + tmr, tmrChannel := a.clock().NewTimer(time.Duration(c.DialStartDelaySec * float64(time.Second))) defer tmr.Stop() select { case <-ctx.Done(): @@ -268,12 +262,43 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { // fixed, this is a workaround. It might also be useful for future debugging. var forceNoise443 = envknob.RegisterBool("TS_FORCE_NOISE_443") +// forceNoise443 reports whether the controlclient noise dialer should always +// use HTTPS connections as its underlay connection (double crypto). This can +// be necessary when networks or middle boxes are messing with port 80. +func (d *Dialer) forceNoise443() bool { + if forceNoise443() { + return true + } + + if d.HealthTracker.LastNoiseDialWasRecent() { + // If we dialed recently, assume there was a recent failure and fall + // back to HTTPS dials for the subsequent retries. + // + // This heuristic works around networks where port 80 is MITMed and + // appears to work for a bit post-Upgrade but then gets closed, + // such as seen in https://github.com/tailscale/tailscale/issues/13597. + d.logf("controlhttp: forcing port 443 dial due to recent noise dial") + return true + } + + return false +} + +func (d *Dialer) clock() tstime.Clock { + if d.Clock != nil { + return d.Clock + } + return tstime.StdClock{} +} + var debugNoiseDial = envknob.RegisterBool("TS_DEBUG_NOISE_DIAL") // dialHost connects to the configured Dialer.Hostname and upgrades the -// connection into a controlbase.Conn. If addr is valid, then no DNS is used -// and the connection will be made to the provided address. -func (a *Dialer) dialHost(ctx context.Context, addr netip.Addr) (*ClientConn, error) { +// connection into a controlbase.Conn. 
+// +// If optAddr is valid, then no DNS is used and the connection will be made to the +// provided address. +func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, error) { // Create one shared context used by both port 80 and port 443 dials. // If port 80 is still in flight when 443 returns, this deferred cancel // will stop the port 80 dial. @@ -295,6 +320,9 @@ func (a *Dialer) dialHost(ctx context.Context, addr netip.Addr) (*ClientConn, er Host: net.JoinHostPort(a.Hostname, strDef(a.HTTPSPort, "443")), Path: serverUpgradePath, } + if a.HTTPSPort == NoPort { + u443 = nil + } type tryURLRes struct { u *url.URL // input (the URL conn+err are for/from) @@ -304,11 +332,11 @@ func (a *Dialer) dialHost(ctx context.Context, addr netip.Addr) (*ClientConn, er ch := make(chan tryURLRes) // must be unbuffered try := func(u *url.URL) { if debugNoiseDial() { - a.logf("trying noise dial (%v, %v) ...", u, addr) + a.logf("trying noise dial (%v, %v) ...", u, optAddr) } - cbConn, err := a.dialURL(ctx, u, addr) + cbConn, err := a.dialURL(ctx, u, optAddr) if debugNoiseDial() { - a.logf("noise dial (%v, %v) = (%v, %v)", u, addr, cbConn, err) + a.logf("noise dial (%v, %v) = (%v, %v)", u, optAddr, cbConn, err) } select { case ch <- tryURLRes{u, cbConn, err}: @@ -319,18 +347,24 @@ func (a *Dialer) dialHost(ctx context.Context, addr netip.Addr) (*ClientConn, er } } + forceTLS := a.forceNoise443() + // Start the plaintext HTTP attempt first, unless disabled by the envknob. - if !forceNoise443() { + if !forceTLS || u443 == nil { go try(u80) } // In case outbound port 80 blocked or MITM'ed poorly, start a backup timer // to dial port 443 if port 80 doesn't either succeed or fail quickly. 
- if a.Clock == nil { - a.Clock = tstime.StdClock{} + var try443Timer tstime.TimerController + if u443 != nil { + delay := a.httpsFallbackDelay() + if forceTLS { + delay = 0 + } + try443Timer = a.clock().AfterFunc(delay, func() { try(u443) }) + defer try443Timer.Stop() } - try443Timer := a.Clock.AfterFunc(a.httpsFallbackDelay(), func() { try(u443) }) - defer try443Timer.Stop() var err80, err443 error for { @@ -349,7 +383,7 @@ func (a *Dialer) dialHost(ctx context.Context, addr netip.Addr) (*ClientConn, er // Stop the fallback timer and run it immediately. We don't use // Timer.Reset(0) here because on AfterFuncs, that can run it // again. - if try443Timer.Stop() { + if try443Timer != nil && try443Timer.Stop() { go try(u443) } // else we lost the race and it started already which is what we want case u443: @@ -365,12 +399,15 @@ func (a *Dialer) dialHost(ctx context.Context, addr netip.Addr) (*ClientConn, er } // dialURL attempts to connect to the given URL. -func (a *Dialer) dialURL(ctx context.Context, u *url.URL, addr netip.Addr) (*ClientConn, error) { +// +// If optAddr is valid, then no DNS is used and the connection will be made to the +// provided address. +func (a *Dialer) dialURL(ctx context.Context, u *url.URL, optAddr netip.Addr) (*ClientConn, error) { init, cont, err := controlbase.ClientDeferred(a.MachineKey, a.ControlKey, a.ProtocolVersion) if err != nil { return nil, err } - netConn, err := a.tryURLUpgrade(ctx, u, addr, init) + netConn, err := a.tryURLUpgrade(ctx, u, optAddr, init) if err != nil { return nil, err } @@ -416,19 +453,20 @@ var macOSScreenTime = health.Register(&health.Warnable{ ImpactsConnectivity: true, }) -// tryURLUpgrade connects to u, and tries to upgrade it to a net.Conn. If addr -// is valid, then no DNS is used and the connection will be made to the -// provided address. +// tryURLUpgrade connects to u, and tries to upgrade it to a net.Conn. 
+// +// If optAddr is valid, then no DNS is used and the connection will be made to +// the provided address. // // Only the provided ctx is used, not a.ctx. -func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, addr netip.Addr, init []byte) (_ net.Conn, retErr error) { +func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Addr, init []byte) (_ net.Conn, retErr error) { var dns *dnscache.Resolver // If we were provided an address to dial, then create a resolver that just // returns that value; otherwise, fall back to DNS. - if addr.IsValid() { + if optAddr.IsValid() { dns = &dnscache.Resolver{ - SingleHostStaticResult: []netip.Addr{addr}, + SingleHostStaticResult: []netip.Addr{optAddr}, SingleHost: u.Hostname(), Logf: a.Logf, // not a.logf method; we want to propagate nil-ness } diff --git a/control/controlhttp/constants.go b/control/controlhttp/constants.go index 6b511626201cd..ea1725e76d438 100644 --- a/control/controlhttp/constants.go +++ b/control/controlhttp/constants.go @@ -32,6 +32,11 @@ const ( serverUpgradePath = "/ts2021" ) +// NoPort is a sentinel value for Dialer.HTTPSPort to indicate that HTTPS +// should not be tried on any port. It exists primarily for some localhost +// tests where the control plane only runs on HTTP. +const NoPort = "none" + // Dialer contains configuration on how to dial the Tailscale control server. type Dialer struct { // Hostname is the hostname to connect to, with no port number. @@ -62,6 +67,8 @@ type Dialer struct { // HTTPSPort is the port number to use when making a HTTPS connection. // // If not specified, this defaults to port 443. + // + // If "none" (NoPort), HTTPS is disabled. HTTPSPort string // Dialer is the dialer used to make outbound connections. @@ -95,8 +102,9 @@ type Dialer struct { omitCertErrorLogging bool testFallbackDelay time.Duration - // tstime.Clock is used instead of time package for methods such as time.Now. - // If not specified, will default to tstime.StdClock{}. 
+ // Clock, if non-nil, overrides the clock to use. + // If nil, tstime.StdClock is used. + // This exists primarily for tests. Clock tstime.Clock } diff --git a/control/controlhttp/server.go b/control/controlhttp/server.go index 6a0d2bc5682a9..7c3dd5618c4a3 100644 --- a/control/controlhttp/server.go +++ b/control/controlhttp/server.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ios + package controlhttp import ( diff --git a/derp/derp_client.go b/derp/derp_client.go index c2e7337791b60..7a646fa517940 100644 --- a/derp/derp_client.go +++ b/derp/derp_client.go @@ -121,6 +121,8 @@ func newClient(privateKey key.NodePrivate, nc Conn, brw *bufio.ReadWriter, logf return c, nil } +func (c *Client) PublicKey() key.NodePublic { return c.publicKey } + func (c *Client) recvServerKey() error { var buf [40]byte t, flen, err := readFrame(c.br, 1<<10, buf[:]) @@ -354,6 +356,10 @@ func (ReceivedPacket) msg() {} // PeerGoneMessage is a ReceivedMessage that indicates that the client // identified by the underlying public key is not connected to this // server. +// +// It has only historically been sent by the server when the client +// connection count decremented from 1 to 0 and not from e.g. 2 to 1. +// See https://github.com/tailscale/tailscale/issues/13566 for details. type PeerGoneMessage struct { Peer key.NodePublic Reason PeerGoneReasonType @@ -361,8 +367,13 @@ type PeerGoneMessage struct { func (PeerGoneMessage) msg() {} -// PeerPresentMessage is a ReceivedMessage that indicates that the client -// is connected to the server. (Only used by trusted mesh clients) +// PeerPresentMessage is a ReceivedMessage that indicates that the client is +// connected to the server. (Only used by trusted mesh clients) +// +// It will be sent to client watchers for every new connection from a client, +// even if the client's already connected with that public key. 
+// See https://github.com/tailscale/tailscale/issues/13566 for PeerPresentMessage +// and PeerGoneMessage not being 1:1. type PeerPresentMessage struct { // Key is the public key of the client. Key key.NodePublic diff --git a/derp/derp_server.go b/derp/derp_server.go index f38ae66211f85..8c5d6e890567b 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -46,6 +46,7 @@ import ( "tailscale.com/tstime/rate" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/mak" "tailscale.com/util/set" "tailscale.com/util/slicesx" "tailscale.com/version" @@ -144,6 +145,7 @@ type Server struct { tcpRtt metrics.LabelMap // histogram meshUpdateBatchSize *metrics.Histogram meshUpdateLoopCount *metrics.Histogram + bufferedWriteFrames *metrics.Histogram // how many sendLoop frames (or groups of related frames) get written per flush // verifyClientsLocalTailscaled only accepts client connections to the DERP // server if the clientKey is a known peer in the network, as specified by a @@ -164,11 +166,11 @@ type Server struct { // remote). If the value is non-nil, it's remote (+ maybe also // local). clientsMesh map[key.NodePublic]PacketForwarder - // sentTo tracks which peers have sent to which other peers, - // and at which connection number. This isn't on sclient - // because it includes intra-region forwarded packets as the - // src. - sentTo map[key.NodePublic]map[key.NodePublic]int64 // src => dst => dst's latest sclient.connNum + // peerGoneWatchers is the set of watchers that subscribed to a + // peer disconnecting from the region overall. When a peer + // is gone from the region, we notify all of these watchers, + // calling their funcs in a new goroutine. 
+ peerGoneWatchers map[key.NodePublic]set.HandleSet[func(key.NodePublic)] // maps from netip.AddrPort to a client's public key keyOfAddr map[netip.AddrPort]key.NodePublic @@ -343,11 +345,12 @@ func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server { netConns: map[Conn]chan struct{}{}, memSys0: ms.Sys, watchers: set.Set[*sclient]{}, - sentTo: map[key.NodePublic]map[key.NodePublic]int64{}, + peerGoneWatchers: map[key.NodePublic]set.HandleSet[func(key.NodePublic)]{}, avgQueueDuration: new(uint64), tcpRtt: metrics.LabelMap{Label: "le"}, meshUpdateBatchSize: metrics.NewHistogram([]float64{0, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000}), meshUpdateLoopCount: metrics.NewHistogram([]float64{0, 1, 2, 5, 10, 20, 50, 100}), + bufferedWriteFrames: metrics.NewHistogram([]float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25, 50, 100}), keyOfAddr: map[netip.AddrPort]key.NodePublic{}, clock: tstime.StdClock{}, } @@ -689,6 +692,40 @@ func (s *Server) unregisterClient(c *sclient) { } } +// addPeerGoneFromRegionWatcher adds a function to be called when peer is gone +// from the region overall. It returns a handle that can be used to remove the +// watcher later. +// +// The provided f func is usually [sclient.onPeerGoneFromRegion], added by +// [sclient.noteSendFromSrc]; this func doesn't take a whole *sclient to make it +// clear what has access to what. +func (s *Server) addPeerGoneFromRegionWatcher(peer key.NodePublic, f func(key.NodePublic)) set.Handle { + s.mu.Lock() + defer s.mu.Unlock() + hset, ok := s.peerGoneWatchers[peer] + if !ok { + hset = set.HandleSet[func(key.NodePublic)]{} + s.peerGoneWatchers[peer] = hset + } + return hset.Add(f) +} + +// removePeerGoneFromRegionWatcher removes a peer watcher previously added by +// addPeerGoneFromRegionWatcher, using the handle returned by +// addPeerGoneFromRegionWatcher. 
+func (s *Server) removePeerGoneFromRegionWatcher(peer key.NodePublic, h set.Handle) { + s.mu.Lock() + defer s.mu.Unlock() + hset, ok := s.peerGoneWatchers[peer] + if !ok { + return + } + delete(hset, h) + if len(hset) == 0 { + delete(s.peerGoneWatchers, peer) + } +} + // notePeerGoneFromRegionLocked sends peerGone frames to parties that // key has sent to previously (whether those sends were from a local // client or forwarded). It must only be called after the key has @@ -702,18 +739,11 @@ func (s *Server) notePeerGoneFromRegionLocked(key key.NodePublic) { // so they can drop their route entries to us (issue 150) // or move them over to the active client (in case a replaced client // connection is being unregistered). - for pubKey, connNum := range s.sentTo[key] { - set, ok := s.clients[pubKey] - if !ok { - continue - } - set.ForeachClient(func(peer *sclient) { - if peer.connNum == connNum { - go peer.requestPeerGoneWrite(key, PeerGoneReasonDisconnected) - } - }) + set := s.peerGoneWatchers[key] + for _, f := range set { + go f(key) } - delete(s.sentTo, key) + delete(s.peerGoneWatchers, key) } // requestPeerGoneWriteLimited sends a request to write a "peer gone" @@ -1004,9 +1034,6 @@ func (c *sclient) handleFrameForwardPacket(ft frameType, fl uint32) error { dstLen = set.Len() dst = set.activeClient.Load() } - if dst != nil { - s.notePeerSendLocked(srcKey, dst) - } s.mu.Unlock() if dst == nil { @@ -1029,18 +1056,6 @@ func (c *sclient) handleFrameForwardPacket(ft frameType, fl uint32) error { }) } -// notePeerSendLocked records that src sent to dst. We keep track of -// that so when src disconnects, we can tell dst (if it's still -// around) that src is gone (a peerGone frame). -func (s *Server) notePeerSendLocked(src key.NodePublic, dst *sclient) { - m, ok := s.sentTo[src] - if !ok { - m = map[key.NodePublic]int64{} - s.sentTo[src] = m - } - m[dst.key] = dst.connNum -} - // handleFrameSendPacket reads a "send packet" frame from the client. 
func (c *sclient) handleFrameSendPacket(ft frameType, fl uint32) error { s := c.s @@ -1059,9 +1074,7 @@ func (c *sclient) handleFrameSendPacket(ft frameType, fl uint32) error { dstLen = set.Len() dst = set.activeClient.Load() } - if dst != nil { - s.notePeerSendLocked(c.key, dst) - } else if dstLen < 1 { + if dst == nil && dstLen < 1 { fwd = s.clientsMesh[dstKey] } s.mu.Unlock() @@ -1181,6 +1194,13 @@ func (c *sclient) sendPkt(dst *sclient, p pkt) error { return nil } +// onPeerGoneFromRegion is the callback registered with the Server to be +// notified (in a new goroutine) whenever a peer has disconnected from all DERP +// nodes in the current region. +func (c *sclient) onPeerGoneFromRegion(peer key.NodePublic) { + c.requestPeerGoneWrite(peer, PeerGoneReasonDisconnected) +} + // requestPeerGoneWrite sends a request to write a "peer gone" frame // with an explanation of why it is gone. It blocks until either the // write request is scheduled, or the client has closed. @@ -1494,8 +1514,9 @@ type sclient struct { connectedAt time.Time preferred bool - // Owned by sender, not thread-safe. - bw *lazyBufioWriter + // Owned by sendLoop, not thread-safe. + sawSrc map[key.NodePublic]set.Handle + bw *lazyBufioWriter // Guarded by s.mu // @@ -1598,34 +1619,48 @@ func (c *sclient) recordQueueTime(enqueuedAt time.Time) { } } -func (c *sclient) sendLoop(ctx context.Context) error { - defer func() { - // If the sender shuts down unilaterally due to an error, close so - // that the receive loop unblocks and cleans up the rest. - c.nc.Close() - - // Drain the send queue to count dropped packets - for { - select { - case pkt := <-c.sendQueue: - c.s.recordDrop(pkt.bs, pkt.src, c.key, dropReasonGoneDisconnected) - case pkt := <-c.discoSendQueue: - c.s.recordDrop(pkt.bs, pkt.src, c.key, dropReasonGoneDisconnected) - default: - return - } +// onSendLoopDone is called when the send loop is done +// to clean up. +// +// It must only be called from the sendLoop goroutine. 
+func (c *sclient) onSendLoopDone() { + // If the sender shuts down unilaterally due to an error, close so + // that the receive loop unblocks and cleans up the rest. + c.nc.Close() + + // Clean up watches. + for peer, h := range c.sawSrc { + c.s.removePeerGoneFromRegionWatcher(peer, h) + } + + // Drain the send queue to count dropped packets + for { + select { + case pkt := <-c.sendQueue: + c.s.recordDrop(pkt.bs, pkt.src, c.key, dropReasonGoneDisconnected) + case pkt := <-c.discoSendQueue: + c.s.recordDrop(pkt.bs, pkt.src, c.key, dropReasonGoneDisconnected) + default: + return } - }() + } + +} + +func (c *sclient) sendLoop(ctx context.Context) error { + defer c.onSendLoopDone() jitter := rand.N(5 * time.Second) keepAliveTick, keepAliveTickChannel := c.s.clock.NewTicker(keepAlive + jitter) defer keepAliveTick.Stop() var werr error // last write error + inBatch := -1 // for bufferedWriteFrames for { if werr != nil { return werr } + inBatch++ // First, a non-blocking select (with a default) that // does as many non-flushing writes as possible. 
select { @@ -1657,6 +1692,10 @@ func (c *sclient) sendLoop(ctx context.Context) error { if werr = c.bw.Flush(); werr != nil { return werr } + if inBatch != 0 { // the first loop will almost always hit default & be size zero + c.s.bufferedWriteFrames.Observe(float64(inBatch)) + inBatch = 0 + } } // Then a blocking select with same: @@ -1667,7 +1706,6 @@ func (c *sclient) sendLoop(ctx context.Context) error { werr = c.sendPeerGone(msg.peer, msg.reason) case <-c.meshUpdate: werr = c.sendMeshUpdates() - continue case msg := <-c.sendQueue: werr = c.sendPacket(msg.src, msg.bs) c.recordQueueTime(msg.enqueuedAt) @@ -1676,7 +1714,6 @@ func (c *sclient) sendLoop(ctx context.Context) error { c.recordQueueTime(msg.enqueuedAt) case msg := <-c.sendPongCh: werr = c.sendPong(msg) - continue case <-keepAliveTickChannel: werr = c.sendKeepAlive() } @@ -1811,6 +1848,7 @@ func (c *sclient) sendPacket(srcKey key.NodePublic, contents []byte) (err error) pktLen := len(contents) if withKey { pktLen += key.NodePublicRawLen + c.noteSendFromSrc(srcKey) } if err = writeFrameHeader(c.bw.bw(), frameRecvPacket, uint32(pktLen)); err != nil { return err @@ -1824,6 +1862,18 @@ func (c *sclient) sendPacket(srcKey key.NodePublic, contents []byte) (err error) return err } +// noteSendFromSrc notes that we are about to write a packet +// from src to sclient. +// +// It must only be called from the sendLoop goroutine. +func (c *sclient) noteSendFromSrc(src key.NodePublic) { + if _, ok := c.sawSrc[src]; ok { + return + } + h := c.s.addPeerGoneFromRegionWatcher(src, c.onPeerGoneFromRegion) + mak.Set(&c.sawSrc, src, h) +} + // AddPacketForwarder registers fwd as a packet forwarder for dst. // fwd must be comparable. 
func (s *Server) AddPacketForwarder(dst key.NodePublic, fwd PacketForwarder) { @@ -2016,6 +2066,7 @@ func (s *Server) ExpVar() expvar.Var { m.Set("counter_tcp_rtt", &s.tcpRtt) m.Set("counter_mesh_update_batch_size", s.meshUpdateBatchSize) m.Set("counter_mesh_update_loop_count", s.meshUpdateLoopCount) + m.Set("counter_buffered_write_frames", s.bufferedWriteFrames) var expvarVersion expvar.String expvarVersion.Set(version.Long()) m.Set("version", &expvarVersion) diff --git a/derp/derp_test.go b/derp/derp_test.go index 72de265529ad1..9185194dd79cf 100644 --- a/derp/derp_test.go +++ b/derp/derp_test.go @@ -1311,6 +1311,72 @@ func TestLimiter(t *testing.T) { } } +// BenchmarkConcurrentStreams exercises mutex contention on a +// single Server instance with multiple concurrent client flows. +func BenchmarkConcurrentStreams(b *testing.B) { + serverPrivateKey := key.NewNode() + s := NewServer(serverPrivateKey, logger.Discard) + defer s.Close() + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + b.Fatal(err) + } + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + for ctx.Err() == nil { + connIn, err := ln.Accept() + if err != nil { + if ctx.Err() != nil { + return + } + b.Error(err) + return + } + + brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn)) + go s.Accept(ctx, connIn, brwServer, "test-client") + } + }() + + newClient := func(t testing.TB) *Client { + t.Helper() + connOut, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + b.Fatal(err) + } + t.Cleanup(func() { connOut.Close() }) + + k := key.NewNode() + + brw := bufio.NewReadWriter(bufio.NewReader(connOut), bufio.NewWriter(connOut)) + client, err := NewClient(k, connOut, brw, logger.Discard) + if err != nil { + b.Fatalf("client: %v", err) + } + return client + } + + b.RunParallel(func(pb *testing.PB) { + c1, c2 := newClient(b), newClient(b) + const packetSize = 100 + msg := make([]byte, packetSize) 
+ for pb.Next() { + if err := c1.Send(c2.PublicKey(), msg); err != nil { + b.Fatal(err) + } + _, err := c2.Recv() + if err != nil { + return + } + } + }) +} + func BenchmarkSendRecv(b *testing.B) { for _, size := range []int{10, 100, 1000, 10000} { b.Run(fmt.Sprintf("msgsize=%d", size), func(b *testing.B) { benchmarkSendRecvSize(b, size) }) diff --git a/derp/derphttp/mesh_client.go b/derp/derphttp/mesh_client.go index c7e48c733be37..66b8c166eeb37 100644 --- a/derp/derphttp/mesh_client.go +++ b/derp/derphttp/mesh_client.go @@ -26,6 +26,10 @@ var testHookWatchLookConnectResult func(connectError error, wasSelfConnect bool) // returns. // // Otherwise, the add and remove funcs are called as clients come & go. +// Note that add is called for every new connection and remove is only +// called for the final disconnection. See https://github.com/tailscale/tailscale/issues/13566. +// This behavior will likely change. Callers should do their own accounting +// and dup suppression as needed. // // infoLogf, if non-nil, is the logger to write periodic status updates about // how many peers are on the server. Error log output is set to the c's logger, diff --git a/docs/windows/policy/en-US/tailscale.adml b/docs/windows/policy/en-US/tailscale.adml index dc7b50f195f24..7a658422cd7f6 100644 --- a/docs/windows/policy/en-US/tailscale.adml +++ b/docs/windows/policy/en-US/tailscale.adml @@ -14,6 +14,7 @@ Tailscale version 1.56.0 and later (full support), some earlier versions (partial support) Tailscale version 1.58.0 and later Tailscale version 1.62.0 and later + Tailscale version 1.74.0 and later Tailscale UI customization Settings @@ -42,6 +43,20 @@ To require logging in to a particular tailnet, add the "required:" prefix, such If you configure this policy, set it to the name of the tailnet, possibly with the "required:" prefix, as described above. 
If you disable this policy, the standard login page will be used.]]> + Specify the auth key to authenticate devices without user interaction + Require using a specific Exit Node Tailnet: + + + + + diff --git a/docs/windows/policy/tailscale.admx b/docs/windows/policy/tailscale.admx index cb7f1cf807471..e70f124ed1a36 100644 --- a/docs/windows/policy/tailscale.admx +++ b/docs/windows/policy/tailscale.admx @@ -46,6 +46,10 @@ displayName="$(string.SINCE_V1_62)"> + + + @@ -79,6 +83,13 @@ + + + + + + + diff --git a/envknob/envknob.go b/envknob/envknob.go index f1925ccf449b2..59a6d90af213b 100644 --- a/envknob/envknob.go +++ b/envknob/envknob.go @@ -17,6 +17,7 @@ package envknob import ( "bufio" + "errors" "fmt" "io" "log" @@ -503,7 +504,7 @@ func ApplyDiskConfigError() error { return applyDiskConfigErr } // // On macOS, use one of: // -// - ~/Library/Containers/io.tailscale.ipn.macsys/Data/tailscaled-env.txt +// - /private/var/root/Library/Containers/io.tailscale.ipn.macsys.network-extension/Data/tailscaled-env.txt // for standalone macOS GUI builds // - ~/Library/Containers/io.tailscale.ipn.macos.network-extension/Data/tailscaled-env.txt // for App Store builds @@ -533,44 +534,73 @@ func ApplyDiskConfig() (err error) { return applyKeyValueEnv(f) } - name := getPlatformEnvFile() - if name == "" { + names := getPlatformEnvFiles() + if len(names) == 0 { return nil } - f, err = os.Open(name) - if os.IsNotExist(err) { - return nil - } - if err != nil { - return err + + var errs []error + for _, name := range names { + f, err = os.Open(name) + if os.IsNotExist(err) { + continue + } + if err != nil { + errs = append(errs, err) + continue + } + defer f.Close() + + return applyKeyValueEnv(f) } - defer f.Close() - return applyKeyValueEnv(f) + + // If we have any errors, return them; if all errors are such that + // os.IsNotExist(err) returns true, then errs is empty and we will + // return nil. + return errors.Join(errs...) 
} -// getPlatformEnvFile returns the current platform's path to an optional -// tailscaled-env.txt file. It returns an empty string if none is defined -// for the platform. -func getPlatformEnvFile() string { +// getPlatformEnvFiles returns a list of paths to the current platform's +// optional tailscaled-env.txt file. It returns an empty list if none is +// defined for the platform. +func getPlatformEnvFiles() []string { switch runtime.GOOS { case "windows": - return filepath.Join(os.Getenv("ProgramData"), "Tailscale", "tailscaled-env.txt") + return []string{ + filepath.Join(os.Getenv("ProgramData"), "Tailscale", "tailscaled-env.txt"), + } case "linux": if distro.Get() == distro.Synology { - return "/etc/tailscale/tailscaled-env.txt" + return []string{"/etc/tailscale/tailscaled-env.txt"} } case "darwin": if version.IsSandboxedMacOS() { // the two GUI variants (App Store or separate download) - // This will be user-visible as ~/Library/Containers/$VARIANT/Data/tailscaled-env.txt - // where $VARIANT is "io.tailscale.ipn.macsys" for macsys (downloadable mac GUI builds) - // or "io.tailscale.ipn.macos.network-extension" for App Store builds. - return filepath.Join(os.Getenv("HOME"), "tailscaled-env.txt") + // On the App Store variant, the home directory is set + // to something like: + // ~/Library/Containers/io.tailscale.ipn.macos.network-extension/Data + // + // On the macsys (downloadable Mac GUI) variant, the + // home directory can be unset, but we have a working + // directory that looks like: + // /private/var/root/Library/Containers/io.tailscale.ipn.macsys.network-extension/Data + // + // Try both and see if we can find the file in either + // location. 
+ var candidates []string + if home := os.Getenv("HOME"); home != "" { + candidates = append(candidates, filepath.Join(home, "tailscaled-env.txt")) + } + if wd, err := os.Getwd(); err == nil { + candidates = append(candidates, filepath.Join(wd, "tailscaled-env.txt")) + } + + return candidates } else { // Open source / homebrew variable, running tailscaled-on-macOS. - return "/etc/tailscale/tailscaled-env.txt" + return []string{"/etc/tailscale/tailscaled-env.txt"} } } - return "" + return nil } // applyKeyValueEnv reads key=value lines r and calls Setenv for each. diff --git a/flake.nix b/flake.nix index e6c6b1eac092b..95d5c3035c7a9 100644 --- a/flake.nix +++ b/flake.nix @@ -120,6 +120,10 @@ perl go_1_23 yarn + + # qemu and e2fsprogs are needed for natlab + qemu + e2fsprogs ]; }; }; diff --git a/go.mod b/go.mod index 8c46faa6c1d44..464db8313b5fd 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module tailscale.com -go 1.23 +go 1.23.1 require ( filippo.io/mkcert v1.4.4 @@ -40,7 +40,7 @@ require ( github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da github.com/golang/snappy v0.0.4 - github.com/golangci/golangci-lint v1.52.2 + github.com/golangci/golangci-lint v1.57.1 github.com/google/go-cmp v0.6.0 github.com/google/go-containerregistry v0.18.0 github.com/google/gopacket v1.1.19 @@ -121,27 +121,50 @@ require ( ) require ( + github.com/4meepo/tagalign v1.3.3 // indirect + github.com/Antonboom/testifylint v1.2.0 // indirect + github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 // indirect github.com/Masterminds/sprig v2.22.0+incompatible // indirect github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect + github.com/alecthomas/go-check-sumtype v0.1.4 // indirect + github.com/alexkohler/nakedret/v2 v2.0.4 // indirect github.com/bits-and-blooms/bitset v1.13.0 // indirect + github.com/bombsimon/wsl/v4 v4.2.1 // indirect + github.com/butuzov/mirror 
v1.1.0 // indirect + github.com/catenacyber/perfsprint v0.7.1 // indirect + github.com/ccojocar/zxcvbn-go v1.0.2 // indirect + github.com/ckaznocha/intrange v0.1.0 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/dave/astrid v0.0.0-20170323122508-8c2895878b14 // indirect github.com/dave/brenda v1.1.0 // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/ghostiam/protogetter v0.3.5 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect github.com/gobuffalo/flect v1.0.2 // indirect github.com/goccy/go-yaml v1.12.0 // indirect github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect + github.com/golangci/plugin-module-register v0.1.1 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd // indirect github.com/gorilla/securecookie v1.1.2 // indirect + github.com/jjti/go-spancheck v0.5.3 // indirect + github.com/karamaru-alpha/copyloopvar v1.0.8 // indirect + github.com/macabu/inamedparam v0.1.3 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect + github.com/xen0n/gosmopolitan v1.2.2 // indirect + github.com/ykadowak/zerologlint v0.1.5 // indirect + go-simpler.org/musttag v0.9.0 // indirect + go-simpler.org/sloglint v0.5.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect go.opentelemetry.io/otel v1.22.0 // indirect go.opentelemetry.io/otel/metric v1.22.0 // indirect go.opentelemetry.io/otel/trace v1.22.0 // indirect + go.uber.org/automaxprocs v1.5.3 // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect ) @@ -150,22 +173,20 @@ require ( 4d63.com/gochecknoglobals v0.2.1 // indirect dario.cat/mergo v1.0.0 
// indirect filippo.io/edwards25519 v1.1.0 // indirect - github.com/Abirdcfly/dupword v0.0.11 // indirect - github.com/AlekSi/pointer v1.2.0 // indirect - github.com/Antonboom/errname v0.1.9 // indirect - github.com/Antonboom/nilnil v0.1.4 // indirect + github.com/Abirdcfly/dupword v0.0.14 // indirect + github.com/AlekSi/pointer v1.2.0 + github.com/Antonboom/errname v0.1.12 // indirect + github.com/Antonboom/nilnil v0.1.7 // indirect github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect github.com/Djarvur/go-err113 v0.1.0 // indirect - github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver v1.5.0 // indirect github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect - github.com/OpenPeeDeeP/depguard v1.1.1 // indirect github.com/ProtonMail/go-crypto v1.0.0 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect github.com/alingse/asasalint v0.0.11 // indirect - github.com/ashanbrown/forbidigo v1.5.1 // indirect + github.com/ashanbrown/forbidigo v1.6.0 // indirect github.com/ashanbrown/makezero v1.1.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.16.16 // indirect @@ -183,31 +204,29 @@ require ( github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect github.com/aws/smithy-go v1.19.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bkielbasa/cyclop v1.2.0 // indirect + github.com/bkielbasa/cyclop v1.2.1 // indirect github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb // indirect github.com/blizzy78/varnamelen v0.8.0 // indirect - github.com/bombsimon/wsl/v3 v3.4.0 // indirect - github.com/breml/bidichk v0.2.4 // indirect - github.com/breml/errchkjson v0.3.1 // indirect - github.com/butuzov/ireturn v0.2.0 // indirect + github.com/breml/bidichk v0.2.7 // indirect + github.com/breml/errchkjson 
v0.3.6 // indirect + github.com/butuzov/ireturn v0.3.0 // indirect github.com/cavaliergopher/cpio v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/charithe/durationcheck v0.0.10 // indirect - github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 // indirect + github.com/chavacava/garif v0.1.0 // indirect github.com/cloudflare/circl v1.3.7 // indirect github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect github.com/curioswitch/go-reassign v0.2.0 // indirect - github.com/daixiang0/gci v0.10.1 // indirect + github.com/daixiang0/gci v0.12.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/denis-tingaikin/go-header v0.4.3 // indirect + github.com/denis-tingaikin/go-header v0.5.0 // indirect github.com/docker/cli v25.0.0+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker v26.1.4+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.1 // indirect github.com/emicklei/go-restful/v3 v3.11.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect - github.com/esimonov/ifshort v1.0.4 // indirect - github.com/ettle/strcase v0.1.1 // indirect + github.com/ettle/strcase v0.2.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fatih/color v1.17.0 // indirect @@ -215,7 +234,7 @@ require ( github.com/firefart/nonamedreturns v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 github.com/fzipp/gocyclo v0.6.0 // indirect - github.com/go-critic/go-critic v0.8.0 // indirect + github.com/go-critic/go-critic v0.11.2 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.5.0 // indirect github.com/go-git/go-git/v5 v5.11.0 // indirect @@ -225,7 +244,7 @@ require ( github.com/go-openapi/swag v0.22.7 // indirect github.com/go-toolsmith/astcast v1.1.0 // 
indirect github.com/go-toolsmith/astcopy v1.1.0 // indirect - github.com/go-toolsmith/astequal v1.1.0 // indirect + github.com/go-toolsmith/astequal v1.2.0 // indirect github.com/go-toolsmith/astfmt v1.1.0 // indirect github.com/go-toolsmith/astp v1.1.0 // indirect github.com/go-toolsmith/strparse v1.1.0 // indirect @@ -235,20 +254,16 @@ require ( github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect - github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect - github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 // indirect - github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect - github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect - github.com/golangci/misspell v0.4.0 // indirect - github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect - github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect + github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e // indirect + github.com/golangci/misspell v0.4.1 // indirect + github.com/golangci/revgrep v0.5.2 // indirect + github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed // indirect github.com/google/btree v1.1.2 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/goterm v0.0.0-20200907032337-555d40f16ae2 // indirect github.com/google/rpmpack v0.5.0 // indirect - github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28 // indirect + github.com/gordonklaus/ineffassign v0.1.0 // indirect github.com/goreleaser/chglog v0.5.0 // indirect github.com/goreleaser/fileglob v1.3.0 // indirect github.com/gorilla/csrf v1.7.2 @@ -256,8 +271,6 @@ require ( github.com/gostaticanalysis/comment v1.4.2 // indirect github.com/gostaticanalysis/forcetypeassert 
v0.1.0 // indirect github.com/gostaticanalysis/nilerr v0.1.1 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect @@ -265,24 +278,22 @@ require ( github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect - github.com/jgautheron/goconst v1.5.1 // indirect + github.com/jgautheron/goconst v1.7.0 // indirect github.com/jingyugao/rowserrcheck v1.1.1 // indirect github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/julz/importas v0.1.0 // indirect - github.com/junk1tm/musttag v0.5.0 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/kisielk/errcheck v1.6.3 // indirect - github.com/kisielk/gotool v1.0.0 // indirect + github.com/kisielk/errcheck v1.7.0 // indirect github.com/kkHAIKE/contextcheck v1.1.4 // indirect github.com/klauspost/pgzip v1.2.6 // indirect github.com/kr/fs v0.1.0 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/kulti/thelper v0.6.3 // indirect - github.com/kunwardeep/paralleltest v1.0.6 // indirect + github.com/kunwardeep/paralleltest v1.0.10 // indirect github.com/kyoh86/exportloopref v0.1.11 // indirect github.com/ldez/gomoddirectives v0.2.3 // indirect github.com/ldez/tagliatelle v0.5.0 // indirect @@ -294,9 +305,8 @@ require ( github.com/maratori/testpackage v1.1.1 // indirect github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect - github.com/mbilski/exhaustivestruct v1.2.0 // indirect 
github.com/mdlayher/socket v0.5.0 - github.com/mgechev/revive v1.3.1 // indirect + github.com/mgechev/revive v1.3.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect @@ -306,45 +316,43 @@ require ( github.com/moricho/tparallel v0.3.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nakabonne/nestif v0.3.1 // indirect - github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 // indirect - github.com/nishanths/exhaustive v0.10.0 // indirect + github.com/nishanths/exhaustive v0.12.0 // indirect github.com/nishanths/predeclared v0.2.2 // indirect - github.com/nunnatsa/ginkgolinter v0.11.2 // indirect + github.com/nunnatsa/ginkgolinter v0.16.1 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0-rc6 // indirect - github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/pelletier/go-toml/v2 v2.2.0 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/polyfloyd/go-errorlint v1.4.1 // indirect - github.com/prometheus/client_model v0.5.0 // indirect + github.com/polyfloyd/go-errorlint v1.4.8 // indirect + github.com/prometheus/client_model v0.5.0 github.com/prometheus/procfs v0.12.0 // indirect - github.com/quasilyte/go-ruleguard v0.3.19 // indirect + github.com/quasilyte/go-ruleguard v0.4.2 // indirect github.com/quasilyte/gogrep v0.5.0 // indirect github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect 
github.com/rivo/uniseg v0.4.4 // indirect - github.com/rogpeppe/go-internal v1.11.0 // indirect - github.com/ryancurrah/gomodguard v1.3.0 // indirect - github.com/ryanrolds/sqlclosecheck v0.4.0 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/ryancurrah/gomodguard v1.3.1 // indirect + github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect github.com/sashamelentyev/interfacebloat v1.1.0 // indirect - github.com/sashamelentyev/usestdlibvars v1.23.0 // indirect - github.com/securego/gosec/v2 v2.15.0 // indirect + github.com/sashamelentyev/usestdlibvars v1.25.0 // indirect + github.com/securego/gosec/v2 v2.19.0 // indirect github.com/sergi/go-diff v1.3.1 // indirect github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect github.com/shopspring/decimal v1.3.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sivchari/containedctx v1.0.3 // indirect - github.com/sivchari/nosnakecase v1.7.0 // indirect github.com/sivchari/tenv v1.7.1 // indirect github.com/skeema/knownhosts v1.2.1 // indirect github.com/sonatard/noctx v0.0.2 // indirect github.com/sourcegraph/go-diff v0.7.0 // indirect - github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect @@ -358,25 +366,25 @@ require ( github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 github.com/tdakkota/asciicheck v0.2.0 // indirect - github.com/tetafro/godot v1.4.11 // indirect + github.com/tetafro/godot v1.4.16 // indirect github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 // indirect github.com/timonwong/loggercheck v0.9.4 // indirect - github.com/tomarrell/wrapcheck/v2 v2.8.1 // indirect + github.com/tomarrell/wrapcheck/v2 v2.8.3 // 
indirect github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e // indirect github.com/ulikunitz/xz v0.5.11 // indirect - github.com/ultraware/funlen v0.0.3 // indirect - github.com/ultraware/whitespace v0.0.5 // indirect - github.com/uudashr/gocognit v1.0.6 // indirect + github.com/ultraware/funlen v0.1.0 // indirect + github.com/ultraware/whitespace v0.1.0 // indirect + github.com/uudashr/gocognit v1.1.2 // indirect github.com/vbatts/tar-split v0.11.5 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/yagipy/maintidx v1.0.0 // indirect github.com/yeya24/promlinter v0.2.0 // indirect - gitlab.com/bosi/decorder v0.2.3 // indirect + gitlab.com/bosi/decorder v0.4.1 // indirect gitlab.com/digitalxero/go-conventional-commit v1.0.7 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp/typeparams v0.0.0-20240119083558-1b970713d09a // indirect + golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect golang.org/x/image v0.18.0 // indirect golang.org/x/text v0.16.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect @@ -392,10 +400,8 @@ require ( k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 - mvdan.cc/gofumpt v0.5.0 // indirect - mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect - mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect - mvdan.cc/unparam v0.0.0-20230312165513-e84e2d14e3b8 // indirect + mvdan.cc/gofumpt v0.6.0 // indirect + mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 94ea0ff912694..549f559d001fd 100644 --- a/go.sum +++ b/go.sum @@ -7,7 +7,6 @@ cloud.google.com/go v0.34.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -18,9 +17,6 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -38,7 +34,6 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dario.cat/mergo v1.0.0 
h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -48,14 +43,18 @@ filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc= filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA= fyne.io/systray v1.11.0 h1:D9HISlxSkx+jHSniMBR6fCFOUjk1x/OOOJLa9lJYAKg= fyne.io/systray v1.11.0/go.mod h1:RVwqP9nYMo7h5zViCBHri2FgjXF7H2cub7MAq4NSoLs= -github.com/Abirdcfly/dupword v0.0.11 h1:z6v8rMETchZXUIuHxYNmlUAuKuB21PeaSymTed16wgU= -github.com/Abirdcfly/dupword v0.0.11/go.mod h1:wH8mVGuf3CP5fsBTkfWwwwKTjDnVVCxtU8d8rgeVYXA= +github.com/4meepo/tagalign v1.3.3 h1:ZsOxcwGD/jP4U/aw7qeWu58i7dwYemfy5Y+IF1ACoNw= +github.com/4meepo/tagalign v1.3.3/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE= +github.com/Abirdcfly/dupword v0.0.14 h1:3U4ulkc8EUo+CaT105/GJ1BQwtgyj6+VaBVbAX11Ba8= +github.com/Abirdcfly/dupword v0.0.14/go.mod h1:VKDAbxdY8YbKUByLGg8EETzYSuC4crm9WwI6Y3S0cLI= github.com/AlekSi/pointer v1.2.0 h1:glcy/gc4h8HnG2Z3ZECSzZ1IX1x2JxRVuDzaJwQE0+w= github.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0= -github.com/Antonboom/errname v0.1.9 h1:BZDX4r3l4TBZxZ2o2LNrlGxSHran4d1u4veZdoORTT4= -github.com/Antonboom/errname v0.1.9/go.mod h1:nLTcJzevREuAsgTbG85UsuiWpMpAqbKD1HNZ29OzE58= -github.com/Antonboom/nilnil v0.1.4 h1:yWIfwbCRDpJiJvs7Quz55dzeXCgORQyAG29N9/J5H2Q= -github.com/Antonboom/nilnil v0.1.4/go.mod h1:iOov/7gRcXkeEU+EMGpBu2ORih3iyVEiWjeste1SJm8= +github.com/Antonboom/errname v0.1.12 h1:oh9ak2zUtsLp5oaEd/erjB4GPu9w19NyoIskZClDcQY= +github.com/Antonboom/errname v0.1.12/go.mod h1:bK7todrzvlaZoQagP1orKzWXv59X/x0W0Io2XT1Ssro= +github.com/Antonboom/nilnil v0.1.7 h1:ofgL+BA7vlA1K2wNQOsHzLJ2Pw5B5DpWRLdDAVvvTow= +github.com/Antonboom/nilnil v0.1.7/go.mod h1:TP+ScQWVEq0eSIxqU8CbdT5DFWoHp0MbP+KMUO1BKYQ= 
+github.com/Antonboom/testifylint v1.2.0 h1:015bxD8zc5iY8QwTp4+RG9I4kIbqwvGX9TrBbb7jGdM= +github.com/Antonboom/testifylint v1.2.0/go.mod h1:rkmEqjqVnHDRNsinyN6fPSLnoajzFwsCcguJgwADBkw= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -66,8 +65,8 @@ github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Djarvur/go-err113 v0.1.0 h1:uCRZZOdMQ0TZPHYTdYpoC0bLYJKPEHPUJ8MeAa51lNU= github.com/Djarvur/go-err113 v0.1.0/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 h1:+r1rSv4gvYn0wmRjC8X7IAzX8QezqtFV9m0MUHFJgts= -github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0/go.mod h1:b3g59n2Y+T5xmcxJL+UEG2f8cQploZm1mR/v6BW0mU0= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 h1:sATXp1x6/axKxz2Gjxv8MALP0bXaNRfQinEwyfMcx8c= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0/go.mod h1:Nl76DrGNJTA1KJ0LePKBw/vznBX1EHbAZX8mwjR82nI= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= @@ -82,8 +81,8 @@ github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBa github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/OpenPeeDeeP/depguard v1.1.1 
h1:TSUznLjvp/4IUP+OQ0t/4jF4QUyxIcVX8YnghZdunyA= -github.com/OpenPeeDeeP/depguard v1.1.1/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc= +github.com/OpenPeeDeeP/depguard/v2 v2.2.0 h1:vDfG60vDtIuf0MEOhmLlLLSzqaRM8EMcgJPdp74zmpA= +github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ= github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k= @@ -92,6 +91,12 @@ github.com/ProtonMail/gopenpgp/v2 v2.7.1 h1:Awsg7MPc2gD3I7IFac2qE3Gdls0lZW8SzrFZ github.com/ProtonMail/gopenpgp/v2 v2.7.1/go.mod h1:/BU5gfAVwqyd8EfC3Eu7zmuhwYQpKs+cGD8M//iiaxs= github.com/akutz/memconn v0.1.0 h1:NawI0TORU4hcOMsMr11g7vwlCdkYeLKXBcxWu2W/P8A= github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw= +github.com/alecthomas/assert/v2 v2.2.2 h1:Z/iVC0xZfWTaFNE6bA3z07T86hd45Xe2eLt6WVy2bbk= +github.com/alecthomas/assert/v2 v2.2.2/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= +github.com/alecthomas/go-check-sumtype v0.1.4 h1:WCvlB3l5Vq5dZQTFmodqL2g68uHiSwwlWcT5a2FGK0c= +github.com/alecthomas/go-check-sumtype v0.1.4/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ= +github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= +github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -99,6 +104,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod 
h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI= github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= +github.com/alexkohler/nakedret/v2 v2.0.4 h1:yZuKmjqGi0pSmjGpOC016LtPJysIL0WEUiaXW5SUnNg= +github.com/alexkohler/nakedret/v2 v2.0.4/go.mod h1:bF5i0zF2Wo2o4X4USt9ntUWve6JbFv02Ff4vlkmS/VU= github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= @@ -110,8 +117,8 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuW github.com/armon/go-proxyproto v0.0.0-20210323213023-7e956b284f0a/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/ashanbrown/forbidigo v1.5.1 h1:WXhzLjOlnuDYPYQo/eFlcFMi8X/kLfvWLYu6CSoebis= -github.com/ashanbrown/forbidigo v1.5.1/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= +github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY= +github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= @@ -175,29 +182,35 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= -github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A= -github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= +github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJY= +github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb h1:m935MPodAbYS46DG4pJSv7WO+VECIWUQ7OJYSoTrMh4= github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= -github.com/bombsimon/wsl/v3 v3.4.0 h1:RkSxjT3tmlptwfgEgTgU+KYKLI35p/tviNXNXiL2aNU= -github.com/bombsimon/wsl/v3 v3.4.0/go.mod h1:KkIB+TXkqy6MvK9BDZVbZxKNYsE1/oLRJbIFtf14qqo= +github.com/bombsimon/wsl/v4 v4.2.1 h1:Cxg6u+XDWff75SIFFmNsqnIOgob+Q9hG6y/ioKbRFiM= +github.com/bombsimon/wsl/v4 v4.2.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo= github.com/bramvdbogaerde/go-scp v1.4.0 h1:jKMwpwCbcX1KyvDbm/PDJuXcMuNVlLGi0Q0reuzjyKY= github.com/bramvdbogaerde/go-scp v1.4.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ= -github.com/breml/bidichk v0.2.4 h1:i3yedFWWQ7YzjdZJHnPo9d/xURinSq3OM+gyM43K4/8= -github.com/breml/bidichk v0.2.4/go.mod h1:7Zk0kRFt1LIZxtQdl9W9JwGAcLTTkOs+tN7wuEYGJ3s= -github.com/breml/errchkjson v0.3.1 h1:hlIeXuspTyt8Y/UmP5qy1JocGNR00KQHgfaNtRAjoxQ= -github.com/breml/errchkjson v0.3.1/go.mod h1:XroxrzKjdiutFyW3nWhw34VGg7kiMsDQox73yWCGI2U= -github.com/butuzov/ireturn v0.2.0 h1:kCHi+YzC150GE98WFuZQu9yrTn6GEydO2AuPLbTgnO4= 
-github.com/butuzov/ireturn v0.2.0/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= +github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY= +github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ= +github.com/breml/errchkjson v0.3.6 h1:VLhVkqSBH96AvXEyclMR37rZslRrY2kcyq+31HCsVrA= +github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U= +github.com/butuzov/ireturn v0.3.0 h1:hTjMqWw3y5JC3kpnC5vXmFJAWI/m31jaCYQqzkS6PL0= +github.com/butuzov/ireturn v0.3.0/go.mod h1:A09nIiwiqzN/IoVo9ogpa0Hzi9fex1kd9PSD6edP5ZA= +github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI= +github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7GF45AE= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/caarlos0/go-rpmutils v0.2.1-0.20211112020245-2cd62ff89b11 h1:IRrDwVlWQr6kS1U8/EtyA1+EHcc4yl8pndcqXWrEamg= github.com/caarlos0/go-rpmutils v0.2.1-0.20211112020245-2cd62ff89b11/go.mod h1:je2KZ+LxaCNvCoKg32jtOIULcFogJKcL1ZWUaIBjKj0= github.com/caarlos0/testfs v0.4.4 h1:3PHvzHi5Lt+g332CiShwS8ogTgS3HjrmzZxCm6JCDr8= github.com/caarlos0/testfs v0.4.4/go.mod h1:bRN55zgG4XCUVVHZCeU+/Tz1Q6AxEJOEJTliBy+1DMk= +github.com/catenacyber/perfsprint v0.7.1 h1:PGW5G/Kxn+YrN04cRAZKC+ZuvlVwolYMrIyyTJ/rMmc= +github.com/catenacyber/perfsprint v0.7.1/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50= github.com/cavaliergopher/cpio v1.0.1 h1:KQFSeKmZhv0cr+kawA3a0xTQCU4QxXF1vhU7P7av2KM= github.com/cavaliergopher/cpio v1.0.1/go.mod h1:pBdaqQjnvXxdS/6CvNDwIANIFSP0xRKI16PX4xejRQc= +github.com/ccojocar/zxcvbn-go v1.0.2 h1:na/czXU8RrhXO4EZme6eQJLR4PzcGsahsBOAwU6I3Vg= +github.com/ccojocar/zxcvbn-go v1.0.2/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff/v4 v4.2.1 
h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= @@ -208,20 +221,20 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4= github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= -github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 h1:W9o46d2kbNL06lq7UNDPV0zYLzkrde/bjIqO02eoll0= -github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8/go.mod h1:gakxgyXaaPkxvLw1XQxNGK4I37ys9iBRzNUx/B7pUCo= +github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc= +github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.15.0 h1:7NxJhNiBT3NG8pZJ3c+yfrVdHY8ScgKD27sScgjLMMk= github.com/cilium/ebpf v0.15.0/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P1Hso= +github.com/ckaznocha/intrange v0.1.0 h1:ZiGBhvrdsKpoEfzh9CjBfDSZof6QB0ORY5tXasUtiew= +github.com/ckaznocha/intrange v0.1.0/go.mod h1:Vwa9Ekex2BrEQMg6zlrWwbs/FtYw7eS5838Q7UjK7TQ= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go 
v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= @@ -240,8 +253,8 @@ github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDU github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/daixiang0/gci v0.10.1 h1:eheNA3ljF6SxnPD/vE4lCBusVHmV3Rs3dkKvFrJ7MR0= -github.com/daixiang0/gci v0.10.1/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= +github.com/daixiang0/gci v0.12.3 h1:yOZI7VAxAGPQmkb1eqt5g/11SUlwoat1fSblGLmdiQc= +github.com/daixiang0/gci v0.12.3/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= github.com/dave/astrid v0.0.0-20170323122508-8c2895878b14 h1:YI1gOOdmMk3xodBao7fehcvoZsEeOyy/cfhlpCSPgM4= github.com/dave/astrid v0.0.0-20170323122508-8c2895878b14/go.mod h1:Sth2QfxfATb/nW4EsrSi2KyJmbcniZ8TgTaji17D6ms= github.com/dave/brenda v1.1.0 h1:Sl1LlwXnbw7xMhq3y2x11McFu43AjDcwkllxxgZ3EZw= @@ -256,8 +269,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa h1:h8TfIT1xc8FWbwwpmHn1J5i43Y0uZP97GqasGCzSRJk= github.com/dblohm7/wingoes 
v0.0.0-20240119213807-a09d6be7affa/go.mod h1:Nx87SkVqTKd8UtT+xu7sM/l+LgXs6c0aHrlKusR+2EQ= -github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU= -github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c= +github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42t4429eC9k8= +github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= @@ -289,13 +302,9 @@ github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FM github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA= -github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= -github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= -github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= +github.com/ettle/strcase v0.2.0 
h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q= +github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= @@ -322,12 +331,14 @@ github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc= github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg= +github.com/ghostiam/protogetter v0.3.5 h1:+f7UiF8XNd4w3a//4DnusQ2SZjPkUjxkMEfjbxOK4Ug= +github.com/ghostiam/protogetter v0.3.5/go.mod h1:7lpeDnEJ1ZjL/YtyoN99ljO4z0pd3H0d18/t2dPBxHw= github.com/github/fakeca v0.1.0 h1:Km/MVOFvclqxPM9dZBC4+QE564nU4gz4iZ0D9pMw28I= github.com/github/fakeca v0.1.0/go.mod h1:+bormgoGMMuamOscx7N91aOuUST7wdaJ2rNjeohylyo= github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= -github.com/go-critic/go-critic v0.8.0 h1:4zOcpvDoKvBOl+R1W81IBznr78f8YaE4zKXkfDVxGGA= -github.com/go-critic/go-critic v0.8.0/go.mod h1:5TjdkPI9cu/yKbYS96BTsslihjKd6zg6vd8O9RZXj2s= +github.com/go-critic/go-critic v0.11.2 h1:81xH/2muBphEgPtcwH1p6QD+KzXl2tMSi3hXjBSxDnM= +github.com/go-critic/go-critic v0.11.2/go.mod h1:OePaicfjsf+KPy33yq4gzv6CO7TEQ9Rom6ns1KsJnl8= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= @@ -378,8 +389,9 @@ 
github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4 github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s= github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw= github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= -github.com/go-toolsmith/astequal v1.1.0 h1:kHKm1AWqClYn15R0K1KKE4RG614D46n+nqUQ06E1dTw= github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ= +github.com/go-toolsmith/astequal v1.2.0 h1:3Fs3CYZ1k9Vo4FzFhwwewC3CHISHDnVUPC4x0bI2+Cw= +github.com/go-toolsmith/astequal v1.2.0/go.mod h1:c8NZ3+kSFtFY/8lPso4v8LuJjdJiUFVnSuU3s0qrrDY= github.com/go-toolsmith/astfmt v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco= github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4= github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA= @@ -391,6 +403,8 @@ github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQi github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= +github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= +github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U= github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= @@ -441,26 +455,20 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod 
h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo= -github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= -github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzrdA+Nx5WA3tV1N0goTspwmKCQvBY= -github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs= -github.com/golangci/golangci-lint v1.52.2 h1:FrPElUUI5rrHXg1mQ7KxI1MXPAw5lBVskiz7U7a8a1A= -github.com/golangci/golangci-lint v1.52.2/go.mod h1:S5fhC5sHM5kE22/HcATKd1XLWQxX+y7mHj8B5H91Q/0= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.4.0 h1:KtVB/hTK4bbL/S6bs64rYyk8adjmh1BygbBiaAiX+a0= -github.com/golangci/misspell v0.4.0/go.mod h1:W6O/bwV6lGDxUCChm2ykw9NQdd5bYd1Xkjo88UcWyJc= -github.com/golangci/revgrep 
v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ= -github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e h1:ULcKCDV1LOZPFxGZaA6TlQbiM3J2GCPnkx/bGF6sX/g= +github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e/go.mod h1:Pm5KhLPA8gSnQwrQ6ukebRcapGb/BG9iUkdaiCcGHJM= +github.com/golangci/golangci-lint v1.57.1 h1:cqhpzkzjDwdN12rfMf1SUyyKyp88a1SltNqEYGS0nJw= +github.com/golangci/golangci-lint v1.57.1/go.mod h1:zLcHhz3NHc88T5zV2j75lyc0zH3LdOPOybblYa4p0oI= +github.com/golangci/misspell v0.4.1 h1:+y73iSicVy2PqyX7kmUefHusENlrP9YwuHZHPLGQj/g= +github.com/golangci/misspell v0.4.1/go.mod h1:9mAN1quEo3DlpbaIKKyEvRxK1pwqR9s/Sea1bJCtlNI= +github.com/golangci/plugin-module-register v0.1.1 h1:TCmesur25LnyJkpsVrupv1Cdzo+2f7zX0H6Jkw1Ol6c= +github.com/golangci/plugin-module-register v0.1.1/go.mod h1:TTpqoB6KkwOJMV8u7+NyXMrkwwESJLOkfl9TxR1DGFc= +github.com/golangci/revgrep v0.5.2 h1:EndcWoRhcnfj2NHQ+28hyuXpLMF+dQmCN+YaeeIl4FU= +github.com/golangci/revgrep v0.5.2/go.mod h1:bjAMA+Sh/QUfTDcHzxfyHxr4xKvllVr/0sCv2e7jJHA= +github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed h1:IURFTjxeTfNFP0hTEi1YKjB/ub8zkpaOqFFMApi2EAs= +github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed/go.mod h1:XLXN8bNw4CGRPaqgl3bv/lhz7bsGPh4/xSaMTbo2vkQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= @@ -493,7 +501,6 @@ github.com/google/goterm 
v0.0.0-20200907032337-555d40f16ae2 h1:CVuJwN34x4xM2aT4s github.com/google/goterm v0.0.0-20200907032337-555d40f16ae2/go.mod h1:nOFQdrUlIlx6M6ODdSpBj1NVA+VgLC6kmw60mkw34H4= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI= github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -503,25 +510,20 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd h1:gbpYu9NMq8jhDVbvlGkMFWCjLFlqqEZjEmObmhUy6Vo= github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/rpmpack v0.5.0 h1:L16KZ3QvkFGpYhmp23iQip+mx1X39foEsqszjMNBm8A= 
github.com/google/rpmpack v0.5.0/go.mod h1:uqVAUVQLq8UY2hCDfmJ/+rtO3aw7qyhc90rCVEabEfI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= -github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28 h1:9alfqbrhuD+9fLZ4iaAVwhlp5PEhmnBt7yvK2Oy5C1U= -github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= +github.com/gordonklaus/ineffassign v0.1.0 h1:y2Gd/9I7MdY1oEIt+n+rowjBNDcLQq3RsH5hwJd0f9s= +github.com/gordonklaus/ineffassign v0.1.0/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= github.com/goreleaser/chglog v0.5.0 h1:Sk6BMIpx8+vpAf8KyPit34OgWui8c7nKTMHhYx88jJ4= github.com/goreleaser/chglog v0.5.0/go.mod h1:Ri46M3lrMuv76FHszs3vtABR8J8k1w9JHYAzxeeOl28= github.com/goreleaser/fileglob v1.3.0 h1:/X6J7U8lbDpQtBvGcwwPS6OpzkNVlVEsFUVRx9+k+7I= @@ -547,11 +549,6 @@ github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Rep github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod 
h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= @@ -569,7 +566,6 @@ github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq github.com/hugelgupf/vmtest v0.0.0-20240102225328-693afabdd27f h1:ov45/OzrJG8EKbGjn7jJZQJTN7Z1t73sFYNIRd64YlI= github.com/hugelgupf/vmtest v0.0.0-20240102225328-693afabdd27f/go.mod h1:JoDrYMZpDPYo6uH9/f6Peqms3zNNWT2XiGgioMOIGuI= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/illarion/gonotify/v2 v2.0.3 h1:B6+SKPo/0Sw8cRJh1aLzNEeNVFfzE3c6N+o+vyxM+9A= github.com/illarion/gonotify/v2 v2.0.3/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= @@ -586,12 +582,14 @@ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= github.com/jessevdk/go-flags v1.4.0/go.mod 
h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= -github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jgautheron/goconst v1.7.0 h1:cEqH+YBKLsECnRSd4F4TK5ri8t/aXtt/qoL0Ft252B0= +github.com/jgautheron/goconst v1.7.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= +github.com/jjti/go-spancheck v0.5.3 h1:vfq4s2IB8T3HvbpiwDTYgVPj1Ze/ZSXrTtaZRTc7CuM= +github.com/jjti/go-spancheck v0.5.3/go.mod h1:eQdOX1k3T+nAKvZDyLC3Eby0La4dZ+I19iOl5NzSPFE= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -617,16 +615,15 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY= github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= -github.com/junk1tm/musttag v0.5.0 h1:bV1DTdi38Hi4pG4OVWa7Kap0hi0o7EczuK6wQt9zPOM= -github.com/junk1tm/musttag v0.5.0/go.mod h1:PcR7BA+oREQYvHwgjIDmw3exJeds5JzRcvEJTfjrA0M= +github.com/karamaru-alpha/copyloopvar v1.0.8 h1:gieLARwuByhEMxRwM3GRS/juJqFbLraftXIKDDNJ50Q= +github.com/karamaru-alpha/copyloopvar v1.0.8/go.mod 
h1:u7CIfztblY0jZLOQZgH3oYsJzpC2A7S6u/lfgSXHy0k= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/errcheck v1.6.3 h1:dEKh+GLHcWm2oN34nMvDzn1sqI0i0WxPvrgiJA5JuM8= -github.com/kisielk/errcheck v1.6.3/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw= -github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/kisielk/errcheck v1.7.0 h1:+SbscKmWJ5mOK/bO1zS60F5I9WwZDWOfRsC4RwfwRV0= +github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkHAIKE/contextcheck v1.1.4 h1:B6zAaLhOEEcjvUgIYEqystmnFk1Oemn8bvJhbt0GMb8= github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg= @@ -650,8 +647,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= -github.com/kunwardeep/paralleltest v1.0.6 h1:FCKYMF1OF2+RveWlABsdnmsvJrei5aoyZoaGS+Ugg8g= -github.com/kunwardeep/paralleltest v1.0.6/go.mod h1:Y0Y0XISdZM5IKm3TREQMZ6iteqn1YuwCsJO/0kL9Zes= +github.com/kunwardeep/paralleltest v1.0.10 h1:wrodoaKYzS2mdNVnc4/w31YaXFtsc21PCTdvWJ/lDDs= +github.com/kunwardeep/paralleltest v1.0.10/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY= 
github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ= github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA= @@ -664,6 +661,8 @@ github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= +github.com/macabu/inamedparam v0.1.3 h1:2tk/phHkMlEL/1GNe/Yf6kkR/hkcUdAEY3L0hjYV1Mk= +github.com/macabu/inamedparam v0.1.3/go.mod h1:93FLICAIk/quk7eaPPQvbzihUdn/QkGDwIZEoLtpH6I= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= @@ -685,8 +684,6 @@ github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= -github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= @@ -695,8 +692,8 @@ github.com/mdlayher/sdnotify v1.0.0 
h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE= github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI= -github.com/mgechev/revive v1.3.1 h1:OlQkcH40IB2cGuprTPcjB0iIUddgVZgGmDX3IAMR8D4= -github.com/mgechev/revive v1.3.1/go.mod h1:YlD6TTWl2B8A103R9KWJSPVI9DrEf+oqr15q21Ld+5I= +github.com/mgechev/revive v1.3.7 h1:502QY0vQGe9KtYJ9FpxMz9rL+Fc/P13CI5POL4uHCcE= +github.com/mgechev/revive v1.3.7/go.mod h1:RJ16jUbF0OWC3co/+XTxmFNgEpUPwnnA0BRllX2aDNA= github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ -732,16 +729,14 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ= github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= -github.com/nishanths/exhaustive v0.10.0 h1:BMznKAcVa9WOoLq/kTGp4NJOJSMwEpcpjFNAVRfPlSo= -github.com/nishanths/exhaustive v0.10.0/go.mod h1:IbwrGdVMizvDcIxPYGVdQn5BqWJaOwpCvg4RGb8r/TA= +github.com/nishanths/exhaustive v0.12.0 
h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhKRf3Swg= +github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs= github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= -github.com/nunnatsa/ginkgolinter v0.11.2 h1:xzQpAsEyZe5F1RMy2Z5kn8UFCGiWfKqJOUd2ZzBXA4M= -github.com/nunnatsa/ginkgolinter v0.11.2/go.mod h1:dJIGXYXbkBswqa/pIzG0QlVTTDSBMxDoCFwhsl4Uras= +github.com/nunnatsa/ginkgolinter v0.16.1 h1:uDIPSxgVHZ7PgbJElRDGzymkXH+JaF7mjew+Thjnt6Q= +github.com/nunnatsa/ginkgolinter v0.16.1/go.mod h1:4tWRinDN1FeJgU+iJANW/kz7xKN5nYRAOfJDQUS9dOQ= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= @@ -756,14 +751,15 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc6 h1:XDqvyKsJEbRtATzkgItUqBA7QHk58yxX1Ov9HERHNqU= github.com/opencontainers/image-spec v1.1.0-rc6/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= -github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= +github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= github.com/otiai10/mint 
v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= -github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= -github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pelletier/go-toml/v2 v2.2.0 h1:QLgLl2yMN7N+ruc31VynXs1vhMZa7CeHHejIeBAsoHo= +github.com/pelletier/go-toml/v2 v2.2.0/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkMUBc= github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= @@ -778,14 +774,15 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v1.4.1 h1:r8ru5FhXSn34YU1GJDOuoJv2LdsQkPmK325EOpPMJlM= -github.com/polyfloyd/go-errorlint v1.4.1/go.mod h1:k6fU/+fQe38ednoZS51T7gSIGQW1y94d6TkSr35OzH8= +github.com/polyfloyd/go-errorlint v1.4.8 h1:jiEjKDH33ouFktyez7sckv6pHWif9B7SuS8cutDXFHw= +github.com/polyfloyd/go-errorlint v1.4.8/go.mod h1:NNCxFcFjZcw3xNjVdCchERkEM6Oz7wta2XJVxRftwO4= 
+github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyfyrrjEaAchdy3R4= github.com/prometheus-community/pro-bing v0.4.0/go.mod h1:b7wRYZtCcPmt4Sz319BykUU241rWLe1VFXyiyWK/dH4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -816,8 +813,8 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/prometheus/prometheus v0.49.2-0.20240125131847-c3b8ef1694ff h1:X1Tly81aZ22DA1fxBdfvR3iw8+yFoUBUHMEd+AX/ZXI= github.com/prometheus/prometheus v0.49.2-0.20240125131847-c3b8ef1694ff/go.mod h1:FvE8dtQ1Ww63IlyKBn1V4s+zMwF9kHkVNkQBR1pM4CU= -github.com/quasilyte/go-ruleguard v0.3.19 h1:tfMnabXle/HzOb5Xe9CUZYWXKfkS1KwRmZyPmD9nVcc= -github.com/quasilyte/go-ruleguard v0.3.19/go.mod h1:lHSn69Scl48I7Gt9cX3VrbsZYvYiBYszZOZW4A+oTEw= +github.com/quasilyte/go-ruleguard v0.4.2 h1:htXcXDK6/rO12kiTHKfHuqR4kr3Y4M0J0rOL6CH/BYs= +github.com/quasilyte/go-ruleguard v0.4.2/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI= github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU= @@ -829,23 +826,25 @@ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.11.0 
h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.3.0 h1:q15RT/pd6UggBXVBuLps8BXRvl5GPBcwVA7BJHMLuTw= -github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50= -github.com/ryanrolds/sqlclosecheck v0.4.0 h1:i8SX60Rppc1wRuyQjMciLqIzV3xnoHB7/tXbr6RGYNI= -github.com/ryanrolds/sqlclosecheck v0.4.0/go.mod h1:TBRRjzL31JONc9i4XMinicuo+s+E8yKZ5FN8X3G6CKQ= +github.com/ryancurrah/gomodguard v1.3.1 h1:fH+fUg+ngsQO0ruZXXHnA/2aNllWA1whly4a6UvyzGE= +github.com/ryancurrah/gomodguard v1.3.1/go.mod h1:DGFHzEhi6iJ0oIDfMuo3TgrS+L9gZvrEfmjjuelnRU0= +github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= +github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0= github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs= github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc= github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= +github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4= +github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= -github.com/sashamelentyev/usestdlibvars v1.23.0 
h1:01h+/2Kd+NblNItNeux0veSL5cBF1jbEOPrEhDzGYq0= -github.com/sashamelentyev/usestdlibvars v1.23.0/go.mod h1:YPwr/Y1LATzHI93CqoPUN/2BzGQ/6N/cl/KwgR0B/aU= -github.com/securego/gosec/v2 v2.15.0 h1:v4Ym7FF58/jlykYmmhZ7mTm7FQvN/setNm++0fgIAtw= -github.com/securego/gosec/v2 v2.15.0/go.mod h1:VOjTrZOkUtSDt2QLSJmQBMWnvwiQPEjg0l+5juIqGk8= +github.com/sashamelentyev/usestdlibvars v1.25.0 h1:IK8SI2QyFzy/2OD2PYnhy84dpfNo9qADrRt6LH8vSzU= +github.com/sashamelentyev/usestdlibvars v1.25.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= +github.com/securego/gosec/v2 v2.19.0 h1:gl5xMkOI0/E6Hxx0XCY2XujA3V7SNSefA8sC+3f1gnk= +github.com/securego/gosec/v2 v2.19.0/go.mod h1:hOkDcHz9J/XIgIlPDXalxjeVYsHxoWUc5zJSHxcB8YM= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= @@ -864,8 +863,6 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE= github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= -github.com/sivchari/nosnakecase v1.7.0 h1:7QkpWIRMe8x25gckkFd2A5Pi6Ymo0qgr4JrhGt95do8= -github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY= github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak= github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= @@ -880,8 +877,8 @@ github.com/sonatard/noctx v0.0.2 h1:L7Dz4De2zDQhW8S0t+KUjY0MAQJd6SgVwhzNIc4ok00= github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo= github.com/sourcegraph/go-diff v0.7.0 
h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= -github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= @@ -903,7 +900,6 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -912,8 +908,7 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/studio-b12/gowebdav v0.9.0 h1:1j1sc9gQnNxbXXM4M/CebPOX4aXYtr7MojAVcN4dHjU= @@ -960,14 +955,14 @@ github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= -github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= -github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= +github.com/tetafro/godot v1.4.16 h1:4ChfhveiNLk4NveAZ9Pu2AN8QZ2nkUGFuadM9lrr5D0= +github.com/tetafro/godot v1.4.16/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M= github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ= github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4= github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg= -github.com/tomarrell/wrapcheck/v2 v2.8.1 h1:HxSqDSN0sAt0yJYsrcYVoEeyM4aI9yAm3KQpIXDJRhQ= -github.com/tomarrell/wrapcheck/v2 v2.8.1/go.mod h1:/n2Q3NZ4XFT50ho6Hbxg+RV1uyo2Uow/Vdm9NQcl5SE= +github.com/tomarrell/wrapcheck/v2 v2.8.3 h1:5ov+Cbhlgi7s/a42BprYoxsr73CbdMUTzE3bRDFASUs= +github.com/tomarrell/wrapcheck/v2 v2.8.3/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo= github.com/tommy-muehle/go-mnd/v2 v2.5.1 
h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= @@ -980,12 +975,12 @@ github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e h1:BA9O3BmlTmpjbvajAwzW github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA= -github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI= -github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= -github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y= -github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY= +github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI= +github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4= +github.com/ultraware/whitespace v0.1.0 h1:O1HKYoh0kIeqE8sFqZf1o0qbORXUCOQFrlaQyZsczZw= +github.com/ultraware/whitespace v0.1.0/go.mod h1:/se4r3beMFNmewJ4Xmz0nMQ941GJt+qmSHGP9emHYe0= +github.com/uudashr/gocognit v1.1.2 h1:l6BAEKJqQH2UpKAPKdMfZf5kE4W/2xk8pfU1OVLvniI= +github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= @@ -995,12 +990,16 @@ 
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU= +github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o= github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA= +github.com/ykadowak/zerologlint v0.1.5 h1:Gy/fMz1dFQN9JZTPjv1hxEk+sRWm05row04Yoolgdiw= +github.com/ykadowak/zerologlint v0.1.5/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1008,16 +1007,21 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -gitlab.com/bosi/decorder v0.2.3 h1:gX4/RgK16ijY8V+BRQHAySfQAb354T7/xQpDB2n10P0= -gitlab.com/bosi/decorder 
v0.2.3/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE= +gitlab.com/bosi/decorder v0.4.1 h1:VdsdfxhstabyhZovHafFw+9eJ6eU0d2CkFNJcZz/NU4= +gitlab.com/bosi/decorder v0.4.1/go.mod h1:jecSqWUew6Yle1pCr2eLWTensJMmsxHsBwt+PVbkAqA= gitlab.com/digitalxero/go-conventional-commit v1.0.7 h1:8/dO6WWG+98PMhlZowt/YjuiKhqhGlOCwlIV8SqqGh8= gitlab.com/digitalxero/go-conventional-commit v1.0.7/go.mod h1:05Xc2BFsSyC5tKhK0y+P3bs0AwUtNuTp+mTpbCU/DZ0= +go-simpler.org/assert v0.7.0 h1:OzWWZqfNxt8cLS+MlUp6Tgk1HjPkmgdKBq9qvy8lZsA= +go-simpler.org/assert v0.7.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28= +go-simpler.org/musttag v0.9.0 h1:Dzt6/tyP9ONr5g9h9P3cnYWCxeBFRkd0uJL/w+1Mxos= +go-simpler.org/musttag v0.9.0/go.mod h1:gA9nThnalvNSKpEoyp3Ko4/vCX2xTpqKoUtNqXOnVR4= +go-simpler.org/sloglint v0.5.0 h1:2YCcd+YMuYpuqthCgubcF5lBSjb6berc5VMOYUHKrpY= +go-simpler.org/sloglint v0.5.0/go.mod h1:EUknX5s8iXqf18KQxKnaBHUPVriiPnOrPjjJcsaTcSQ= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= @@ -1034,6 +1038,8 @@ go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= go.opentelemetry.io/proto/otlp v1.0.0 
h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= +go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -1050,10 +1056,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= @@ -1074,8 +1078,8 @@ golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRj golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod 
h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20240119083558-1b970713d09a h1:8qmSSA8Gz/1kTrCe0nqR0R3Gb/NDhykzWw2q2mWZydM= -golang.org/x/exp/typeparams v0.0.0-20240119083558-1b970713d09a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8= +golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.18.0 h1:jGzIakQa/ZXI1I0Fxvaa9W7yP25TqT6cHIHn+6CqvSQ= @@ -1090,7 +1094,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -1139,9 +1142,6 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1150,7 +1150,6 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= @@ -1161,10 +1160,6 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 
v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= @@ -1217,17 +1212,12 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1255,7 +1245,6 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= @@ -1266,12 +1255,10 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod 
h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= @@ -1282,7 +1269,6 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -1331,16 +1317,9 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201211185031-d93e913c1a58/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= @@ -1352,7 +1331,6 @@ golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= -golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= @@ -1385,16 +1363,12 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api 
v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -1426,13 +1400,6 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 h1:nz5NESFLZbJGPFxDT/HCn+V1mZ8JGNoY4nUpmW/Y2eg= google.golang.org/genproto/googleapis/api v0.0.0-20240116215550-a9fa1716bcac h1:OZkkudMUu9LVQMCoRUbI/1p5VCo9BOrlvkqMvWtqa6s= google.golang.org/genproto/googleapis/api v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:B5xPO//w8qmBDjGReYLpR6UJPnkldGkCSMoH/2vxJeg= @@ -1450,10 +1417,6 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ 
-1530,14 +1493,10 @@ k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7F k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -mvdan.cc/gofumpt v0.5.0 h1:0EQ+Z56k8tXjj/6TQD25BFNKQXpCvT0rnansIc7Ug5E= -mvdan.cc/gofumpt v0.5.0/go.mod h1:HBeVDtMKRZpXyxFciAirzdKklDlGu8aAy1wEbH5Y9js= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20230312165513-e84e2d14e3b8 h1:VuJo4Mt0EVPychre4fNlDWDuE5AjXtPJpRUWqZDQhaI= -mvdan.cc/unparam v0.0.0-20230312165513-e84e2d14e3b8/go.mod h1:Oh/d7dEtzsNHGOq1Cdv8aMm3KdKhVvPbRQcM8WFpBR8= +mvdan.cc/gofumpt v0.6.0 h1:G3QvahNDmpD+Aek/bNOLrFR2XC6ZAdo62dZu65gmwGo= +mvdan.cc/gofumpt v0.6.0/go.mod h1:4L0wf+kgIPZtcCWXynNS2e6bhmj73umwnuXSZarixzA= +mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14 h1:zCr3iRRgdk5eIikZNDphGcM6KGVTx3Yu+/Uu9Es254w= +mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14/go.mod h1:ZzZjEpJDOmx8TdVU6umamY3Xy0UAQUI2DHbf05USVbI= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/go.toolchain.rev b/go.toolchain.rev index 02b6e58781851..5d87594c25a31 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -ed9dc37b2b000f376a3e819cbb159e2c17a2dac6 
+bf15628b759344c6fc7763795a405ba65b8be5d7 diff --git a/gokrazy/go.mod b/gokrazy/go.mod index 0233f3e6d1796..a9ba5a07d1fb4 100644 --- a/gokrazy/go.mod +++ b/gokrazy/go.mod @@ -1,6 +1,6 @@ module tailscale.com/gokrazy -go 1.23.0 +go 1.23.1 require github.com/gokrazy/tools v0.0.0-20240730192548-9f81add3a91e diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.mod b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.mod index fa6768435524e..d4708bf4628ff 100644 --- a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.mod +++ b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.mod @@ -1,5 +1,5 @@ module gokrazy/build/natlabapp.arm64 -go 1.23.0 +go 1.23.1 require github.com/gokrazy/kernel.arm64 v0.0.0-20240830035047-cdba87a9eb0e // indirect diff --git a/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.mod b/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.mod index 7bdfd1e060e6c..da21a143975e9 100644 --- a/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.mod +++ b/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.mod @@ -1,6 +1,6 @@ module gokrazy/build/tsapp -go 1.23 +go 1.23.1 replace tailscale.com => ../../../.. diff --git a/gokrazy/natlabapp/builddir/tailscale.com/go.mod b/gokrazy/natlabapp/builddir/tailscale.com/go.mod index 7bdfd1e060e6c..da21a143975e9 100644 --- a/gokrazy/natlabapp/builddir/tailscale.com/go.mod +++ b/gokrazy/natlabapp/builddir/tailscale.com/go.mod @@ -1,6 +1,6 @@ module gokrazy/build/tsapp -go 1.23 +go 1.23.1 replace tailscale.com => ../../../.. 
diff --git a/gokrazy/natlabapp/builddir/tailscale.com/go.sum b/gokrazy/natlabapp/builddir/tailscale.com/go.sum index 9123439ed88bf..baa378c46708e 100644 --- a/gokrazy/natlabapp/builddir/tailscale.com/go.sum +++ b/gokrazy/natlabapp/builddir/tailscale.com/go.sum @@ -70,6 +70,8 @@ github.com/illarion/gonotify v1.0.1 h1:F1d+0Fgbq/sDWjj/r66ekjDG+IDeecQKUFH4wNwso github.com/illarion/gonotify v1.0.1/go.mod h1:zt5pmDofZpU1f8aqlK0+95eQhoEAn/d4G4B/FjVW4jE= github.com/illarion/gonotify/v2 v2.0.2 h1:oDH5yvxq9oiQGWUeut42uShcWzOy/hsT9E7pvO95+kQ= github.com/illarion/gonotify/v2 v2.0.2/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= +github.com/illarion/gonotify/v2 v2.0.3 h1:B6+SKPo/0Sw8cRJh1aLzNEeNVFfzE3c6N+o+vyxM+9A= +github.com/illarion/gonotify/v2 v2.0.3/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA= github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI= github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= @@ -132,6 +134,8 @@ github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1 h1:ycpNCSYw github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 h1:RNpJrXfI5u6e+uzyIzvmnXbhmhdRkVf//90sMBH3lso= github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc h1:cezaQN9pvKVaw56Ma5qr/G646uKIYP0yQf+OyWN/okc= +github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9 h1:81P7rjnikHKTJ75EkjppvbwUfKHDHYk6LJpO5PZy8pA= github.com/tailscale/xnet 
v0.0.0-20240117122442-62b9a7c569f9/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= diff --git a/gokrazy/tsapp/builddir/tailscale.com/go.mod b/gokrazy/tsapp/builddir/tailscale.com/go.mod index 7bdfd1e060e6c..da21a143975e9 100644 --- a/gokrazy/tsapp/builddir/tailscale.com/go.mod +++ b/gokrazy/tsapp/builddir/tailscale.com/go.mod @@ -1,6 +1,6 @@ module gokrazy/build/tsapp -go 1.23 +go 1.23.1 replace tailscale.com => ../../../.. diff --git a/health/health.go b/health/health.go index 7bb9d18e9ced7..216535d17c484 100644 --- a/health/health.go +++ b/health/health.go @@ -20,6 +20,7 @@ import ( "time" "tailscale.com/envknob" + "tailscale.com/metrics" "tailscale.com/tailcfg" "tailscale.com/types/opt" "tailscale.com/util/cibuild" @@ -95,6 +96,7 @@ type Tracker struct { inMapPollSince time.Time lastMapPollEndedAt time.Time lastStreamedMapResponse time.Time + lastNoiseDial time.Time derpHomeRegion int derpHomeless bool derpRegionConnected map[int]bool @@ -106,11 +108,11 @@ type Tracker struct { ipnWantRunning bool ipnWantRunningLastTrue time.Time // when ipnWantRunning last changed false -> true anyInterfaceUp opt.Bool // empty means unknown (assume true) - udp4Unbound bool controlHealth []string lastLoginErr error localLogConfigErr error tlsConnectionErrors map[string]error // map[ServerName]error + metricHealthMessage *metrics.MultiLabelMap[metricHealthMessageLabel] } // Subsystem is the name of a subsystem whose health can be monitored. @@ -317,6 +319,33 @@ func (w *Warnable) IsVisible(ws *warningState) bool { return time.Since(ws.BrokenSince) >= w.TimeToVisible } +// SetMetricsRegistry sets up the metrics for the Tracker. It takes +// a usermetric.Registry and registers the metrics there. 
+func (t *Tracker) SetMetricsRegistry(reg *usermetric.Registry) { + if reg == nil || t.metricHealthMessage != nil { + return + } + + t.metricHealthMessage = usermetric.NewMultiLabelMapWithRegistry[metricHealthMessageLabel]( + reg, + "tailscaled_health_messages", + "gauge", + "Number of health messages broken down by type.", + ) + + t.metricHealthMessage.Set(metricHealthMessageLabel{ + Type: "warning", + }, expvar.Func(func() any { + if t.nil() { + return 0 + } + t.mu.Lock() + defer t.mu.Unlock() + t.updateBuiltinWarnablesLocked() + return int64(len(t.stringsLocked())) + })) +} + // SetUnhealthy sets a warningState for the given Warnable with the provided Args, and should be // called when a Warnable becomes unhealthy, or its unhealthy status needs to be updated. // SetUnhealthy takes ownership of args. The args can be nil if no additional information is @@ -814,8 +843,12 @@ func (t *Tracker) SetUDP4Unbound(unbound bool) { } t.mu.Lock() defer t.mu.Unlock() - t.udp4Unbound = unbound - t.selfCheckLocked() + + if unbound { + t.setUnhealthyLocked(noUDP4BindWarnable, nil) + } else { + t.setHealthyLocked(noUDP4BindWarnable) + } } // SetAuthRoutineInError records the latest error encountered as a result of a @@ -973,7 +1006,6 @@ func (t *Tracker) updateBuiltinWarnablesLocked() { if v, ok := t.anyInterfaceUp.Get(); ok && !v { t.setUnhealthyLocked(NetworkStatusWarnable, nil) - return } else { t.setHealthyLocked(NetworkStatusWarnable) } @@ -982,11 +1014,50 @@ func (t *Tracker) updateBuiltinWarnablesLocked() { t.setUnhealthyLocked(localLogWarnable, Args{ ArgError: t.localLogConfigErr.Error(), }) - return } else { t.setHealthyLocked(localLogWarnable) } + now := time.Now() + + // How long we assume we'll have heard a DERP frame or a MapResponse + // KeepAlive by. + const tooIdle = 2*time.Minute + 5*time.Second + + // Whether user recently turned on Tailscale. 
+ recentlyOn := now.Sub(t.ipnWantRunningLastTrue) < 5*time.Second + + homeDERP := t.derpHomeRegion + if recentlyOn { + // If user just turned Tailscale on, don't warn for a bit. + t.setHealthyLocked(noDERPHomeWarnable) + t.setHealthyLocked(noDERPConnectionWarnable) + t.setHealthyLocked(derpTimeoutWarnable) + } else if !t.ipnWantRunning || t.derpHomeless || homeDERP != 0 { + t.setHealthyLocked(noDERPHomeWarnable) + } else { + t.setUnhealthyLocked(noDERPHomeWarnable, nil) + } + + if homeDERP != 0 && t.derpRegionConnected[homeDERP] { + t.setHealthyLocked(noDERPConnectionWarnable) + + if d := now.Sub(t.derpRegionLastFrame[homeDERP]); d < tooIdle { + t.setHealthyLocked(derpTimeoutWarnable) + } else { + t.setUnhealthyLocked(derpTimeoutWarnable, Args{ + ArgDERPRegionID: fmt.Sprint(homeDERP), + ArgDERPRegionName: t.derpRegionNameLocked(homeDERP), + ArgDuration: d.Round(time.Second).String(), + }) + } + } else { + t.setUnhealthyLocked(noDERPConnectionWarnable, Args{ + ArgDERPRegionID: fmt.Sprint(homeDERP), + ArgDERPRegionName: t.derpRegionNameLocked(homeDERP), + }) + } + if !t.ipnWantRunning { t.setUnhealthyLocked(IPNStateWarnable, Args{ "State": t.ipnState, @@ -1009,7 +1080,6 @@ func (t *Tracker) updateBuiltinWarnablesLocked() { t.setHealthyLocked(LoginStateWarnable) } - now := time.Now() if !t.inMapPoll && (t.lastMapPollEndedAt.IsZero() || now.Sub(t.lastMapPollEndedAt) > 10*time.Second) { t.setUnhealthyLocked(notInMapPollWarnable, nil) return @@ -1017,7 +1087,6 @@ func (t *Tracker) updateBuiltinWarnablesLocked() { t.setHealthyLocked(notInMapPollWarnable) } - const tooIdle = 2*time.Minute + 5*time.Second if d := now.Sub(t.lastStreamedMapResponse).Round(time.Second); d > tooIdle { t.setUnhealthyLocked(mapResponseTimeoutWarnable, Args{ ArgDuration: d.String(), @@ -1027,37 +1096,6 @@ func (t *Tracker) updateBuiltinWarnablesLocked() { t.setHealthyLocked(mapResponseTimeoutWarnable) } - if !t.derpHomeless { - rid := t.derpHomeRegion - if rid == 0 { - 
t.setUnhealthyLocked(noDERPHomeWarnable, nil) - return - } else if !t.derpRegionConnected[rid] { - t.setUnhealthyLocked(noDERPConnectionWarnable, Args{ - ArgDERPRegionID: fmt.Sprint(rid), - ArgDERPRegionName: t.derpRegionNameLocked(rid), - }) - return - } else if d := now.Sub(t.derpRegionLastFrame[rid]).Round(time.Second); d > tooIdle { - t.setUnhealthyLocked(derpTimeoutWarnable, Args{ - ArgDERPRegionID: fmt.Sprint(rid), - ArgDERPRegionName: t.derpRegionNameLocked(rid), - ArgDuration: d.String(), - }) - return - } - } - t.setHealthyLocked(noDERPHomeWarnable) - t.setHealthyLocked(noDERPConnectionWarnable) - t.setHealthyLocked(derpTimeoutWarnable) - - if t.udp4Unbound { - t.setUnhealthyLocked(noUDP4BindWarnable, nil) - return - } else { - t.setHealthyLocked(noUDP4BindWarnable) - } - // TODO: use _ = t.inMapPollSince _ = t.lastMapPollEndedAt @@ -1205,18 +1243,6 @@ func (t *Tracker) ReceiveFuncStats(which ReceiveFunc) *ReceiveFuncStats { } func (t *Tracker) doOnceInit() { - metricHealthMessage.Set(metricHealthMessageLabel{ - Type: "warning", - }, expvar.Func(func() any { - if t.nil() { - return 0 - } - t.mu.Lock() - defer t.mu.Unlock() - t.updateBuiltinWarnablesLocked() - return int64(len(t.stringsLocked())) - })) - for i := range t.MagicSockReceiveFuncs { f := &t.MagicSockReceiveFuncs[i] f.name = (ReceiveFunc(i)).String() @@ -1248,13 +1274,25 @@ func (t *Tracker) checkReceiveFuncsLocked() { } } +// LastNoiseDialWasRecent notes that we're attempting to dial control via the +// ts2021 noise protocol and reports whether the prior dial was "recent" +// (currently defined as 2 minutes but subject to change). +// +// If t is nil, it reports false. +func (t *Tracker) LastNoiseDialWasRecent() bool { + if t.nil() { + return false + } + t.mu.Lock() + defer t.mu.Unlock() + + now := time.Now() + dur := now.Sub(t.lastNoiseDial) + t.lastNoiseDial = now + return dur < 2*time.Minute +} + type metricHealthMessageLabel struct { // TODO: break down by warnable.severity as well? 
Type string } - -var metricHealthMessage = usermetric.NewMultiLabelMap[metricHealthMessageLabel]( - "tailscaled_health_messages", - "gauge", - "Number of health messages broken down by type.", -) diff --git a/health/warnings.go b/health/warnings.go index e84043341c598..7a21f9695ff6d 100644 --- a/health/warnings.go +++ b/health/warnings.go @@ -65,7 +65,7 @@ var NetworkStatusWarnable = Register(&Warnable{ // IPNStateWarnable is a Warnable that warns the user that Tailscale is stopped. var IPNStateWarnable = Register(&Warnable{ Code: "wantrunning-false", - Title: "Not connected to Tailscale", + Title: "Tailscale off", Severity: SeverityLow, Text: StaticMessage("Tailscale is stopped."), }) @@ -93,6 +93,7 @@ var LoginStateWarnable = Register(&Warnable{ return "You are logged out." } }, + DependsOn: []*Warnable{IPNStateWarnable}, }) // notInMapPollWarnable is a Warnable that warns the user that we are using a stale network map. @@ -100,7 +101,7 @@ var notInMapPollWarnable = Register(&Warnable{ Code: "not-in-map-poll", Title: "Out of sync", Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable}, + DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, Text: StaticMessage("Unable to connect to the Tailscale coordination server to synchronize the state of your tailnet. Peer reachability might degrade over time."), // 8 minutes reflects a maximum maintenance window for the coordination server. TimeToVisible: 8 * time.Minute, @@ -119,10 +120,20 @@ var noDERPHomeWarnable = Register(&Warnable{ // noDERPConnectionWarnable is a Warnable that warns the user that Tailscale couldn't connect to a specific DERP server. 
var noDERPConnectionWarnable = Register(&Warnable{ - Code: "no-derp-connection", - Title: "Relay server unavailable", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable}, + Code: "no-derp-connection", + Title: "Relay server unavailable", + Severity: SeverityMedium, + DependsOn: []*Warnable{ + NetworkStatusWarnable, + + // Technically noDERPConnectionWarnable could be used to warn about + // failure to connect to a specific DERP server (e.g. your home is derp1 + // but you're trying to connect to a peer's derp4 and are unable) but as + // of 2024-09-25 we only use this for connecting to your home DERP, so + // we depend on noDERPHomeWarnable which is the ability to figure out + // what your DERP home even is. + noDERPHomeWarnable, + }, Text: func(args Args) string { if n := args[ArgDERPRegionName]; n != "" { return fmt.Sprintf("Tailscale could not connect to the '%s' relay server. Your Internet connection might be down, or the server might be temporarily unavailable.", n) @@ -134,12 +145,17 @@ var noDERPConnectionWarnable = Register(&Warnable{ TimeToVisible: 10 * time.Second, }) -// derpTimeoutWarnable is a Warnable that warns the user that Tailscale hasn't heard from the home DERP region for a while. +// derpTimeoutWarnable is a Warnable that warns the user that Tailscale hasn't +// heard from the home DERP region for a while. 
var derpTimeoutWarnable = Register(&Warnable{ - Code: "derp-timed-out", - Title: "Relay server timed out", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable}, + Code: "derp-timed-out", + Title: "Relay server timed out", + Severity: SeverityMedium, + DependsOn: []*Warnable{ + NetworkStatusWarnable, + noDERPConnectionWarnable, // don't warn about it being stalled if we're not connected + noDERPHomeWarnable, // same reason as noDERPConnectionWarnable's dependency + }, Text: func(args Args) string { if n := args[ArgDERPRegionName]; n != "" { return fmt.Sprintf("Tailscale hasn't heard from the '%s' relay server in %v. The server might be temporarily unavailable, or your Internet connection might be down.", n, args[ArgDuration]) @@ -163,9 +179,9 @@ var derpRegionErrorWarnable = Register(&Warnable{ // noUDP4BindWarnable is a Warnable that warns the user that Tailscale couldn't listen for incoming UDP connections. var noUDP4BindWarnable = Register(&Warnable{ Code: "no-udp4-bind", - Title: "Incoming connections may fail", + Title: "NAT traversal setup failure", Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable}, + DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, Text: StaticMessage("Tailscale couldn't listen for incoming UDP connections."), ImpactsConnectivity: true, }) @@ -175,7 +191,7 @@ var mapResponseTimeoutWarnable = Register(&Warnable{ Code: "mapresponse-timeout", Title: "Network map response timeout", Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable}, + DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, Text: func(args Args) string { return fmt.Sprintf("Tailscale hasn't received a network map from the coordination server in %s.", args[ArgDuration]) }, diff --git a/hostinfo/hostinfo.go b/hostinfo/hostinfo.go index 330669aea67b9..1f9037829d82d 100644 --- a/hostinfo/hostinfo.go +++ b/hostinfo/hostinfo.go @@ -200,8 +200,13 @@ func SetFirewallMode(v string) { 
firewallMode.Store(v) } // SetPackage sets the packaging type for the app. // -// As of 2022-03-25, this is used by Android ("nogoogle" for the -// F-Droid build) and tsnet (set to "tsnet"). +// For Android, the possible values are: +// - "googleplay": installed from Google Play Store. +// - "fdroid": installed from the F-Droid repository. +// - "amazon": installed from the Amazon Appstore. +// - "unknown": when the installer package name is null. +// - "unknown$installerPackageName": for unrecognized installer package names, prefixed by "unknown". +// Additionally, tsnet sets this value to "tsnet". func SetPackage(v string) { packagingType.Store(v) } // SetApp sets the app type for the app. diff --git a/ipn/backend.go b/ipn/backend.go index d6ba954089372..76ad1910bf14c 100644 --- a/ipn/backend.go +++ b/ipn/backend.go @@ -238,6 +238,7 @@ type StateKey string var DebuggableComponents = []string{ "magicsock", "sockstats", + "syspolicy", } type Options struct { diff --git a/ipn/conffile/conffile.go b/ipn/conffile/conffile.go index 0b4670c4292c2..a2bafb8b7fd22 100644 --- a/ipn/conffile/conffile.go +++ b/ipn/conffile/conffile.go @@ -8,10 +8,11 @@ package conffile import ( "bytes" "encoding/json" + "errors" "fmt" "os" + "runtime" - "github.com/tailscale/hujson" "tailscale.com/ipn" ) @@ -39,8 +40,21 @@ func (c *Config) WantRunning() bool { // from the VM's metadata service's user-data field. const VMUserDataPath = "vm:user-data" +// hujsonStandardize is set to hujson.Standardize by conffile_hujson.go on +// platforms that support config files. +var hujsonStandardize func([]byte) ([]byte, error) + // Load reads and parses the config file at the provided path on disk. 
func Load(path string) (*Config, error) { + switch runtime.GOOS { + case "ios", "android": + // compile-time for deadcode elimination + return nil, fmt.Errorf("config file loading not supported on %q", runtime.GOOS) + } + if hujsonStandardize == nil { + // Build tags are wrong in conffile_hujson.go + return nil, errors.New("[unexpected] config file loading not wired up") + } var c Config c.Path = path var err error @@ -54,7 +68,7 @@ func Load(path string) (*Config, error) { if err != nil { return nil, err } - c.Std, err = hujson.Standardize(c.Raw) + c.Std, err = hujsonStandardize(c.Raw) if err != nil { return nil, fmt.Errorf("error parsing config file %s HuJSON/JSON: %w", path, err) } diff --git a/ipn/conffile/conffile_hujson.go b/ipn/conffile/conffile_hujson.go new file mode 100644 index 0000000000000..6825a06386625 --- /dev/null +++ b/ipn/conffile/conffile_hujson.go @@ -0,0 +1,20 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ios && !android + +package conffile + +import "github.com/tailscale/hujson" + +// Only link the hujson package on platforms that use it, to reduce binary size +// & memory a bit. +// +// (iOS and Android don't have config files) + +// While the linker's deadcode mostly handles the hujson package today, this +// keeps us honest for the future. 
+ +func init() { + hujsonStandardize = hujson.Standardize +} diff --git a/ipn/ipnlocal/drive.go b/ipn/ipnlocal/drive.go index 5dcd1af37da1c..98d563d8746b1 100644 --- a/ipn/ipnlocal/drive.go +++ b/ipn/ipnlocal/drive.go @@ -93,8 +93,7 @@ func (b *LocalBackend) driveSetShareLocked(share *drive.Share) (views.SliceView[ addedShare := false var shares []*drive.Share - for i := range existingShares.Len() { - existing := existingShares.At(i) + for _, existing := range existingShares.All() { if existing.Name() != share.Name { if !addedShare && existing.Name() > share.Name { // Add share in order @@ -152,8 +151,7 @@ func (b *LocalBackend) driveRenameShareLocked(oldName, newName string) (views.Sl found := false var shares []*drive.Share - for i := range existingShares.Len() { - existing := existingShares.At(i) + for _, existing := range existingShares.All() { if existing.Name() == newName { return existingShares, os.ErrExist } @@ -213,8 +211,7 @@ func (b *LocalBackend) driveRemoveShareLocked(name string) (views.SliceView[*dri found := false var shares []*drive.Share - for i := range existingShares.Len() { - existing := existingShares.At(i) + for _, existing := range existingShares.All() { if existing.Name() != name { shares = append(shares, existing.AsStruct()) } else { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index dd64b80951025..5e7ccc1cb973c 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -38,6 +38,7 @@ import ( "go4.org/mem" "go4.org/netipx" xmaps "golang.org/x/exp/maps" + "golang.org/x/net/dns/dnsmessage" "gvisor.dev/gvisor/pkg/tcpip" "tailscale.com/appc" "tailscale.com/client/tailscale/apitype" @@ -118,9 +119,6 @@ import ( "tailscale.com/wgengine/wgcfg/nmcfg" ) -var metricAdvertisedRoutes = usermetric.NewGauge( - "tailscaled_advertised_routes", "Number of advertised network routes (e.g. 
by a subnet router)") - var controlDebugFlags = getControlDebugFlags() func getControlDebugFlags() []string { @@ -183,6 +181,7 @@ type LocalBackend struct { statsLogf logger.Logf // for printing peers stats on change sys *tsd.System health *health.Tracker // always non-nil + metrics metrics e wgengine.Engine // non-nil; TODO(bradfitz): remove; use sys store ipn.StateStore // non-nil; TODO(bradfitz): remove; use sys dialer *tsdial.Dialer // non-nil; TODO(bradfitz): remove; use sys @@ -192,7 +191,6 @@ type LocalBackend struct { unregisterHealthWatch func() portpoll *portlist.Poller // may be nil portpollOnce sync.Once // guards starting readPoller - gotPortPollRes chan struct{} // closed upon first readPoller result varRoot string // or empty if SetVarRoot never called logFlushFunc func() // or nil if SetLogFlusher wasn't called em *expiryManager // non-nil @@ -376,6 +374,11 @@ func (b *LocalBackend) HealthTracker() *health.Tracker { return b.health } +// UserMetricsRegistry returns the usermetrics registry for the backend +func (b *LocalBackend) UserMetricsRegistry() *usermetric.Registry { + return b.sys.UserMetricsRegistry() +} + // NetMon returns the network monitor for the backend. func (b *LocalBackend) NetMon() *netmon.Monitor { return b.sys.NetMon.Get() @@ -385,6 +388,21 @@ type updateStatus struct { started bool } +type metrics struct { + // advertisedRoutes is a metric that reports the number of network routes that are advertised by the local node. + // This informs the user of how many routes are being advertised by the local node, excluding exit routes. + advertisedRoutes *usermetric.Gauge + + // approvedRoutes is a metric that reports the number of network routes served by the local node and approved + // by the control server. + approvedRoutes *usermetric.Gauge + + // primaryRoutes is a metric that reports the number of primary network routes served by the local node. 
+ // A route being a primary route implies that the route is currently served by this node, and not by another + // subnet router in a high availability configuration. + primaryRoutes *usermetric.Gauge +} + // clientGen is a func that creates a control plane client. // It's the type used by LocalBackend.SetControlClientGetterForTesting. type clientGen func(controlclient.Options) (controlclient.Client, error) @@ -428,6 +446,15 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo captiveCtx, captiveCancel := context.WithCancel(ctx) captiveCancel() + m := metrics{ + advertisedRoutes: sys.UserMetricsRegistry().NewGauge( + "tailscaled_advertised_routes", "Number of advertised network routes (e.g. by a subnet router)"), + approvedRoutes: sys.UserMetricsRegistry().NewGauge( + "tailscaled_approved_routes", "Number of approved network routes (e.g. by a subnet router)"), + primaryRoutes: sys.UserMetricsRegistry().NewGauge( + "tailscaled_primary_routes", "Number of network routes for which this node is a primary router (in high availability configuration)"), + } + b := &LocalBackend{ ctx: ctx, ctxCancel: cancel, @@ -436,6 +463,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo statsLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now), sys: sys, health: sys.HealthTracker(), + metrics: m, e: e, dialer: dialer, store: store, @@ -444,7 +472,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo state: ipn.NoState, portpoll: new(portlist.Poller), em: newExpiryManager(logf), - gotPortPollRes: make(chan struct{}), loginFlags: loginFlags, clock: clock, selfUpdateProgress: make([]ipnstate.UpdateProgress, 0), @@ -511,8 +538,8 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo currentShares := b.pm.prefs.DriveShares() if currentShares.Len() > 0 { var shares []*drive.Share - for i := range currentShares.Len() { - shares = append(shares, 
currentShares.At(i).AsStruct()) + for _, share := range currentShares.All() { + shares = append(shares, share.AsStruct()) } fs.SetShares(shares) } @@ -556,6 +583,8 @@ func (b *LocalBackend) SetComponentDebugLogging(component string, until time.Tim } } } + case "syspolicy": + setEnabled = syspolicy.SetDebugLoggingEnabled } if setEnabled == nil || !slices.Contains(ipn.DebuggableComponents, component) { return fmt.Errorf("unknown component %q", component) @@ -606,6 +635,50 @@ func (b *LocalBackend) GetDNSOSConfig() (dns.OSConfig, error) { return manager.GetBaseConfig() } +// QueryDNS performs a DNS query for name and queryType using the built-in DNS resolver, and returns +// the raw DNS response and the resolvers that are were able to handle the query (the internal forwarder +// may race multiple resolvers). +func (b *LocalBackend) QueryDNS(name string, queryType dnsmessage.Type) (res []byte, resolvers []*dnstype.Resolver, err error) { + manager, ok := b.sys.DNSManager.GetOK() + if !ok { + return nil, nil, errors.New("DNS manager not available") + } + fqdn, err := dnsname.ToFQDN(name) + if err != nil { + b.logf("DNSQuery: failed to parse FQDN %q: %v", name, err) + return nil, nil, err + } + n, err := dnsmessage.NewName(fqdn.WithTrailingDot()) + if err != nil { + b.logf("DNSQuery: failed to parse name %q: %v", name, err) + return nil, nil, err + } + from := netip.MustParseAddrPort("127.0.0.1:0") + db := dnsmessage.NewBuilder(nil, dnsmessage.Header{ + OpCode: 0, + RecursionDesired: true, + ID: 1, + }) + db.StartQuestions() + db.Question(dnsmessage.Question{ + Name: n, + Type: queryType, + Class: dnsmessage.ClassINET, + }) + q, err := db.Finish() + if err != nil { + b.logf("DNSQuery: failed to build query: %v", err) + return nil, nil, err + } + res, err = manager.Query(b.ctx, q, "tcp", from) + if err != nil { + b.logf("DNSQuery: failed to query %q: %v", name, err) + return nil, nil, err + } + rr := manager.Resolver().GetUpstreamResolvers(fqdn) + return res, rr, nil +} + 
// GetComponentDebugLogging gets the time that component's debug logging is // enabled until, or the zero time if component's time is not currently // enabled. @@ -1985,20 +2058,6 @@ func (b *LocalBackend) Start(opts ipn.Options) error { if b.portpoll != nil { b.portpollOnce.Do(func() { go b.readPoller() - - // Give the poller a second to get results to - // prevent it from restarting our map poll - // HTTP request (via doSetHostinfoFilterServices > - // cli.SetHostinfo). In practice this is very quick. - t0 := b.clock.Now() - timer, timerChannel := b.clock.NewTimer(time.Second) - select { - case <-b.gotPortPollRes: - b.logf("[v1] got initial portlist info in %v", b.clock.Since(t0).Round(time.Millisecond)) - timer.Stop() - case <-timerChannel: - b.logf("timeout waiting for initial portlist") - } }) } @@ -2514,21 +2573,21 @@ func shrinkDefaultRoute(route netip.Prefix, localInterfaceRoutes *netipx.IPSet, // readPoller is a goroutine that receives service lists from // b.portpoll and propagates them into the controlclient's HostInfo. func (b *LocalBackend) readPoller() { - isFirst := true + if !envknob.BoolDefaultTrue("TS_PORTLIST") { + return + } + ticker, tickerChannel := b.clock.NewTicker(portlist.PollInterval()) defer ticker.Stop() - initChan := make(chan struct{}) - close(initChan) for { select { case <-tickerChannel: case <-b.ctx.Done(): return - case <-initChan: - // Preserving old behavior: readPoller should - // immediately poll the first time, then wait - // for a tick after. 
- initChan = nil + } + + if !b.shouldUploadServices() { + continue } ports, changed, err := b.portpoll.Poll() @@ -2559,11 +2618,6 @@ func (b *LocalBackend) readPoller() { b.mu.Unlock() b.doSetHostinfoFilterServices() - - if isFirst { - isFirst = false - close(b.gotPortPollRes) - } } } @@ -4538,11 +4592,6 @@ func magicDNSRootDomains(nm *netmap.NetworkMap) []dnsname.FQDN { return nil } -var ( - ipv4Default = netip.MustParsePrefix("0.0.0.0/0") - ipv6Default = netip.MustParsePrefix("::/0") -) - // peerRoutes returns the routerConfig.Routes to access peers. // If there are over cgnatThreshold CGNAT routes, one big CGNAT route // is used instead. @@ -4643,9 +4692,9 @@ func (b *LocalBackend) routerConfig(cfg *wgcfg.Config, prefs ipn.PrefsView, oneC var default4, default6 bool for _, route := range rs.Routes { switch route { - case ipv4Default: + case tsaddr.AllIPv4(): default4 = true - case ipv6Default: + case tsaddr.AllIPv6(): default6 = true } if default4 && default6 { @@ -4653,10 +4702,10 @@ func (b *LocalBackend) routerConfig(cfg *wgcfg.Config, prefs ipn.PrefsView, oneC } } if !default4 { - rs.Routes = append(rs.Routes, ipv4Default) + rs.Routes = append(rs.Routes, tsaddr.AllIPv4()) } if !default6 { - rs.Routes = append(rs.Routes, ipv6Default) + rs.Routes = append(rs.Routes, tsaddr.AllIPv6()) } internalIPs, externalIPs, err := internalAndExternalInterfaces() if err != nil { @@ -4713,14 +4762,7 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip hi.ShieldsUp = prefs.ShieldsUp() hi.AllowsUpdate = envknob.AllowsRemoteUpdate() || prefs.AutoUpdate().Apply.EqualBool(true) - // count routes without exit node routes - var routes int64 - for _, route := range hi.RoutableIPs { - if route.Bits() != 0 { - routes++ - } - } - metricAdvertisedRoutes.Set(float64(routes)) + b.metrics.advertisedRoutes.Set(float64(tsaddr.WithoutExitRoute(prefs.AdvertiseRoutes()).Len())) var sshHostKeys []string if prefs.RunSSH() && envknob.CanSSHD() { @@ -5345,6 +5387,11 
@@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { b.setTCPPortsInterceptedFromNetmapAndPrefsLocked(b.pm.CurrentPrefs()) if nm == nil { b.nodeByAddr = nil + + // If there is no netmap, the client is going into a "turned off" + // state so reset the metrics. + b.metrics.approvedRoutes.Set(0) + b.metrics.primaryRoutes.Set(0) return } @@ -5365,6 +5412,15 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { } if nm.SelfNode.Valid() { addNode(nm.SelfNode) + + var approved float64 + for _, route := range nm.SelfNode.AllowedIPs().All() { + if !views.SliceContains(nm.SelfNode.Addresses(), route) && !tsaddr.IsExitRoute(route) { + approved++ + } + } + b.metrics.approvedRoutes.Set(approved) + b.metrics.primaryRoutes.Set(float64(tsaddr.WithoutExitRoute(nm.SelfNode.PrimaryRoutes()).Len())) } for _, p := range nm.Peers { addNode(p) @@ -6189,8 +6245,8 @@ func wireguardExitNodeDNSResolvers(nm *netmap.NetworkMap, peers map[tailcfg.Node resolvers := p.ExitNodeDNSResolvers() if !resolvers.IsNil() && resolvers.Len() > 0 { copies := make([]*dnstype.Resolver, resolvers.Len()) - for i := range resolvers.Len() { - copies[i] = resolvers.At(i).AsStruct() + for i, r := range resolvers.All() { + copies[i] = r.AsStruct() } return copies, true } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index e4091ef02966e..b0e12d5005431 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -432,7 +432,7 @@ func newTestLocalBackend(t testing.TB) *LocalBackend { sys := new(tsd.System) store := new(mem.Store) sys.Set(store) - eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker()) + eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } diff --git a/ipn/ipnlocal/loglines_test.go b/ipn/ipnlocal/loglines_test.go index d05436e6dc8f4..f70987c0e8ad3 100644 --- a/ipn/ipnlocal/loglines_test.go +++ 
b/ipn/ipnlocal/loglines_test.go @@ -50,7 +50,7 @@ func TestLocalLogLines(t *testing.T) { sys := new(tsd.System) store := new(mem.Store) sys.Set(store) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) if err != nil { t.Fatal(err) } diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index 8497b38e2ee44..ff9b627693a8a 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -35,6 +35,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/util/must" + "tailscale.com/util/usermetric" "tailscale.com/wgengine" "tailscale.com/wgengine/filter" ) @@ -643,7 +644,8 @@ func TestPeerAPIReplyToDNSQueries(t *testing.T) { h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") ht := new(health.Tracker) - eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht) + reg := new(usermetric.Registry) + eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) h.ps = &peerAPIServer{ b: &LocalBackend{ @@ -694,7 +696,8 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") ht := new(health.Tracker) - eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht) + reg := new(usermetric.Registry) + eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) var a *appc.AppConnector if shouldStore { @@ -767,7 +770,8 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { rc := &appctest.RouteCollector{} ht := new(health.Tracker) - eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht) + reg := new(usermetric.Registry) + eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, 
ht)) var a *appc.AppConnector if shouldStore { @@ -830,8 +834,9 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") ht := new(health.Tracker) + reg := new(usermetric.Registry) rc := &appctest.RouteCollector{} - eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht) + eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) var a *appc.AppConnector if shouldStore { diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index e43de17658ca7..73e66c2b9db16 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -684,6 +684,7 @@ func newTestBackend(t *testing.T) *LocalBackend { e, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{ SetSubsystem: sys.Set, HealthTracker: sys.HealthTracker(), + Metrics: sys.UserMetricsRegistry(), }) if err != nil { t.Fatal(err) diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 20dde81f14587..bebd0152b5a36 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -298,7 +298,7 @@ func TestStateMachine(t *testing.T) { sys := new(tsd.System) store := new(testStateStorage) sys.Set(store) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } @@ -931,7 +931,7 @@ func TestEditPrefsHasNoKeys(t *testing.T) { logf := tstest.WhileTestRunningLogger(t) sys := new(tsd.System) sys.Set(new(mem.Store)) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } diff --git a/ipn/localapi/localapi.go 
b/ipn/localapi/localapi.go index 01dc064cfda6e..528304bab77d4 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -31,7 +31,7 @@ import ( "sync" "time" - "github.com/google/uuid" + "golang.org/x/net/dns/dnsmessage" "tailscale.com/client/tailscale/apitype" "tailscale.com/clientupdate" "tailscale.com/drive" @@ -49,6 +49,7 @@ import ( "tailscale.com/taildrop" "tailscale.com/tka" "tailscale.com/tstime" + "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" @@ -62,7 +63,6 @@ import ( "tailscale.com/util/progresstracking" "tailscale.com/util/rands" "tailscale.com/util/testenv" - "tailscale.com/util/usermetric" "tailscale.com/version" "tailscale.com/wgengine/magicsock" ) @@ -99,6 +99,7 @@ var handler = map[string]localAPIHandler{ "dev-set-state-store": (*Handler).serveDevSetStateStore, "dial": (*Handler).serveDial, "dns-osconfig": (*Handler).serveDNSOSConfig, + "dns-query": (*Handler).serveDNSQuery, "drive/fileserver-address": (*Handler).serveDriveServerAddr, "drive/shares": (*Handler).serveShares, "file-targets": (*Handler).serveFileTargets, @@ -578,7 +579,7 @@ func (h *Handler) serveUserMetrics(w http.ResponseWriter, r *http.Request) { http.Error(w, "usermetrics debug flag not enabled", http.StatusForbidden) return } - usermetric.Handler(w, r) + h.b.UserMetricsRegistry().Handler(w, r) } func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) { @@ -1561,7 +1562,7 @@ func (h *Handler) serveFilePut(w http.ResponseWriter, r *http.Request) { switch r.Method { case "PUT": file := ipn.OutgoingFile{ - ID: uuid.Must(uuid.NewRandom()).String(), + ID: rands.HexString(30), PeerID: peerID, Name: filenameEscaped, DeclaredSize: r.ContentLength, @@ -2746,6 +2747,49 @@ func (h *Handler) serveDNSOSConfig(w http.ResponseWriter, r *http.Request) { json.NewEncoder(w).Encode(response) } +// serveDNSQuery provides the ability to perform DNS queries using the internal +// DNS forwarder. 
This is useful for debugging and testing purposes. +// URL parameters: +// - name: the domain name to query +// - type: the DNS record type to query as a number (default if empty: A = '1') +// +// The response if successful is a DNSQueryResponse JSON object. +func (h *Handler) serveDNSQuery(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) + return + } + // Require write access for privacy reasons. + if !h.PermitWrite { + http.Error(w, "dns-query access denied", http.StatusForbidden) + return + } + q := r.URL.Query() + name := q.Get("name") + queryType := q.Get("type") + qt := dnsmessage.TypeA + if queryType != "" { + t, err := dnstype.DNSMessageTypeForString(queryType) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + qt = t + } + + res, rrs, err := h.b.QueryDNS(name, qt) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(&apitype.DNSQueryResponse{ + Bytes: res, + Resolvers: rrs, + }) +} + // serveDriveServerAddr handles updates of the Taildrive file server address. 
func (h *Handler) serveDriveServerAddr(w http.ResponseWriter, r *http.Request) { if r.Method != "PUT" { diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go index 5ec873b3bdee3..fa54a1e756a7e 100644 --- a/ipn/localapi/localapi_test.go +++ b/ipn/localapi/localapi_test.go @@ -356,7 +356,7 @@ func newTestLocalBackend(t testing.TB) *ipnlocal.LocalBackend { sys := new(tsd.System) store := new(mem.Store) sys.Set(store) - eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker()) + eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } diff --git a/k8s-operator/api.md b/k8s-operator/api.md index e96baa7902b16..e8a6e248a2934 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -14,6 +14,8 @@ - [DNSConfigList](#dnsconfiglist) - [ProxyClass](#proxyclass) - [ProxyClassList](#proxyclasslist) +- [ProxyGroup](#proxygroup) +- [ProxyGroupList](#proxygrouplist) - [Recorder](#recorder) - [RecorderList](#recorderlist) @@ -30,7 +32,7 @@ node can be configured to act as a Tailscale subnet router and/or a Tailscale exit node. Connector is a cluster-scoped resource. More info: -https://tailscale.com/kb/1236/kubernetes-operator#deploying-exit-nodes-and-subnet-routers-on-kubernetes-using-connector-custom-resource +https://tailscale.com/kb/1441/kubernetes-operator-connector @@ -81,7 +83,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `tags` _[Tags](#tags)_ | Tags that the Tailscale node will be tagged with.
Defaults to [tag:k8s].
To autoapprove the subnet routes or exit node defined by a Connector,
you can configure Tailscale ACLs to give these tags the necessary
permissions.
See https://tailscale.com/kb/1018/acls/#auto-approvers-for-routes-and-exit-nodes.
If you specify custom tags here, you must also make the operator an owner of these tags.
See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
Tags cannot be changed once a Connector node has been created.
Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
Type: string
| +| `tags` _[Tags](#tags)_ | Tags that the Tailscale node will be tagged with.
Defaults to [tag:k8s].
To autoapprove the subnet routes or exit node defined by a Connector,
you can configure Tailscale ACLs to give these tags the necessary
permissions.
See https://tailscale.com/kb/1337/acl-syntax#autoapprovers.
If you specify custom tags here, you must also make the operator an owner of these tags.
See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
Tags cannot be changed once a Connector node has been created.
Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
Type: string
| | `hostname` _[Hostname](#hostname)_ | Hostname is the tailnet hostname that should be assigned to the
Connector node. If unset, hostname defaults to name>-connector. Hostname can contain lower case letters, numbers and
dashes, it must not start or end with a dash and must be between 2
and 63 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$`
Type: string
| | `proxyClass` _string_ | ProxyClass is the name of the ProxyClass custom resource that
contains configuration options that should be applied to the
resources created for this Connector. If unset, the operator will
create resources with the default configuration. | | | | `subnetRouter` _[SubnetRouter](#subnetrouter)_ | SubnetRouter defines subnet routes that the Connector node should
expose to tailnet. If unset, none are exposed.
https://tailscale.com/kb/1019/subnets/ | | | @@ -261,6 +263,21 @@ _Appears in:_ +#### HostnamePrefix + +_Underlying type:_ _string_ + + + +_Validation:_ +- Pattern: `^[a-z0-9][a-z0-9-]{0,61}$` +- Type: string + +_Appears in:_ +- [ProxyGroupSpec](#proxygroupspec) + + + #### Metrics @@ -378,7 +395,7 @@ given ProxyClass to resources created for a Connector, use connector.spec.proxyClass field. ProxyClass is a cluster scoped resource. More info: -https://tailscale.com/kb/1236/kubernetes-operator#cluster-resource-customization-using-proxyclass-custom-resource. +https://tailscale.com/kb/1445/kubernetes-operator-customization#cluster-resource-customization-using-proxyclass-custom-resource @@ -450,6 +467,100 @@ _Appears in:_ | `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the ProxyClass.
Known condition types are `ProxyClassReady`. | | | +#### ProxyGroup + + + + + + + +_Appears in:_ +- [ProxyGroupList](#proxygrouplist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | | +| `kind` _string_ | `ProxyGroup` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[ProxyGroupSpec](#proxygroupspec)_ | Spec describes the desired ProxyGroup instances. | | | +| `status` _[ProxyGroupStatus](#proxygroupstatus)_ | ProxyGroupStatus describes the status of the ProxyGroup resources. This is
set and managed by the Tailscale operator. | | | + + +#### ProxyGroupList + + + + + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | | +| `kind` _string_ | `ProxyGroupList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[ProxyGroup](#proxygroup) array_ | | | | + + +#### ProxyGroupSpec + + + + + + + +_Appears in:_ +- [ProxyGroup](#proxygroup) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup proxies. Currently the only supported type is egress. | | Enum: [egress]
Type: string
| +| `tags` _[Tags](#tags)_ | Tags that the Tailscale devices will be tagged with. Defaults to [tag:k8s].
If you specify custom tags here, make sure you also make the operator
an owner of these tags.
See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
Tags cannot be changed once a ProxyGroup device has been created.
Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
Type: string
| +| `replicas` _integer_ | Replicas specifies how many replicas to create the StatefulSet with.
Defaults to 2. | | | +| `hostnamePrefix` _[HostnamePrefix](#hostnameprefix)_ | HostnamePrefix is the hostname prefix to use for tailnet devices created
by the ProxyGroup. Each device will have the integer number from its
StatefulSet pod appended to this prefix to form the full hostname.
HostnamePrefix can contain lower case letters, numbers and dashes, it
must not start with a dash and must be between 1 and 62 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}$`
Type: string
| +| `proxyClass` _string_ | ProxyClass is the name of the ProxyClass custom resource that contains
configuration options that should be applied to the resources created
for this ProxyGroup. If unset, and there is no default ProxyClass
configured, the operator will create resources with the default
configuration. | | | + + +#### ProxyGroupStatus + + + + + + + +_Appears in:_ +- [ProxyGroup](#proxygroup) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the ProxyGroup
resources. Known condition types are `ProxyGroupReady`. | | | +| `devices` _[TailnetDevice](#tailnetdevice) array_ | List of tailnet devices associated with the ProxyGroup StatefulSet. | | | + + +#### ProxyGroupType + +_Underlying type:_ _string_ + + + +_Validation:_ +- Enum: [egress] +- Type: string + +_Appears in:_ +- [ProxyGroupSpec](#proxygroupspec) + + + #### Recorder @@ -586,7 +697,25 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the Recorder.
Known condition types are `RecorderReady`. | | | -| `devices` _[TailnetDevice](#tailnetdevice) array_ | List of tailnet devices associated with the Recorder statefulset. | | | +| `devices` _[RecorderTailnetDevice](#recordertailnetdevice) array_ | List of tailnet devices associated with the Recorder StatefulSet. | | | + + +#### RecorderTailnetDevice + + + + + + + +_Appears in:_ +- [RecorderStatus](#recorderstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `hostname` _string_ | Hostname is the fully qualified domain name of the device.
If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
node. | | | +| `tailnetIPs` _string array_ | TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6)
assigned to the device. | | | +| `url` _string_ | URL where the UI is available if enabled for replaying recordings. This
will be an HTTPS MagicDNS URL. You must be connected to the same tailnet
as the recorder to access it. | | | #### Route @@ -748,6 +877,7 @@ _Validation:_ _Appears in:_ - [ConnectorSpec](#connectorspec) +- [ProxyGroupSpec](#proxygroupspec) - [RecorderSpec](#recorderspec) @@ -761,13 +891,12 @@ _Appears in:_ _Appears in:_ -- [RecorderStatus](#recorderstatus) +- [ProxyGroupStatus](#proxygroupstatus) | Field | Description | Default | Validation | | --- | --- | --- | --- | | `hostname` _string_ | Hostname is the fully qualified domain name of the device.
If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
node. | | | | `tailnetIPs` _string array_ | TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6)
assigned to the device. | | | -| `url` _string_ | URL where the UI is available if enabled for replaying recordings. This
will be an HTTPS MagicDNS URL. You must be connected to the same tailnet
as the recorder to access it. | | | #### TailscaleConfig @@ -783,6 +912,6 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `acceptRoutes` _boolean_ | AcceptRoutes can be set to true to make the proxy instance accept
routes advertized by other nodes on the tailnet, such as subnet
routes.
This is equivalent of passing --accept-routes flag to a tailscale Linux client.
https://tailscale.com/kb/1019/subnets#use-your-subnet-routes-from-other-machines
Defaults to false. | | | +| `acceptRoutes` _boolean_ | AcceptRoutes can be set to true to make the proxy instance accept
routes advertized by other nodes on the tailnet, such as subnet
routes.
This is equivalent of passing --accept-routes flag to a tailscale Linux client.
https://tailscale.com/kb/1019/subnets#use-your-subnet-routes-from-other-devices
Defaults to false. | | | diff --git a/k8s-operator/apis/v1alpha1/register.go b/k8s-operator/apis/v1alpha1/register.go index b16bc7b7be333..70b411d120994 100644 --- a/k8s-operator/apis/v1alpha1/register.go +++ b/k8s-operator/apis/v1alpha1/register.go @@ -58,6 +58,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &DNSConfigList{}, &Recorder{}, &RecorderList{}, + &ProxyGroup{}, + &ProxyGroupList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/k8s-operator/apis/v1alpha1/types_connector.go b/k8s-operator/apis/v1alpha1/types_connector.go index 87c44926b52bd..27afd0838a388 100644 --- a/k8s-operator/apis/v1alpha1/types_connector.go +++ b/k8s-operator/apis/v1alpha1/types_connector.go @@ -29,7 +29,7 @@ var ConnectorKind = "Connector" // exit node. // Connector is a cluster-scoped resource. // More info: -// https://tailscale.com/kb/1236/kubernetes-operator#deploying-exit-nodes-and-subnet-routers-on-kubernetes-using-connector-custom-resource +// https://tailscale.com/kb/1441/kubernetes-operator-connector type Connector struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -62,7 +62,7 @@ type ConnectorSpec struct { // To autoapprove the subnet routes or exit node defined by a Connector, // you can configure Tailscale ACLs to give these tags the necessary // permissions. - // See https://tailscale.com/kb/1018/acls/#auto-approvers-for-routes-and-exit-nodes. + // See https://tailscale.com/kb/1337/acl-syntax#autoapprovers. // If you specify custom tags here, you must also make the operator an owner of these tags. // See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator. // Tags cannot be changed once a Connector node has been created. 
@@ -171,7 +171,20 @@ type ConditionType string const ( ConnectorReady ConditionType = `ConnectorReady` - ProxyClassready ConditionType = `ProxyClassReady` + ProxyClassReady ConditionType = `ProxyClassReady` + ProxyGroupReady ConditionType = `ProxyGroupReady` ProxyReady ConditionType = `TailscaleProxyReady` // a Tailscale-specific condition type for corev1.Service RecorderReady ConditionType = `RecorderReady` + // EgressSvcValid gets set on a user configured ExternalName Service that defines a tailnet target to be exposed + // on a ProxyGroup. + // Set to true if the user provided configuration is valid. + EgressSvcValid ConditionType = `TailscaleEgressSvcValid` + // EgressSvcConfigured gets set on a user configured ExternalName Service that defines a tailnet target to be exposed + // on a ProxyGroup. + // Set to true if the cluster resources for the service have been successfully configured. + EgressSvcConfigured ConditionType = `TailscaleEgressSvcConfigured` + // EgressSvcReady gets set on a user configured ExternalName Service that defines a tailnet target to be exposed + // on a ProxyGroup. + // Set to true if the service is ready to route cluster traffic. + EgressSvcReady ConditionType = `TailscaleEgressSvcReady` ) diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index bdf7bd380885b..7f415bc340bd7 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -25,7 +25,7 @@ var ProxyClassKind = "ProxyClass" // connector.spec.proxyClass field. // ProxyClass is a cluster scoped resource. // More info: -// https://tailscale.com/kb/1236/kubernetes-operator#cluster-resource-customization-using-proxyclass-custom-resource. 
+// https://tailscale.com/kb/1445/kubernetes-operator-customization#cluster-resource-customization-using-proxyclass-custom-resource type ProxyClass struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -73,7 +73,7 @@ type TailscaleConfig struct { // routes advertized by other nodes on the tailnet, such as subnet // routes. // This is equivalent of passing --accept-routes flag to a tailscale Linux client. - // https://tailscale.com/kb/1019/subnets#use-your-subnet-routes-from-other-machines + // https://tailscale.com/kb/1019/subnets#use-your-subnet-routes-from-other-devices // Defaults to false. AcceptRoutes bool `json:"acceptRoutes,omitempty"` } diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go new file mode 100644 index 0000000000000..7e5515ba9d66c --- /dev/null +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -0,0 +1,111 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,shortName=pg +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "ProxyGroupReady")].reason`,description="Status of the deployed ProxyGroup resources." + +type ProxyGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec describes the desired ProxyGroup instances. + Spec ProxyGroupSpec `json:"spec"` + + // ProxyGroupStatus describes the status of the ProxyGroup resources. This is + // set and managed by the Tailscale operator. 
+ // +optional + Status ProxyGroupStatus `json:"status"` +} + +// +kubebuilder:object:root=true + +type ProxyGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []ProxyGroup `json:"items"` +} + +type ProxyGroupSpec struct { + // Type of the ProxyGroup proxies. Currently the only supported type is egress. + Type ProxyGroupType `json:"type"` + + // Tags that the Tailscale devices will be tagged with. Defaults to [tag:k8s]. + // If you specify custom tags here, make sure you also make the operator + // an owner of these tags. + // See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator. + // Tags cannot be changed once a ProxyGroup device has been created. + // Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. + // +optional + Tags Tags `json:"tags,omitempty"` + + // Replicas specifies how many replicas to create the StatefulSet with. + // Defaults to 2. + // +optional + Replicas *int32 `json:"replicas,omitempty"` + + // HostnamePrefix is the hostname prefix to use for tailnet devices created + // by the ProxyGroup. Each device will have the integer number from its + // StatefulSet pod appended to this prefix to form the full hostname. + // HostnamePrefix can contain lower case letters, numbers and dashes, it + // must not start with a dash and must be between 1 and 62 characters long. + // +optional + HostnamePrefix HostnamePrefix `json:"hostnamePrefix,omitempty"` + + // ProxyClass is the name of the ProxyClass custom resource that contains + // configuration options that should be applied to the resources created + // for this ProxyGroup. If unset, and there is no default ProxyClass + // configured, the operator will create resources with the default + // configuration. + // +optional + ProxyClass string `json:"proxyClass,omitempty"` +} + +type ProxyGroupStatus struct { + // List of status conditions to indicate the status of the ProxyGroup + // resources. 
Known condition types are `ProxyGroupReady`. + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // List of tailnet devices associated with the ProxyGroup StatefulSet. + // +listType=map + // +listMapKey=hostname + // +optional + Devices []TailnetDevice `json:"devices,omitempty"` +} + +type TailnetDevice struct { + // Hostname is the fully qualified domain name of the device. + // If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the + // node. + Hostname string `json:"hostname"` + + // TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) + // assigned to the device. + // +optional + TailnetIPs []string `json:"tailnetIPs,omitempty"` +} + +// +kubebuilder:validation:Type=string +// +kubebuilder:validation:Enum=egress +type ProxyGroupType string + +const ( + ProxyGroupTypeEgress ProxyGroupType = "egress" +) + +// +kubebuilder:validation:Type=string +// +kubebuilder:validation:Pattern=`^[a-z0-9][a-z0-9-]{0,61}$` +type HostnamePrefix string diff --git a/k8s-operator/apis/v1alpha1/types_recorder.go b/k8s-operator/apis/v1alpha1/types_recorder.go index f365ab3163965..3728154b45170 100644 --- a/k8s-operator/apis/v1alpha1/types_recorder.go +++ b/k8s-operator/apis/v1alpha1/types_recorder.go @@ -223,14 +223,14 @@ type RecorderStatus struct { // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` - // List of tailnet devices associated with the Recorder statefulset. + // List of tailnet devices associated with the Recorder StatefulSet. // +listType=map // +listMapKey=hostname // +optional - Devices []TailnetDevice `json:"devices,omitempty"` + Devices []RecorderTailnetDevice `json:"devices,omitempty"` } -type TailnetDevice struct { +type RecorderTailnetDevice struct { // Hostname is the fully qualified domain name of the device. // If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the // node. 
diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index 5464f4e37bb48..ba4ff40e46dd5 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -515,6 +515,119 @@ func (in *ProxyClassStatus) DeepCopy() *ProxyClassStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyGroup) DeepCopyInto(out *ProxyGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyGroup. +func (in *ProxyGroup) DeepCopy() *ProxyGroup { + if in == nil { + return nil + } + out := new(ProxyGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProxyGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyGroupList) DeepCopyInto(out *ProxyGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProxyGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyGroupList. +func (in *ProxyGroupList) DeepCopy() *ProxyGroupList { + if in == nil { + return nil + } + out := new(ProxyGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ProxyGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyGroupSpec) DeepCopyInto(out *ProxyGroupSpec) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(Tags, len(*in)) + copy(*out, *in) + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyGroupSpec. +func (in *ProxyGroupSpec) DeepCopy() *ProxyGroupSpec { + if in == nil { + return nil + } + out := new(ProxyGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyGroupStatus) DeepCopyInto(out *ProxyGroupStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]TailnetDevice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyGroupStatus. +func (in *ProxyGroupStatus) DeepCopy() *ProxyGroupStatus { + if in == nil { + return nil + } + out := new(ProxyGroupStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Recorder) DeepCopyInto(out *Recorder) { *out = *in @@ -723,7 +836,7 @@ func (in *RecorderStatus) DeepCopyInto(out *RecorderStatus) { } if in.Devices != nil { in, out := &in.Devices, &out.Devices - *out = make([]TailnetDevice, len(*in)) + *out = make([]RecorderTailnetDevice, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -740,6 +853,26 @@ func (in *RecorderStatus) DeepCopy() *RecorderStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecorderTailnetDevice) DeepCopyInto(out *RecorderTailnetDevice) { + *out = *in + if in.TailnetIPs != nil { + in, out := &in.TailnetIPs, &out.TailnetIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecorderTailnetDevice. +func (in *RecorderTailnetDevice) DeepCopy() *RecorderTailnetDevice { + if in == nil { + return nil + } + out := new(RecorderTailnetDevice) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in Routes) DeepCopyInto(out *Routes) { { diff --git a/k8s-operator/conditions.go b/k8s-operator/conditions.go index 322d1eb349b0e..ace0fb7e33a75 100644 --- a/k8s-operator/conditions.go +++ b/k8s-operator/conditions.go @@ -56,6 +56,18 @@ func SetServiceCondition(svc *corev1.Service, conditionType tsapi.ConditionType, svc.Status.Conditions = conds } +// GetServiceCondition returns Service condition with the specified type, if it exists on the Service. 
+func GetServiceCondition(svc *corev1.Service, conditionType tsapi.ConditionType) *metav1.Condition { + idx := xslices.IndexFunc(svc.Status.Conditions, func(cond metav1.Condition) bool { + return cond.Type == string(conditionType) + }) + + if idx == -1 { + return nil + } + return &svc.Status.Conditions[idx] +} + // RemoveServiceCondition will remove condition of the given type if it exists. func RemoveServiceCondition(svc *corev1.Service, conditionType tsapi.ConditionType) { svc.Status.Conditions = slices.DeleteFunc(svc.Status.Conditions, func(cond metav1.Condition) bool { @@ -63,6 +75,16 @@ func RemoveServiceCondition(svc *corev1.Service, conditionType tsapi.ConditionTy }) } +func EgressServiceIsValidAndConfigured(svc *corev1.Service) bool { + for _, typ := range []tsapi.ConditionType{tsapi.EgressSvcValid, tsapi.EgressSvcConfigured} { + cond := GetServiceCondition(svc, typ) + if cond == nil || cond.Status != metav1.ConditionTrue { + return false + } + } + return true +} + // SetRecorderCondition ensures that Recorder status has a condition with the // given attributes. LastTransitionTime gets set every time condition's status // changes. @@ -71,6 +93,14 @@ func SetRecorderCondition(tsr *tsapi.Recorder, conditionType tsapi.ConditionType tsr.Status.Conditions = conds } +// SetProxyGroupCondition ensures that ProxyGroup status has a condition with the +// given attributes. LastTransitionTime gets set every time condition's status +// changes. 
+func SetProxyGroupCondition(pg *tsapi.ProxyGroup, conditionType tsapi.ConditionType, status metav1.ConditionStatus, reason, message string, gen int64, clock tstime.Clock, logger *zap.SugaredLogger) { + conds := updateCondition(pg.Status.Conditions, conditionType, status, reason, message, gen, clock, logger) + pg.Status.Conditions = conds +} + func updateCondition(conds []metav1.Condition, conditionType tsapi.ConditionType, status metav1.ConditionStatus, reason, message string, gen int64, clock tstime.Clock, logger *zap.SugaredLogger) []metav1.Condition { newCondition := metav1.Condition{ Type: string(conditionType), @@ -107,7 +137,7 @@ func updateCondition(conds []metav1.Condition, conditionType tsapi.ConditionType func ProxyClassIsReady(pc *tsapi.ProxyClass) bool { idx := xslices.IndexFunc(pc.Status.Conditions, func(cond metav1.Condition) bool { - return cond.Type == string(tsapi.ProxyClassready) + return cond.Type == string(tsapi.ProxyClassReady) }) if idx == -1 { return false @@ -116,6 +146,17 @@ func ProxyClassIsReady(pc *tsapi.ProxyClass) bool { return cond.Status == metav1.ConditionTrue && cond.ObservedGeneration == pc.Generation } +func ProxyGroupIsReady(pg *tsapi.ProxyGroup) bool { + idx := xslices.IndexFunc(pg.Status.Conditions, func(cond metav1.Condition) bool { + return cond.Type == string(tsapi.ProxyGroupReady) + }) + if idx == -1 { + return false + } + cond := pg.Status.Conditions[idx] + return cond.Status == metav1.ConditionTrue && cond.ObservedGeneration == pg.Generation +} + func DNSCfgIsReady(cfg *tsapi.DNSConfig) bool { idx := xslices.IndexFunc(cfg.Status.Conditions, func(cond metav1.Condition) bool { return cond.Type == string(tsapi.NameserverReady) diff --git a/k8s-operator/utils.go b/k8s-operator/utils.go index 497f31b600e4f..a1f225fe601c8 100644 --- a/k8s-operator/utils.go +++ b/k8s-operator/utils.go @@ -29,9 +29,9 @@ type Records struct { IP4 map[string][]string `json:"ip4"` } -// TailscaledConfigFileNameForCap returns a tailscaled config 
file name in +// TailscaledConfigFileName returns a tailscaled config file name in // format expected by containerboot for the given CapVer. -func TailscaledConfigFileNameForCap(cap tailcfg.CapabilityVersion) string { +func TailscaledConfigFileName(cap tailcfg.CapabilityVersion) string { if cap < 95 { return "tailscaled" } diff --git a/kube/egressservices/egressservices.go b/kube/egressservices/egressservices.go new file mode 100644 index 0000000000000..04a1c362b00c4 --- /dev/null +++ b/kube/egressservices/egressservices.go @@ -0,0 +1,103 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package egressservices contains shared types for exposing tailnet services to +// cluster workloads. +// These are split into a separate package for consumption of +// non-Kubernetes shared libraries and binaries. Be mindful of not increasing +// dependency size for those consumers when adding anything new here. +package egressservices + +import ( + "encoding/json" + "net/netip" +) + +// KeyEgressServices is name of the proxy state Secret field that contains the +// currently applied egress proxy config. +const KeyEgressServices = "egress-services" + +// Configs contains the desired configuration for egress services keyed by +// service name. +type Configs map[string]Config + +// Config is an egress service configuration. +// TODO(irbekrm): version this? +type Config struct { + // TailnetTarget is the target to which cluster traffic for this service + // should be proxied. + TailnetTarget TailnetTarget `json:"tailnetTarget"` + // Ports contains mappings for ports that can be accessed on the tailnet target. + Ports PortMaps `json:"ports"` +} + +// TailnetTarget is the tailnet target to which traffic for the egress service +// should be proxied. Exactly one of IP or FQDN should be set. +type TailnetTarget struct { + // IP is the tailnet IP of the target. + IP string `json:"ip"` + // FQDN is the full tailnet FQDN of the target. 
+	FQDN string `json:"fqdn"`
+}
+
+// PortMap is a mapping between match port on which proxy receives cluster
+// traffic and target port where traffic received on match port should be
+// forwarded to.
+type PortMap struct {
+	Protocol string `json:"protocol"`
+	MatchPort uint16 `json:"matchPort"`
+	TargetPort uint16 `json:"targetPort"`
+}
+
+type PortMaps map[PortMap]struct{}
+
+// PortMaps is a list of PortMap structs; however, we want to use it as a set
+// with efficient lookups in code. It implements custom JSON marshalling
+// methods to convert between being a list in JSON and a set (map with empty
+// values) in code.
+var _ json.Marshaler = &PortMaps{}
+var _ json.Marshaler = PortMaps{}
+var _ json.Unmarshaler = &PortMaps{}
+
+func (p *PortMaps) UnmarshalJSON(data []byte) error {
+	*p = make(map[PortMap]struct{})
+
+	var l []PortMap
+	if err := json.Unmarshal(data, &l); err != nil {
+		return err
+	}
+
+	for _, pm := range l {
+		(*p)[pm] = struct{}{}
+	}
+
+	return nil
+}
+
+func (p PortMaps) MarshalJSON() ([]byte, error) {
+	l := make([]PortMap, 0, len(p))
+	for pm := range p {
+		l = append(l, pm)
+	}
+
+	return json.Marshal(l)
+}
+
+// Status represents the currently configured firewall rules for all egress
+// services for a proxy identified by the PodIP.
+type Status struct {
+	PodIPv4 string `json:"podIPv4"`
+	// All egress service status keyed by service name.
+	Services map[string]*ServiceStatus `json:"services"`
+}
+
+// ServiceStatus is the currently configured firewall rules for an egress
+// service.
+type ServiceStatus struct {
+	Ports PortMaps `json:"ports"`
+	// TailnetTargetIPs are the tailnet target IPs that were used to
+	// configure these firewall rules. For a TailnetTarget with IP set, this
+	// is the same as IP.
+ TailnetTargetIPs []netip.Addr `json:"tailnetTargetIPs"` + TailnetTarget TailnetTarget `json:"tailnetTarget"` +} diff --git a/kube/egressservices/egressservices_test.go b/kube/egressservices/egressservices_test.go new file mode 100644 index 0000000000000..d6f952ea0a463 --- /dev/null +++ b/kube/egressservices/egressservices_test.go @@ -0,0 +1,77 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package egressservices + +import ( + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" +) + +func Test_jsonUnmarshalConfig(t *testing.T) { + tests := []struct { + name string + bs []byte + wantsCfg Config + wantsErr bool + }{ + { + name: "success", + bs: []byte(`{"ports":[{"protocol":"tcp","matchPort":4003,"targetPort":80}]}`), + wantsCfg: Config{Ports: map[PortMap]struct{}{{Protocol: "tcp", MatchPort: 4003, TargetPort: 80}: {}}}, + }, + { + name: "failure_invalid_format", + bs: []byte(`{"ports":{"tcp:80":{}}}`), + wantsCfg: Config{Ports: map[PortMap]struct{}{}}, + wantsErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := Config{} + if gotErr := json.Unmarshal(tt.bs, &cfg); (gotErr != nil) != tt.wantsErr { + t.Errorf("json.Unmarshal returned error %v, wants error %v", gotErr, tt.wantsErr) + } + if diff := cmp.Diff(cfg, tt.wantsCfg); diff != "" { + t.Errorf("unexpected secrets (-got +want):\n%s", diff) + } + }) + } +} + +func Test_jsonMarshalConfig(t *testing.T) { + tests := []struct { + name string + protocol string + matchPort uint16 + targetPort uint16 + wantsBs []byte + }{ + { + name: "success", + protocol: "tcp", + matchPort: 4003, + targetPort: 80, + wantsBs: []byte(`{"tailnetTarget":{"ip":"","fqdn":""},"ports":[{"protocol":"tcp","matchPort":4003,"targetPort":80}]}`), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := Config{Ports: PortMaps{{ + Protocol: tt.protocol, + MatchPort: tt.matchPort, + TargetPort: tt.targetPort}: {}}} + + gotBs, 
gotErr := json.Marshal(&cfg) + if gotErr != nil { + t.Errorf("json.Marshal(%+#v) returned unexpected error %v", cfg, gotErr) + } + if diff := cmp.Diff(gotBs, tt.wantsBs); diff != "" { + t.Errorf("unexpected secrets (-got +want):\n%s", diff) + } + }) + } +} diff --git a/kube/kubeclient/client.go b/kube/kubeclient/client.go index 35cb4f713e5a6..e8ddec75d1584 100644 --- a/kube/kubeclient/client.go +++ b/kube/kubeclient/client.go @@ -257,8 +257,8 @@ type JSONPatch struct { // It currently (2023-03-02) only supports "add" and "remove" operations. func (c *client) JSONPatchSecret(ctx context.Context, name string, patch []JSONPatch) error { for _, p := range patch { - if p.Op != "remove" && p.Op != "add" { - panic(fmt.Errorf("unsupported JSON patch operation: %q", p.Op)) + if p.Op != "remove" && p.Op != "add" && p.Op != "replace" { + return fmt.Errorf("unsupported JSON patch operation: %q", p.Op) } } return c.doRequest(ctx, "PATCH", c.secretURL(name), patch, nil, setHeader("Content-Type", "application/json-patch+json")) diff --git a/kube/kubetypes/metrics.go b/kube/kubetypes/metrics.go index e9e30cfc7e829..b183f1f6f79f7 100644 --- a/kube/kubetypes/metrics.go +++ b/kube/kubetypes/metrics.go @@ -21,4 +21,6 @@ const ( MetricConnectorWithExitNodeCount = "k8s_connector_exitnode_resources" MetricNameserverCount = "k8s_nameserver_resources" MetricRecorderCount = "k8s_recorder_resources" + MetricEgressServiceCount = "k8s_egress_service_resources" + MetricProxyGroupCount = "k8s_proxygroup_resources" ) diff --git a/licenses/android.md b/licenses/android.md index 64e321de6a9ed..ef53117e8ceb7 100644 --- a/licenses/android.md +++ b/licenses/android.md @@ -26,6 +26,7 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.19.0/LICENSE)) - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.19.0/internal/sync/singleflight/LICENSE)) - [github.com/bits-and-blooms/bitset](https://pkg.go.dev/github.com/bits-and-blooms/bitset) ([BSD-3-Clause](https://github.com/bits-and-blooms/bitset/blob/v1.13.0/LICENSE)) + - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.6.0/LICENSE)) @@ -37,7 +38,7 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) - [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - - [github.com/illarion/gonotify](https://pkg.go.dev/github.com/illarion/gonotify) ([MIT](https://github.com/illarion/gonotify/blob/v1.0.1/LICENSE)) + - [github.com/illarion/gonotify/v2](https://pkg.go.dev/github.com/illarion/gonotify/v2) ([MIT](https://github.com/illarion/gonotify/blob/v2.0.3/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/8c70d406f6d2/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) @@ -57,32 +58,30 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/tailscale/golang-x-crypto](https://pkg.go.dev/github.com/tailscale/golang-x-crypto) ([BSD-3-Clause](https://github.com/tailscale/golang-x-crypto/blob/3fde5e568aa4/LICENSE)) - [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE)) - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE)) - - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/cabfb018fe85/LICENSE)) + - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/b535050b2aa4/LICENSE)) - [github.com/tailscale/tailscale-android/libtailscale](https://pkg.go.dev/github.com/tailscale/tailscale-android/libtailscale) ([BSD-3-Clause](https://github.com/tailscale/tailscale-android/blob/HEAD/LICENSE)) - - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/71393c576b98/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/799c1978fafc/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tcnksm/go-httpstat](https://pkg.go.dev/github.com/tcnksm/go-httpstat) ([MIT](https://github.com/tcnksm/go-httpstat/blob/v0.2.0/LICENSE)) - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) 
([BSD-3-Clause](https://github.com/u-root/uio/blob/a3c409a6018e/LICENSE)) - - [github.com/vishvananda/netlink/nl](https://pkg.go.dev/github.com/vishvananda/netlink/nl) ([Apache-2.0](https://github.com/vishvananda/netlink/blob/v1.2.1-beta.2/LICENSE)) - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/intern](https://pkg.go.dev/go4.org/intern) ([BSD-3-Clause](https://github.com/go4org/intern/blob/ae77deb06f29/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/4f986261bf13/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - [go4.org/unsafe/assume-no-moving-gc](https://pkg.go.dev/go4.org/unsafe/assume-no-moving-gc) ([BSD-3-Clause](https://github.com/go4org/unsafe-assume-no-moving-gc/blob/e7c30c78aeb2/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.25.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.26.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/1b970713:LICENSE)) - - [golang.org/x/mobile](https://pkg.go.dev/golang.org/x/mobile) ([BSD-3-Clause](https://cs.opensource.google/go/x/mobile/+/c58ccf4b:LICENSE)) - - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.19.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.27.0:LICENSE)) - - 
[golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.7.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.22.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.22.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.16.0:LICENSE)) + - [golang.org/x/mobile](https://pkg.go.dev/golang.org/x/mobile) ([BSD-3-Clause](https://cs.opensource.google/go/x/mobile/+/81131f64:LICENSE)) + - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.20.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.28.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.8.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.23.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.23.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.17.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.5.0:LICENSE)) - - [golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.23.0:LICENSE)) + - [golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.24.0:LICENSE)) - 
[gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/64c016c92987/LICENSE)) - [inet.af/netaddr](https://pkg.go.dev/inet.af/netaddr) ([BSD-3-Clause](Unknown)) - - [nhooyr.io/websocket](https://pkg.go.dev/nhooyr.io/websocket) ([ISC](https://github.com/nhooyr/websocket-old/blob/v1.8.10/LICENSE.txt)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/apple.md b/licenses/apple.md index 5dd9b975c0f15..4cb100c625942 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -12,22 +12,22 @@ See also the dependencies in the [Tailscale CLI][]. - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.26.1/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.27.11/config/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.11/credentials/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.1/feature/ec2/imds/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.5/internal/configsources/LICENSE.txt)) - - 
[github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.5/internal/endpoints/v2/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.0/internal/ini/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.26.1/internal/sync/singleflight/LICENSE)) - - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.11.2/service/internal/accept-encoding/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.11.7/service/internal/presigned-url/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.30.4/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.27.28/config/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.28/credentials/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) 
([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.12/feature/ec2/imds/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.16/internal/configsources/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.16/internal/endpoints/v2/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.1/internal/ini/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.30.4/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.11.4/service/internal/accept-encoding/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.11.18/service/internal/presigned-url/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.45.0/service/ssm/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) 
([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.20.5/service/sso/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.23.4/service/ssooidc/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.28.6/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.20.2/LICENSE)) - - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.20.2/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.22.5/service/sso/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.26.5/service/ssooidc/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.30.4/service/sts/LICENSE.txt)) + - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.20.4/LICENSE)) + - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.20.4/internal/sync/singleflight/LICENSE)) - 
[github.com/bits-and-blooms/bitset](https://pkg.go.dev/github.com/bits-and-blooms/bitset) ([BSD-3-Clause](https://github.com/bits-and-blooms/bitset/blob/v1.13.0/LICENSE)) - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) @@ -42,7 +42,7 @@ See also the dependencies in the [Tailscale CLI][]. - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) - [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - - [github.com/illarion/gonotify](https://pkg.go.dev/github.com/illarion/gonotify) ([MIT](https://github.com/illarion/gonotify/blob/v1.0.1/LICENSE)) + - [github.com/illarion/gonotify/v2](https://pkg.go.dev/github.com/illarion/gonotify/v2) ([MIT](https://github.com/illarion/gonotify/blob/v2.0.3/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/15c9b8791914/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) @@ -64,13 +64,12 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/tailscale/golang-x-crypto](https://pkg.go.dev/github.com/tailscale/golang-x-crypto) ([BSD-3-Clause](https://github.com/tailscale/golang-x-crypto/blob/3fde5e568aa4/LICENSE)) - [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE)) - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE)) - - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/cabfb018fe85/LICENSE)) + - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/b535050b2aa4/LICENSE)) - - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/71393c576b98/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/799c1978fafc/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tcnksm/go-httpstat](https://pkg.go.dev/github.com/tcnksm/go-httpstat) ([MIT](https://github.com/tcnksm/go-httpstat/blob/v0.2.0/LICENSE)) - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/a3c409a6018e/LICENSE)) - - [github.com/vishvananda/netlink/nl](https://pkg.go.dev/github.com/vishvananda/netlink/nl) ([Apache-2.0](https://github.com/vishvananda/netlink/blob/v1.2.1-beta.2/LICENSE)) - 
[github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index 2fd07fc3f058e..544aa91cecab1 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -52,7 +52,7 @@ Some packages may only be included on certain architectures or operating systems - [github.com/gorilla/csrf](https://pkg.go.dev/github.com/gorilla/csrf) ([BSD-3-Clause](https://github.com/gorilla/csrf/blob/v1.7.2/LICENSE)) - [github.com/gorilla/securecookie](https://pkg.go.dev/github.com/gorilla/securecookie) ([BSD-3-Clause](https://github.com/gorilla/securecookie/blob/v1.1.2/LICENSE)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - - [github.com/illarion/gonotify](https://pkg.go.dev/github.com/illarion/gonotify) ([MIT](https://github.com/illarion/gonotify/blob/v1.0.1/LICENSE)) + - [github.com/illarion/gonotify/v2](https://pkg.go.dev/github.com/illarion/gonotify/v2) ([MIT](https://github.com/illarion/gonotify/blob/v2.0.3/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/8c70d406f6d2/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) @@ -81,17 +81,16 @@ Some packages may only be 
included on certain architectures or operating systems - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - [github.com/tailscale/golang-x-crypto](https://pkg.go.dev/github.com/tailscale/golang-x-crypto) ([BSD-3-Clause](https://github.com/tailscale/golang-x-crypto/blob/3fde5e568aa4/LICENSE)) - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE)) - - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/cabfb018fe85/LICENSE)) + - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/b535050b2aa4/LICENSE)) - [github.com/tailscale/web-client-prebuilt](https://pkg.go.dev/github.com/tailscale/web-client-prebuilt) ([BSD-3-Clause](https://github.com/tailscale/web-client-prebuilt/blob/5db17b287bf1/LICENSE)) - [github.com/tailscale/wf](https://pkg.go.dev/github.com/tailscale/wf) ([BSD-3-Clause](https://github.com/tailscale/wf/blob/6fbb0a674ee6/LICENSE)) - - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/71393c576b98/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/799c1978fafc/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tcnksm/go-httpstat](https://pkg.go.dev/github.com/tcnksm/go-httpstat) 
([MIT](https://github.com/tcnksm/go-httpstat/blob/v0.2.0/LICENSE)) - [github.com/toqueteos/webbrowser](https://pkg.go.dev/github.com/toqueteos/webbrowser) ([MIT](https://github.com/toqueteos/webbrowser/blob/v1.2.0/LICENSE.md)) - [github.com/u-root/u-root/pkg/termios](https://pkg.go.dev/github.com/u-root/u-root/pkg/termios) ([BSD-3-Clause](https://github.com/u-root/u-root/blob/v0.12.0/LICENSE)) - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/a3c409a6018e/LICENSE)) - - [github.com/vishvananda/netlink/nl](https://pkg.go.dev/github.com/vishvananda/netlink/nl) ([Apache-2.0](https://github.com/vishvananda/netlink/blob/v1.2.1-beta.2/LICENSE)) - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/4f986261bf13/LICENSE)) diff --git a/licenses/windows.md b/licenses/windows.md index 77281a7ab2ac3..e7f7f6f13ca08 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -13,22 +13,22 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/alexbrainman/sspi](https://pkg.go.dev/github.com/alexbrainman/sspi) ([BSD-3-Clause](https://github.com/alexbrainman/sspi/blob/1a75b4708caa/LICENSE)) - [github.com/apenwarr/fixconsole](https://pkg.go.dev/github.com/apenwarr/fixconsole) ([Apache-2.0](https://github.com/apenwarr/fixconsole/blob/5a9f6489cc29/LICENSE)) - [github.com/apenwarr/w32](https://pkg.go.dev/github.com/apenwarr/w32) ([BSD-3-Clause](https://github.com/apenwarr/w32/blob/aa00fece76ab/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.26.1/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.27.11/config/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.11/credentials/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.1/feature/ec2/imds/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.5/internal/configsources/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.5/internal/endpoints/v2/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.0/internal/ini/LICENSE.txt)) - - 
[github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.26.1/internal/sync/singleflight/LICENSE)) - - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.11.2/service/internal/accept-encoding/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.11.7/service/internal/presigned-url/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.30.4/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.27.28/config/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.28/credentials/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.12/feature/ec2/imds/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.16/internal/configsources/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) 
([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.16/internal/endpoints/v2/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.1/internal/ini/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.30.4/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.11.4/service/internal/accept-encoding/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.11.18/service/internal/presigned-url/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.45.0/service/ssm/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.20.5/service/sso/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.23.4/service/ssooidc/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) 
([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.28.6/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.20.2/LICENSE)) - - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.20.2/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.22.5/service/sso/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.26.5/service/ssooidc/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.30.4/service/sts/LICENSE.txt)) + - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.20.4/LICENSE)) + - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.20.4/internal/sync/singleflight/LICENSE)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/b75a8a7d7eb0/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) @@ -56,12 +56,11 @@ 
Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE)) - - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/cabfb018fe85/LICENSE)) - - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/4327221bd339/LICENSE)) + - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) + - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/52804fd3056a/LICENSE)) - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/6580b55d49ca/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE)) - - [github.com/vishvananda/netlink/nl](https://pkg.go.dev/github.com/vishvananda/netlink/nl) ([Apache-2.0](https://github.com/vishvananda/netlink/blob/v1.2.1-beta.2/LICENSE)) - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) - 
[github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index 9e00a3ad498c1..0d2af77f2d703 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -576,37 +576,18 @@ func NewWithConfigPath(collection, dir, cmdName string, netMon *netmon.Monitor, if envknob.NoLogsNoSupport() || testenv.InTest() { logf("You have disabled logging. Tailscale will not be able to provide support.") conf.HTTPC = &http.Client{Transport: noopPretendSuccessTransport{}} - } else if val := getLogTarget(); val != "" { - logf("You have enabled a non-default log target. Doing without being told to by Tailscale staff or your network administrator will make getting support difficult.") - conf.BaseURL = val - u, _ := url.Parse(val) - conf.HTTPC = &http.Client{Transport: NewLogtailTransport(u.Host, netMon, health, logf)} - } - - filchOptions := filch.Options{ - ReplaceStderr: redirectStderrToLogPanics(), - } - filchPrefix := filepath.Join(dir, cmdName) - - // NAS disks cannot hibernate if we're writing logs to them all the time. - // https://github.com/tailscale/tailscale/issues/3551 - if runtime.GOOS == "linux" && (distro.Get() == distro.Synology || distro.Get() == distro.QNAP) { - tmpfsLogs := "/tmp/tailscale-logs" - if err := os.MkdirAll(tmpfsLogs, 0755); err == nil { - filchPrefix = filepath.Join(tmpfsLogs, cmdName) - filchOptions.MaxFileSize = 1 << 20 - } else { - // not a fatal error, we can leave the log files on the spinning disk - logf("Unable to create /tmp directory for log storage: %v\n", err) + } else { + // Only attach an on-disk filch buffer if we are going to be sending logs. + // No reason to persist them locally just to drop them later. 
+ attachFilchBuffer(&conf, dir, cmdName, logf) + + if val := getLogTarget(); val != "" { + logf("You have enabled a non-default log target. Doing without being told to by Tailscale staff or your network administrator will make getting support difficult.") + conf.BaseURL = val + u, _ := url.Parse(val) + conf.HTTPC = &http.Client{Transport: NewLogtailTransport(u.Host, netMon, health, logf)} } - } - filchBuf, filchErr := filch.New(filchPrefix, filchOptions) - if filchBuf != nil { - conf.Buffer = filchBuf - if filchBuf.OrigStderr != nil { - conf.Stderr = filchBuf.OrigStderr - } } lw := logtail.NewLogger(conf, logf) @@ -631,9 +612,6 @@ func NewWithConfigPath(collection, dir, cmdName string, netMon *netmon.Monitor, goVersion(), os.Args) logf("LogID: %v", newc.PublicID) - if filchErr != nil { - logf("filch failed: %v", filchErr) - } if earlyErrBuf.Len() != 0 { logf("%s", earlyErrBuf.Bytes()) } @@ -645,6 +623,40 @@ func NewWithConfigPath(collection, dir, cmdName string, netMon *netmon.Monitor, } } +// attachFilchBuffer creates an on-disk ring buffer using filch and attaches +// it to the logtail config. Note that this is optional; if no buffer is set, +// logtail will use an in-memory buffer. +func attachFilchBuffer(conf *logtail.Config, dir, cmdName string, logf logger.Logf) { + filchOptions := filch.Options{ + ReplaceStderr: redirectStderrToLogPanics(), + } + filchPrefix := filepath.Join(dir, cmdName) + + // NAS disks cannot hibernate if we're writing logs to them all the time. 
+ // https://github.com/tailscale/tailscale/issues/3551 + if runtime.GOOS == "linux" && (distro.Get() == distro.Synology || distro.Get() == distro.QNAP) { + tmpfsLogs := "/tmp/tailscale-logs" + if err := os.MkdirAll(tmpfsLogs, 0755); err == nil { + filchPrefix = filepath.Join(tmpfsLogs, cmdName) + filchOptions.MaxFileSize = 1 << 20 + } else { + // not a fatal error, we can leave the log files on the spinning disk + logf("Unable to create /tmp directory for log storage: %v\n", err) + } + } + + filchBuf, filchErr := filch.New(filchPrefix, filchOptions) + if filchBuf != nil { + conf.Buffer = filchBuf + if filchBuf.OrigStderr != nil { + conf.Stderr = filchBuf.OrigStderr + } + } + if filchErr != nil { + logf("filch failed: %v", filchErr) + } +} + // dialLog is used by NewLogtailTransport to log the happy path of its // own dialing. // @@ -801,6 +813,8 @@ func NewLogtailTransport(host string, netMon *netmon.Monitor, health *health.Tra } tr.TLSClientConfig = tlsdial.Config(host, health, tr.TLSClientConfig) + // Force TLS 1.3 since we know log.tailscale.io supports it. + tr.TLSClientConfig.MinVersion = tls.VersionTLS13 return tr } diff --git a/metrics/multilabelmap.go b/metrics/multilabelmap.go index df2ae5073bf5f..223a55a75bf1b 100644 --- a/metrics/multilabelmap.go +++ b/metrics/multilabelmap.go @@ -97,6 +97,7 @@ type KeyValue[T comparable] struct { } func (v *MultiLabelMap[T]) String() string { + // NOTE: This has to be valid JSON because it's used by expvar. return `"MultiLabelMap"` } @@ -281,3 +282,16 @@ func (v *MultiLabelMap[T]) Do(f func(KeyValue[T])) { f(KeyValue[T]{e.key, e.val}) } } + +// ResetAllForTest resets all values for metrics to zero. +// Should only be used in tests. 
+func (v *MultiLabelMap[T]) ResetAllForTest() { + v.Do(func(kv KeyValue[T]) { + switch v := kv.Value.(type) { + case *expvar.Int: + v.Set(0) + case *expvar.Float: + v.Set(0) + } + }) +} diff --git a/metrics/multilabelmap_test.go b/metrics/multilabelmap_test.go index b53e15ec8913e..195696234e545 100644 --- a/metrics/multilabelmap_test.go +++ b/metrics/multilabelmap_test.go @@ -5,6 +5,7 @@ package metrics import ( "bytes" + "encoding/json" "expvar" "fmt" "io" @@ -129,3 +130,21 @@ func BenchmarkMultiLabelWriteAllocs(b *testing.B) { m.WritePrometheus(w, "test") } } + +func TestMultiLabelMapExpvar(t *testing.T) { + m := new(MultiLabelMap[L2]) + m.Add(L2{"a", "b"}, 2) + m.Add(L2{"b", "c"}, 4) + + em := new(expvar.Map) + em.Set("multi", m) + + // Ensure that the String method is valid JSON to ensure that it can be + // used by expvar. + encoded := []byte(em.String()) + if !json.Valid(encoded) { + t.Fatalf("invalid JSON: %s", encoded) + } + + t.Logf("em = %+v", em) +} diff --git a/net/captivedetection/captivedetection.go b/net/captivedetection/captivedetection.go index e0a4b0a250768..c6e8bca3a19a2 100644 --- a/net/captivedetection/captivedetection.go +++ b/net/captivedetection/captivedetection.go @@ -112,7 +112,7 @@ func (d *Detector) detectCaptivePortalWithGOOS(ctx context.Context, netMon *netm // interfaces on iOS and Android, respectively, and would be needlessly battery-draining. 
func interfaceNameDoesNotNeedCaptiveDetection(ifName string, goos string) bool { ifName = strings.ToLower(ifName) - excludedPrefixes := []string{"tailscale", "tun", "tap", "docker", "kube", "wg"} + excludedPrefixes := []string{"tailscale", "tun", "tap", "docker", "kube", "wg", "ipsec"} if goos == "windows" { excludedPrefixes = append(excludedPrefixes, "loopback", "tunnel", "ppp", "isatap", "teredo", "6to4") } else if goos == "darwin" || goos == "ios" { @@ -179,6 +179,9 @@ func (d *Detector) detectOnInterface(ctx context.Context, ifIndex int, endpoints // verifyCaptivePortalEndpoint checks if the given Endpoint is a captive portal by making an HTTP request to the // given Endpoint URL using the interface with index ifIndex, and checking if the response looks like a captive portal. func (d *Detector) verifyCaptivePortalEndpoint(ctx context.Context, e Endpoint, ifIndex int) (found bool, err error) { + ctx, cancel := context.WithTimeout(ctx, Timeout) + defer cancel() + req, err := http.NewRequestWithContext(ctx, "GET", e.URL.String(), nil) if err != nil { return false, err @@ -213,7 +216,8 @@ func (d *Detector) dialContext(ctx context.Context, network, addr string) (net.C ifIndex := d.currIfIndex - dl := net.Dialer{ + dl := &net.Dialer{ + Timeout: Timeout, Control: func(network, address string, c syscall.RawConn) error { return setSocketInterfaceIndex(c, ifIndex, d.logf) }, diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index b8eaccbdd22eb..846ca3d5e4fb5 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -7,6 +7,7 @@ import ( "bytes" "context" "crypto/sha256" + "crypto/tls" "encoding/base64" "encoding/binary" "errors" @@ -58,10 +59,20 @@ func truncatedFlagSet(pkt []byte) bool { } const ( - // dohTransportTimeout is how long to keep idle HTTP - // connections open to DNS-over-HTTPs servers. This is pretty - // arbitrary. 
- dohTransportTimeout = 30 * time.Second + // dohIdleConnTimeout is how long to keep idle HTTP connections + // open to DNS-over-HTTPS servers. 10 seconds is a sensible + // default, as it's long enough to handle a burst of queries + // coming in a row, but short enough to not keep idle connections + // open for too long. In theory, idle connections could be kept + // open for a long time without any battery impact as no traffic + // is supposed to be flowing on them. + // However, in practice, DoH servers will send TCP keepalives (e.g. + // NextDNS sends them every ~10s). Handling these keepalives + // wakes up the modem, and that uses battery. Therefore, we keep + // the idle timeout low enough to allow idle connections to be + // closed during an extended period with no DNS queries, killing + // keepalive network activity. + dohIdleConnTimeout = 10 * time.Second // dohTransportTimeout is how much of a head start to give a DoH query // that was upgraded from a well-known public DNS provider's IP before @@ -426,19 +437,26 @@ func (f *forwarder) getKnownDoHClientForProvider(urlBase string) (c *http.Client SingleHostStaticResult: allIPs, Logf: f.logf, }) + tlsConfig := &tls.Config{ + // Enforce TLS 1.3, as all of our supported DNS-over-HTTPS servers are compatible with it + // (see tailscale.com/net/dns/publicdns/publicdns.go). + MinVersion: tls.VersionTLS13, + } c = &http.Client{ Transport: &http.Transport{ ForceAttemptHTTP2: true, - IdleConnTimeout: dohTransportTimeout, + IdleConnTimeout: dohIdleConnTimeout, // On mobile platforms TCP KeepAlive is disabled in the dialer, // ensure that we timeout if the connection appears to be hung. 
ResponseHeaderTimeout: 10 * time.Second, + MaxIdleConnsPerHost: 1, DialContext: func(ctx context.Context, netw, addr string) (net.Conn, error) { if !strings.HasPrefix(netw, "tcp") { return nil, fmt.Errorf("unexpected network %q", netw) } return dialer(ctx, netw, addr) }, + TLSClientConfig: tlsConfig, }, } if f.dohClient == nil { @@ -834,6 +852,17 @@ func (f *forwarder) resolvers(domain dnsname.FQDN) []resolverAndDelay { return cloudHostFallback // or nil if no fallback } +// GetUpstreamResolvers returns the resolvers that would be used to resolve +// the given FQDN. +func (f *forwarder) GetUpstreamResolvers(name dnsname.FQDN) []*dnstype.Resolver { + resolvers := f.resolvers(name) + upstreamResolvers := make([]*dnstype.Resolver, 0, len(resolvers)) + for _, r := range resolvers { + upstreamResolvers = append(upstreamResolvers, r.name) + } + return upstreamResolvers +} + // forwardQuery is information and state about a forwarded DNS query that's // being sent to 1 or more upstreams. // @@ -1093,6 +1122,8 @@ func nxDomainResponse(req packet) (res packet, err error) { // TODO(bradfitz): should we add an SOA record in the Authority // section too? (for the nxdomain negative caching TTL) // For which zone? Does iOS care? 
+ b.StartQuestions() + b.Question(p.Question) res.bs, err = b.Finish() res.addr = req.addr return res, err diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go index 465618a54596c..09d8109018156 100644 --- a/net/dns/resolver/forwarder_test.go +++ b/net/dns/resolver/forwarder_test.go @@ -15,6 +15,7 @@ import ( "net/netip" "os" "reflect" + "slices" "strings" "sync" "sync/atomic" @@ -406,7 +407,7 @@ func enableDebug(tb testing.TB) { func makeLargeResponse(tb testing.TB, domain string) (request, response []byte) { name := dns.MustNewName(domain) - builder := dns.NewBuilder(nil, dns.Header{}) + builder := dns.NewBuilder(nil, dns.Header{Response: true}) builder.StartQuestions() builder.Question(dns.Question{ Name: name, @@ -463,19 +464,26 @@ func runTestQuery(tb testing.TB, port uint16, request []byte, modify func(*forwa modify(fwd) } - fq := &forwardQuery{ - txid: getTxID(request), - packet: request, - closeOnCtxDone: new(closePool), - family: "tcp", - } - defer fq.closeOnCtxDone.Close() - rr := resolverAndDelay{ name: &dnstype.Resolver{Addr: fmt.Sprintf("127.0.0.1:%d", port)}, } - return fwd.send(context.Background(), fq, rr) + rpkt := packet{ + bs: request, + family: "tcp", + addr: netip.MustParseAddrPort("127.0.0.1:12345"), + } + + rchan := make(chan packet, 1) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + tb.Cleanup(cancel) + err = fwd.forwardWithDestChan(ctx, rpkt, rchan, rr) + select { + case res := <-rchan: + return res.bs, err + case <-ctx.Done(): + return nil, ctx.Err() + } } func mustRunTestQuery(tb testing.TB, port uint16, request []byte, modify func(*forwarder)) []byte { @@ -609,7 +617,8 @@ func TestForwarderTCPFallbackError(t *testing.T) { name := dns.MustNewName(domain) builder := dns.NewBuilder(nil, dns.Header{ - RCode: dns.RCodeServerFailure, + Response: true, + RCode: dns.RCodeServerFailure, }) builder.StartQuestions() builder.Question(dns.Question{ @@ -658,3 +667,58 @@ func 
TestForwarderTCPFallbackError(t *testing.T) { t.Errorf("wanted errServerFailure, got: %v", err) } } + +// mdnsResponder at minimum has an expectation that NXDOMAIN must include the +// question, otherwise it will penalize our server (#13511). +func TestNXDOMAINIncludesQuestion(t *testing.T) { + var domain = "lb._dns-sd._udp.example.org." + + // Our response is a NXDOMAIN + response := func() []byte { + name := dns.MustNewName(domain) + + builder := dns.NewBuilder(nil, dns.Header{ + Response: true, + RCode: dns.RCodeNameError, + }) + builder.StartQuestions() + builder.Question(dns.Question{ + Name: name, + Type: dns.TypePTR, + Class: dns.ClassINET, + }) + response, err := builder.Finish() + if err != nil { + t.Fatal(err) + } + return response + }() + + // Our request is a single PTR query for the domain in the answer, above. + request := func() []byte { + builder := dns.NewBuilder(nil, dns.Header{}) + builder.StartQuestions() + builder.Question(dns.Question{ + Name: dns.MustNewName(domain), + Type: dns.TypePTR, + Class: dns.ClassINET, + }) + request, err := builder.Finish() + if err != nil { + t.Fatal(err) + } + return request + }() + + port := runDNSServer(t, nil, response, func(isTCP bool, gotRequest []byte) { + }) + + res, err := runTestQuery(t, port, request, nil) + if err != nil { + t.Fatal(err) + } + + if !slices.Equal(res, response) { + t.Errorf("invalid response\ngot: %+v\nwant: %+v", res, response) + } +} diff --git a/net/dns/resolver/tsdns.go b/net/dns/resolver/tsdns.go index 90e447020ed79..d196ad4d6c1f0 100644 --- a/net/dns/resolver/tsdns.go +++ b/net/dns/resolver/tsdns.go @@ -337,6 +337,12 @@ func (r *Resolver) Query(ctx context.Context, bs []byte, family string, from net return out, err } +// GetUpstreamResolvers returns the resolvers that would be used to resolve +// the given FQDN. 
+func (r *Resolver) GetUpstreamResolvers(name dnsname.FQDN) []*dnstype.Resolver { + return r.forwarder.GetUpstreamResolvers(name) +} + // parseExitNodeQuery parses a DNS request packet. // It returns nil if it's malformed or lacking a question. func parseExitNodeQuery(q []byte) *response { diff --git a/net/dnscache/dnscache.go b/net/dnscache/dnscache.go index 3d061e2078a1d..2cbea6c0fd896 100644 --- a/net/dnscache/dnscache.go +++ b/net/dnscache/dnscache.go @@ -416,10 +416,10 @@ func (d *dialer) DialContext(ctx context.Context, network, address string) (retC if len(i4s) < 2 { d.dnsCache.dlogf("dialing %s, %s for %s", network, ip, address) c, err := dc.dialOne(ctx, ip.Unmap()) - if err == nil || ctx.Err() != nil { + if err == nil || ctx.Err() != nil || !ip6.IsValid() { return c, err } - // Fall back to trying IPv6, if any. + // Fall back to trying IPv6. return dc.dialOne(ctx, ip6) } diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index 8eb50a61dd340..dbb85cf9c0945 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -52,9 +52,9 @@ var ( // The various default timeouts for things. const ( - // overallProbeTimeout is the maximum amount of time netcheck will + // ReportTimeout is the maximum amount of time netcheck will // spend gathering a single report. - overallProbeTimeout = 5 * time.Second + ReportTimeout = 5 * time.Second // stunTimeout is the maximum amount of time netcheck will spend // probing with STUN packets without getting a reply before // switching to HTTP probing, on the assumption that outbound UDP @@ -63,6 +63,11 @@ const ( // icmpProbeTimeout is the maximum amount of time netcheck will spend // probing with ICMP packets. icmpProbeTimeout = 1 * time.Second + // httpsProbeTimeout is the maximum amount of time netcheck will spend + // probing over HTTPS. This is set equal to ReportTimeout to allow HTTPS + // whatever time is left following STUN, which precedes it in a netcheck + // report. 
+ httpsProbeTimeout = ReportTimeout // defaultActiveRetransmitTime is the retransmit interval we use // for STUN probes when we're in steady state (not in start-up), // but don't have previous latency information for a DERP @@ -498,6 +503,10 @@ func makeProbePlanInitial(dm *tailcfg.DERPMap, ifState *netmon.State) (plan prob plan = make(probePlan) for _, reg := range dm.Regions { + if len(reg.Nodes) == 0 { + continue + } + var p4 []probe var p6 []probe for try := 0; try < 3; try++ { @@ -719,6 +728,9 @@ type GetReportOpts struct { // If no communication with that region has occurred, or it occurred // too far in the past, this function should return the zero time. GetLastDERPActivity func(int) time.Time + // OnlyTCP443 constrains netcheck reporting to measurements over TCP port + // 443. + OnlyTCP443 bool } // getLastDERPActivity calls o.GetLastDERPActivity if both o and @@ -731,6 +743,10 @@ func (o *GetReportOpts) getLastDERPActivity(region int) time.Time { } // GetReport gets a report. The 'opts' argument is optional and can be nil. +// Callers are discouraged from passing a ctx with an arbitrary deadline as this +// may cause GetReport to return prematurely before all reporting methods have +// executed. ReportTimeout is the maximum amount of time GetReport will spend +// gathering a report. // // It may not be called concurrently with itself. func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetReportOpts) (_ *Report, reterr error) { @@ -743,7 +759,7 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe // Mask user context with ours that we guarantee to cancel so // we can depend on it being closed in goroutines later. 
// (User ctx might be context.Background, etc) - ctx, cancel := context.WithTimeout(ctx, overallProbeTimeout) + ctx, cancel := context.WithTimeout(ctx, ReportTimeout) defer cancel() ctx = sockstats.WithSockStats(ctx, sockstats.LabelNetcheckClient, c.logf) @@ -829,7 +845,10 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe go rs.probePortMapServices() } - plan := makeProbePlan(dm, ifState, last) + var plan probePlan + if opts == nil || !opts.OnlyTCP443 { + plan = makeProbePlan(dm, ifState, last) + } // If we're doing a full probe, also check for a captive portal. We // delay by a bit to wait for UDP STUN to finish, to avoid the probe if @@ -921,19 +940,20 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe } } if len(need) > 0 { - // Kick off ICMP in parallel to HTTPS checks; we don't - // reuse the same WaitGroup for those probes because we - // need to close the underlying Pinger after a timeout - // or when all ICMP probes are done, regardless of - // whether the HTTPS probes have finished. - wg.Add(1) - go func() { - defer wg.Done() - if err := c.measureAllICMPLatency(ctx, rs, need); err != nil { - c.logf("[v1] measureAllICMPLatency: %v", err) - } - }() - + if !opts.OnlyTCP443 { + // Kick off ICMP in parallel to HTTPS checks; we don't + // reuse the same WaitGroup for those probes because we + // need to close the underlying Pinger after a timeout + // or when all ICMP probes are done, regardless of + // whether the HTTPS probes have finished. 
+ wg.Add(1) + go func() { + defer wg.Done() + if err := c.measureAllICMPLatency(ctx, rs, need); err != nil { + c.logf("[v1] measureAllICMPLatency: %v", err) + } + }() + } wg.Add(len(need)) c.logf("netcheck: UDP is blocked, trying HTTPS") } @@ -1044,7 +1064,7 @@ func (c *Client) runHTTPOnlyChecks(ctx context.Context, last *Report, rs *report func (c *Client) measureHTTPSLatency(ctx context.Context, reg *tailcfg.DERPRegion) (time.Duration, netip.Addr, error) { metricHTTPSend.Add(1) var result httpstat.Result - ctx, cancel := context.WithTimeout(httpstat.WithHTTPStat(ctx, &result), overallProbeTimeout) + ctx, cancel := context.WithTimeout(httpstat.WithHTTPStat(ctx, &result), httpsProbeTimeout) defer cancel() var ip netip.Addr diff --git a/net/netcheck/netcheck_test.go b/net/netcheck/netcheck_test.go index 2f18705762829..02076f8d468e1 100644 --- a/net/netcheck/netcheck_test.go +++ b/net/netcheck/netcheck_test.go @@ -860,3 +860,15 @@ func TestNodeAddrResolve(t *testing.T) { }) } } + +func TestReportTimeouts(t *testing.T) { + if ReportTimeout < stunProbeTimeout { + t.Errorf("ReportTimeout (%v) cannot be less than stunProbeTimeout (%v)", ReportTimeout, stunProbeTimeout) + } + if ReportTimeout < icmpProbeTimeout { + t.Errorf("ReportTimeout (%v) cannot be less than icmpProbeTimeout (%v)", ReportTimeout, icmpProbeTimeout) + } + if ReportTimeout < httpsProbeTimeout { + t.Errorf("ReportTimeout (%v) cannot be less than httpsProbeTimeout (%v)", ReportTimeout, httpsProbeTimeout) + } +} diff --git a/net/netutil/routes.go b/net/netutil/routes.go index e86bb861cb50b..7d67d3695e10d 100644 --- a/net/netutil/routes.go +++ b/net/netutil/routes.go @@ -13,11 +13,6 @@ import ( "tailscale.com/net/tsaddr" ) -var ( - ipv4default = netip.MustParsePrefix("0.0.0.0/0") - ipv6default = netip.MustParsePrefix("::/0") -) - func validateViaPrefix(ipp netip.Prefix) error { if !tsaddr.IsViaPrefix(ipp) { return fmt.Errorf("%v is not a 4-in-6 prefix", ipp) @@ -60,22 +55,22 @@ func 
CalcAdvertiseRoutes(advertiseRoutes string, advertiseDefaultRoute bool) ([] return nil, err } } - if ipp == ipv4default { + if ipp == tsaddr.AllIPv4() { default4 = true - } else if ipp == ipv6default { + } else if ipp == tsaddr.AllIPv6() { default6 = true } routeMap[ipp] = true } if default4 && !default6 { - return nil, fmt.Errorf("%s advertised without its IPv6 counterpart, please also advertise %s", ipv4default, ipv6default) + return nil, fmt.Errorf("%s advertised without its IPv6 counterpart, please also advertise %s", tsaddr.AllIPv4(), tsaddr.AllIPv6()) } else if default6 && !default4 { - return nil, fmt.Errorf("%s advertised without its IPv4 counterpart, please also advertise %s", ipv6default, ipv4default) + return nil, fmt.Errorf("%s advertised without its IPv4 counterpart, please also advertise %s", tsaddr.AllIPv6(), tsaddr.AllIPv4()) } } if advertiseDefaultRoute { - routeMap[netip.MustParsePrefix("0.0.0.0/0")] = true - routeMap[netip.MustParsePrefix("::/0")] = true + routeMap[tsaddr.AllIPv4()] = true + routeMap[tsaddr.AllIPv6()] = true } if len(routeMap) == 0 { return nil, nil diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index 7cdca1fb3b340..71b55b8a7f240 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -781,6 +781,10 @@ func parsePMPResponse(pkt []byte) (res pmpResponse, ok bool) { return res, false } res.PublicAddr = netaddr.IPv4(pkt[8], pkt[9], pkt[10], pkt[11]) + if res.PublicAddr.IsUnspecified() { + // Zero it out so it's not Valid and used accidentally elsewhere. 
+ res.PublicAddr = netip.Addr{} + } } return res, true diff --git a/net/portmapper/upnp.go b/net/portmapper/upnp.go index 54b0f81b2a362..f1199f0a6c584 100644 --- a/net/portmapper/upnp.go +++ b/net/portmapper/upnp.go @@ -638,6 +638,19 @@ func (c *Client) tryUPnPPortmapWithDevice( return netip.AddrPort{}, nil, err } + // Do a bit of validation on the external IP; we've seen cases where + // UPnP devices return the public IP 0.0.0.0, which obviously doesn't + // work as an endpoint. + // + // See: https://github.com/tailscale/corp/issues/23538 + if externalIP.IsUnspecified() { + c.logf("UPnP returned unspecified external IP %v", externalIP) + return netip.AddrPort{}, nil, fmt.Errorf("UPnP returned unspecified external IP") + } else if externalIP.IsLoopback() { + c.logf("UPnP returned loopback external IP %v", externalIP) + return netip.AddrPort{}, nil, fmt.Errorf("UPnP returned loopback external IP") + } + return netip.AddrPortFrom(externalIP, newPort), client, nil } diff --git a/net/portmapper/upnp_test.go b/net/portmapper/upnp_test.go index 79e05dd9916d0..c41b535a54df2 100644 --- a/net/portmapper/upnp_test.go +++ b/net/portmapper/upnp_test.go @@ -599,13 +599,7 @@ func TestGetUPnPPortMapping(t *testing.T) { ) for i := range 2 { sawRequestWithLease.Store(false) - res, err := c.Probe(ctx) - if err != nil { - t.Fatalf("Probe: %v", err) - } - if !res.UPnP { - t.Errorf("didn't detect UPnP") - } + mustProbeUPnP(t, ctx, c) gw, myIP, ok := c.gatewayAndSelfIP() if !ok { @@ -656,13 +650,7 @@ func TestGetUPnPPortMapping_NoValidServices(t *testing.T) { c.debug.VerboseLogs = true ctx := context.Background() - res, err := c.Probe(ctx) - if err != nil { - t.Fatalf("Probe: %v", err) - } - if !res.UPnP { - t.Errorf("didn't detect UPnP") - } + mustProbeUPnP(t, ctx, c) gw, myIP, ok := c.gatewayAndSelfIP() if !ok { @@ -705,13 +693,7 @@ func TestGetUPnPPortMapping_Legacy(t *testing.T) { c.debug.VerboseLogs = true ctx := context.Background() - res, err := c.Probe(ctx) - if err != nil { - 
t.Fatalf("Probe: %v", err) - } - if !res.UPnP { - t.Errorf("didn't detect UPnP") - } + mustProbeUPnP(t, ctx, c) gw, myIP, ok := c.gatewayAndSelfIP() if !ok { @@ -838,6 +820,58 @@ func TestProcessUPnPResponses(t *testing.T) { } } +// See: https://github.com/tailscale/corp/issues/23538 +func TestGetUPnPPortMapping_Invalid(t *testing.T) { + for _, responseAddr := range []string{ + "0.0.0.0", + "127.0.0.1", + } { + t.Run(responseAddr, func(t *testing.T) { + igd, err := NewTestIGD(t.Logf, TestIGDOptions{UPnP: true}) + if err != nil { + t.Fatal(err) + } + defer igd.Close() + + // This is a very basic fake UPnP server handler. + handlers := map[string]any{ + "AddPortMapping": testAddPortMappingResponse, + "GetExternalIPAddress": makeGetExternalIPAddressResponse(responseAddr), + "GetStatusInfo": testGetStatusInfoResponse, + "DeletePortMapping": "", // Do nothing for test + } + + igd.SetUPnPHandler(&upnpServer{ + t: t, + Desc: huaweiRootDescXML, + Control: map[string]map[string]any{ + "/ctrlt/WANPPPConnection_1": handlers, + }, + }) + + c := newTestClient(t, igd) + defer c.Close() + c.debug.VerboseLogs = true + + ctx := context.Background() + mustProbeUPnP(t, ctx, c) + + gw, myIP, ok := c.gatewayAndSelfIP() + if !ok { + t.Fatalf("could not get gateway and self IP") + } + + ext, ok := c.getUPnPPortMapping(ctx, gw, netip.AddrPortFrom(myIP, 12345), 0) + if ok { + t.Fatal("did not expect to get UPnP port mapping") + } + if ext.IsValid() { + t.Fatalf("expected no external address; got %v", ext) + } + }) + } +} + type upnpServer struct { t *testing.T Desc string // root device XML @@ -921,6 +955,18 @@ func (u *upnpServer) handleControl(w http.ResponseWriter, r *http.Request, handl } } +func mustProbeUPnP(tb testing.TB, ctx context.Context, c *Client) ProbeResult { + tb.Helper() + res, err := c.Probe(ctx) + if err != nil { + tb.Fatalf("Probe: %v", err) + } + if !res.UPnP { + tb.Fatalf("didn't detect UPnP") + } + return res +} + const testRootDesc = ` @@ -1058,3 +1104,15 @@ const 
testLegacyGetStatusInfoResponse = ` ` + +func makeGetExternalIPAddressResponse(ip string) string { + return fmt.Sprintf(` + + + + %s + + + +`, ip) +} diff --git a/net/sockstats/sockstats_tsgo.go b/net/sockstats/sockstats_tsgo.go index 2d1ccd5a37de0..af691302f8be8 100644 --- a/net/sockstats/sockstats_tsgo.go +++ b/net/sockstats/sockstats_tsgo.go @@ -18,6 +18,7 @@ import ( "tailscale.com/net/netmon" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" + "tailscale.com/version" ) const IsAvailable = true @@ -156,7 +157,11 @@ func withSockStats(ctx context.Context, label Label, logf logger.Logf) context.C } } willOverwrite := func(trace *net.SockTrace) { - logf("sockstats: trace %q was overwritten by another", label) + if version.IsUnstableBuild() { + // Only spam about this in dev builds. + // See https://github.com/tailscale/tailscale/issues/13731 for known problems. + logf("sockstats: trace %q was overwritten by another", label) + } } return net.WithSockTrace(ctx, &net.SockTrace{ diff --git a/net/tsaddr/tsaddr.go b/net/tsaddr/tsaddr.go index ff1ef460e67e7..b951899e22b22 100644 --- a/net/tsaddr/tsaddr.go +++ b/net/tsaddr/tsaddr.go @@ -220,6 +220,17 @@ func ContainsExitRoutes(rr views.Slice[netip.Prefix]) bool { return v4 && v6 } +// ContainsExitRoute reports whether rr contains at least one of IPv4 or +// IPv6 /0 (exit) routes. +func ContainsExitRoute(rr views.Slice[netip.Prefix]) bool { + for i := range rr.Len() { + if rr.At(i).Bits() == 0 { + return true + } + } + return false +} + // ContainsNonExitSubnetRoutes reports whether v contains Subnet // Routes other than ExitNode Routes. func ContainsNonExitSubnetRoutes(rr views.Slice[netip.Prefix]) bool { @@ -231,6 +242,38 @@ func ContainsNonExitSubnetRoutes(rr views.Slice[netip.Prefix]) bool { return false } +// WithoutExitRoutes returns rr unchanged if it has only 1 or 0 /0 +// routes. If it has both IPv4 and IPv6 /0 routes, then it returns +// a copy with all /0 routes removed. 
+func WithoutExitRoutes(rr views.Slice[netip.Prefix]) views.Slice[netip.Prefix] { + if !ContainsExitRoutes(rr) { + return rr + } + var out []netip.Prefix + for _, r := range rr.All() { + if r.Bits() > 0 { + out = append(out, r) + } + } + return views.SliceOf(out) +} + +// WithoutExitRoute returns rr unchanged if it has 0 /0 +// routes. If it has a IPv4 or IPv6 /0 routes, then it returns +// a copy with all /0 routes removed. +func WithoutExitRoute(rr views.Slice[netip.Prefix]) views.Slice[netip.Prefix] { + if !ContainsExitRoute(rr) { + return rr + } + var out []netip.Prefix + for _, r := range rr.All() { + if r.Bits() > 0 { + out = append(out, r) + } + } + return views.SliceOf(out) +} + var ( allIPv4 = netip.MustParsePrefix("0.0.0.0/0") allIPv6 = netip.MustParsePrefix("::/0") @@ -245,6 +288,11 @@ func AllIPv6() netip.Prefix { return allIPv6 } // ExitRoutes returns a slice containing AllIPv4 and AllIPv6. func ExitRoutes() []netip.Prefix { return []netip.Prefix{allIPv4, allIPv6} } +// IsExitRoute reports whether p is an exit node route. +func IsExitRoute(p netip.Prefix) bool { + return p == allIPv4 || p == allIPv6 +} + // SortPrefixes sorts the prefixes in place. 
func SortPrefixes(p []netip.Prefix) { slices.SortFunc(p, netipx.ComparePrefix) diff --git a/net/tsaddr/tsaddr_test.go b/net/tsaddr/tsaddr_test.go index dccc342715ca4..4aa2f8c60f5b3 100644 --- a/net/tsaddr/tsaddr_test.go +++ b/net/tsaddr/tsaddr_test.go @@ -7,7 +7,10 @@ import ( "net/netip" "testing" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "tailscale.com/net/netaddr" + "tailscale.com/types/views" ) func TestInCrostiniRange(t *testing.T) { @@ -89,3 +92,133 @@ func TestUnmapVia(t *testing.T) { } } } + +func TestIsExitNodeRoute(t *testing.T) { + tests := []struct { + pref netip.Prefix + want bool + }{ + { + pref: AllIPv4(), + want: true, + }, + { + pref: AllIPv6(), + want: true, + }, + { + pref: netip.MustParsePrefix("1.1.1.1/0"), + want: false, + }, + { + pref: netip.MustParsePrefix("1.1.1.1/1"), + want: false, + }, + { + pref: netip.MustParsePrefix("192.168.0.0/24"), + want: false, + }, + } + + for _, tt := range tests { + if got := IsExitRoute(tt.pref); got != tt.want { + t.Errorf("for %q: got %v, want %v", tt.pref, got, tt.want) + } + } +} + +func TestWithoutExitRoutes(t *testing.T) { + tests := []struct { + prefs []netip.Prefix + want []netip.Prefix + }{ + { + prefs: []netip.Prefix{AllIPv4(), AllIPv6()}, + want: []netip.Prefix{}, + }, + { + prefs: []netip.Prefix{AllIPv4()}, + want: []netip.Prefix{AllIPv4()}, + }, + { + prefs: []netip.Prefix{AllIPv4(), AllIPv6(), netip.MustParsePrefix("10.0.0.0/10")}, + want: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/10")}, + }, + { + prefs: []netip.Prefix{AllIPv6(), netip.MustParsePrefix("10.0.0.0/10")}, + want: []netip.Prefix{AllIPv6(), netip.MustParsePrefix("10.0.0.0/10")}, + }, + } + + for _, tt := range tests { + got := WithoutExitRoutes(views.SliceOf(tt.prefs)) + if diff := cmp.Diff(tt.want, got.AsSlice(), cmpopts.EquateEmpty(), cmp.Comparer(func(a, b netip.Prefix) bool { return a == b })); diff != "" { + t.Errorf("unexpected route difference (-want +got):\n%s", diff) + } + } +} + +func 
TestWithoutExitRoute(t *testing.T) { + tests := []struct { + prefs []netip.Prefix + want []netip.Prefix + }{ + { + prefs: []netip.Prefix{AllIPv4(), AllIPv6()}, + want: []netip.Prefix{}, + }, + { + prefs: []netip.Prefix{AllIPv4()}, + want: []netip.Prefix{}, + }, + { + prefs: []netip.Prefix{AllIPv4(), AllIPv6(), netip.MustParsePrefix("10.0.0.0/10")}, + want: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/10")}, + }, + { + prefs: []netip.Prefix{AllIPv6(), netip.MustParsePrefix("10.0.0.0/10")}, + want: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/10")}, + }, + } + + for _, tt := range tests { + got := WithoutExitRoute(views.SliceOf(tt.prefs)) + if diff := cmp.Diff(tt.want, got.AsSlice(), cmpopts.EquateEmpty(), cmp.Comparer(func(a, b netip.Prefix) bool { return a == b })); diff != "" { + t.Errorf("unexpected route difference (-want +got):\n%s", diff) + } + } +} + +func TestContainsExitRoute(t *testing.T) { + tests := []struct { + prefs []netip.Prefix + want bool + }{ + { + prefs: []netip.Prefix{AllIPv4(), AllIPv6()}, + want: true, + }, + { + prefs: []netip.Prefix{AllIPv4()}, + want: true, + }, + { + prefs: []netip.Prefix{AllIPv4(), AllIPv6(), netip.MustParsePrefix("10.0.0.0/10")}, + want: true, + }, + { + prefs: []netip.Prefix{AllIPv6(), netip.MustParsePrefix("10.0.0.0/10")}, + want: true, + }, + { + prefs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/10")}, + want: false, + }, + } + + for _, tt := range tests { + if got := ContainsExitRoute(views.SliceOf(tt.prefs)); got != tt.want { + t.Errorf("for %q: got %v, want %v", tt.prefs, got, tt.want) + } + } +} diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index 514ebcaaf1f5e..dcd43d5718ca8 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -24,6 +24,7 @@ import ( "go4.org/mem" "gvisor.dev/gvisor/pkg/tcpip/stack" "tailscale.com/disco" + tsmetrics "tailscale.com/metrics" "tailscale.com/net/connstats" "tailscale.com/net/packet" "tailscale.com/net/packet/checksum" @@ -209,6 +210,30 @@ type Wrapper struct { stats 
atomic.Pointer[connstats.Statistics] captureHook syncs.AtomicValue[capture.Callback] + + metrics *metrics +} + +type metrics struct { + inboundDroppedPacketsTotal *tsmetrics.MultiLabelMap[dropPacketLabel] + outboundDroppedPacketsTotal *tsmetrics.MultiLabelMap[dropPacketLabel] +} + +func registerMetrics(reg *usermetric.Registry) *metrics { + return &metrics{ + inboundDroppedPacketsTotal: usermetric.NewMultiLabelMapWithRegistry[dropPacketLabel]( + reg, + "tailscaled_inbound_dropped_packets_total", + "counter", + "Counts the number of dropped packets received by the node from other peers", + ), + outboundDroppedPacketsTotal: usermetric.NewMultiLabelMapWithRegistry[dropPacketLabel]( + reg, + "tailscaled_outbound_dropped_packets_total", + "counter", + "Counts the number of packets dropped while being sent to other peers", + ), + } } // tunInjectedRead is an injected packet pretending to be a tun.Read(). @@ -248,15 +273,15 @@ func (w *Wrapper) Start() { close(w.startCh) } -func WrapTAP(logf logger.Logf, tdev tun.Device) *Wrapper { - return wrap(logf, tdev, true) +func WrapTAP(logf logger.Logf, tdev tun.Device, m *usermetric.Registry) *Wrapper { + return wrap(logf, tdev, true, m) } -func Wrap(logf logger.Logf, tdev tun.Device) *Wrapper { - return wrap(logf, tdev, false) +func Wrap(logf logger.Logf, tdev tun.Device, m *usermetric.Registry) *Wrapper { + return wrap(logf, tdev, false, m) } -func wrap(logf logger.Logf, tdev tun.Device, isTAP bool) *Wrapper { +func wrap(logf logger.Logf, tdev tun.Device, isTAP bool, m *usermetric.Registry) *Wrapper { logf = logger.WithPrefix(logf, "tstun: ") w := &Wrapper{ logf: logf, @@ -274,6 +299,7 @@ func wrap(logf logger.Logf, tdev tun.Device, isTAP bool) *Wrapper { // TODO(dmytro): (highly rate-limited) hexdumps should happen on unknown packets. 
filterFlags: filter.LogAccepts | filter.LogDrops, startCh: make(chan struct{}), + metrics: registerMetrics(m), } w.vectorBuffer = make([][]byte, tdev.BatchSize()) @@ -872,7 +898,7 @@ func (t *Wrapper) filterPacketOutboundToWireGuard(p *packet.Parsed, pc *peerConf if filt.RunOut(p, t.filterFlags) != filter.Accept { metricPacketOutDropFilter.Add(1) - metricOutboundDroppedPacketsTotal.Add(dropPacketLabel{ + t.metrics.outboundDroppedPacketsTotal.Add(dropPacketLabel{ Reason: DropReasonACL, }, 1) return filter.Drop, gro @@ -1144,7 +1170,7 @@ func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook ca if outcome != filter.Accept { metricPacketInDropFilter.Add(1) - metricInboundDroppedPacketsTotal.Add(dropPacketLabel{ + t.metrics.inboundDroppedPacketsTotal.Add(dropPacketLabel{ Reason: DropReasonACL, }, 1) @@ -1225,7 +1251,7 @@ func (t *Wrapper) Write(buffs [][]byte, offset int) (int, error) { t.noteActivity() _, err := t.tdevWrite(buffs, offset) if err != nil { - metricInboundDroppedPacketsTotal.Add(dropPacketLabel{ + t.metrics.inboundDroppedPacketsTotal.Add(dropPacketLabel{ Reason: DropReasonError, }, int64(len(buffs))) } @@ -1482,19 +1508,6 @@ type dropPacketLabel struct { Reason DropReason } -var ( - metricInboundDroppedPacketsTotal = usermetric.NewMultiLabelMap[dropPacketLabel]( - "tailscaled_inbound_dropped_packets_total", - "counter", - "Counts the number of dropped packets received by the node from other peers", - ) - metricOutboundDroppedPacketsTotal = usermetric.NewMultiLabelMap[dropPacketLabel]( - "tailscaled_outbound_dropped_packets_total", - "counter", - "Counts the number of packets dropped while being sent to other peers", - ) -) - func (t *Wrapper) InstallCaptureHook(cb capture.Callback) { t.captureHook.Store(cb) } diff --git a/net/tstun/wrap_test.go b/net/tstun/wrap_test.go index f9319210276a3..0ed0075b616ee 100644 --- a/net/tstun/wrap_test.go +++ b/net/tstun/wrap_test.go @@ -8,6 +8,7 @@ import ( "context" "encoding/binary" 
"encoding/hex" + "expvar" "fmt" "net/netip" "reflect" @@ -38,6 +39,7 @@ import ( "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/must" + "tailscale.com/util/usermetric" "tailscale.com/wgengine/capture" "tailscale.com/wgengine/filter" "tailscale.com/wgengine/wgcfg" @@ -173,7 +175,8 @@ func setfilter(logf logger.Logf, tun *Wrapper) { func newChannelTUN(logf logger.Logf, secure bool) (*tuntest.ChannelTUN, *Wrapper) { chtun := tuntest.NewChannelTUN() - tun := Wrap(logf, chtun.TUN()) + reg := new(usermetric.Registry) + tun := Wrap(logf, chtun.TUN(), reg) if secure { setfilter(logf, tun) } else { @@ -185,7 +188,8 @@ func newChannelTUN(logf logger.Logf, secure bool) (*tuntest.ChannelTUN, *Wrapper func newFakeTUN(logf logger.Logf, secure bool) (*fakeTUN, *Wrapper) { ftun := NewFake() - tun := Wrap(logf, ftun) + reg := new(usermetric.Registry) + tun := Wrap(logf, ftun, reg) if secure { setfilter(logf, tun) } else { @@ -315,15 +319,15 @@ func mustHexDecode(s string) []byte { } func TestFilter(t *testing.T) { - // Reset the metrics before test. These are global - // so the different tests might have affected them. - metricInboundDroppedPacketsTotal.SetInt(dropPacketLabel{Reason: DropReasonACL}, 0) - metricInboundDroppedPacketsTotal.SetInt(dropPacketLabel{Reason: DropReasonError}, 0) - metricOutboundDroppedPacketsTotal.SetInt(dropPacketLabel{Reason: DropReasonACL}, 0) chtun, tun := newChannelTUN(t.Logf, true) defer tun.Close() + // Reset the metrics before test. These are global + // so the different tests might have affected them. 
+ tun.metrics.inboundDroppedPacketsTotal.ResetAllForTest() + tun.metrics.outboundDroppedPacketsTotal.ResetAllForTest() + type direction int const ( @@ -436,20 +440,26 @@ func TestFilter(t *testing.T) { }) } - inACL := metricInboundDroppedPacketsTotal.Get(dropPacketLabel{Reason: DropReasonACL}) - inError := metricInboundDroppedPacketsTotal.Get(dropPacketLabel{Reason: DropReasonError}) - outACL := metricOutboundDroppedPacketsTotal.Get(dropPacketLabel{Reason: DropReasonACL}) - - assertMetricPackets(t, "inACL", "3", inACL.String()) - assertMetricPackets(t, "inError", "0", inError.String()) - assertMetricPackets(t, "outACL", "1", outACL.String()) + var metricInboundDroppedPacketsACL, metricInboundDroppedPacketsErr, metricOutboundDroppedPacketsACL int64 + if m, ok := tun.metrics.inboundDroppedPacketsTotal.Get(dropPacketLabel{Reason: DropReasonACL}).(*expvar.Int); ok { + metricInboundDroppedPacketsACL = m.Value() + } + if m, ok := tun.metrics.inboundDroppedPacketsTotal.Get(dropPacketLabel{Reason: DropReasonError}).(*expvar.Int); ok { + metricInboundDroppedPacketsErr = m.Value() + } + if m, ok := tun.metrics.outboundDroppedPacketsTotal.Get(dropPacketLabel{Reason: DropReasonACL}).(*expvar.Int); ok { + metricOutboundDroppedPacketsACL = m.Value() + } + assertMetricPackets(t, "inACL", 3, metricInboundDroppedPacketsACL) + assertMetricPackets(t, "inError", 0, metricInboundDroppedPacketsErr) + assertMetricPackets(t, "outACL", 1, metricOutboundDroppedPacketsACL) } -func assertMetricPackets(t *testing.T, metricName, want, got string) { +func assertMetricPackets(t *testing.T, metricName string, want, got int64) { t.Helper() if want != got { - t.Errorf("%s got unexpected value, got %s, want %s", metricName, got, want) + t.Errorf("%s got unexpected value, got %d, want %d", metricName, got, want) } } @@ -512,6 +522,7 @@ func TestAtomic64Alignment(t *testing.T) { } func TestPeerAPIBypass(t *testing.T) { + reg := new(usermetric.Registry) wrapperWithPeerAPI := &Wrapper{ PeerAPIPort: 
func(ip netip.Addr) (port uint16, ok bool) { if ip == netip.MustParseAddr("100.64.1.2") { @@ -519,6 +530,7 @@ func TestPeerAPIBypass(t *testing.T) { } return }, + metrics: registerMetrics(reg), } tests := []struct { @@ -534,13 +546,16 @@ func TestPeerAPIBypass(t *testing.T) { PeerAPIPort: func(netip.Addr) (port uint16, ok bool) { return 60000, true }, + metrics: registerMetrics(reg), }, pkt: tcp4syn("1.2.3.4", "100.64.1.2", 1234, 60000), want: filter.Drop, }, { - name: "reject_with_filter", - w: &Wrapper{}, + name: "reject_with_filter", + w: &Wrapper{ + metrics: registerMetrics(reg), + }, filter: filter.NewAllowNone(logger.Discard, new(netipx.IPSet)), pkt: tcp4syn("1.2.3.4", "100.64.1.2", 1234, 60000), want: filter.Drop, diff --git a/publicapi/device.md b/publicapi/device.md deleted file mode 100644 index 6ba1ebf778842..0000000000000 --- a/publicapi/device.md +++ /dev/null @@ -1,894 +0,0 @@ -> [!IMPORTANT] -> The Tailscale API documentation has moved to https://tailscale.com/api - -# Device - -A Tailscale device (sometimes referred to as _node_ or _machine_), is any computer or mobile device that joins a tailnet. - -Each device has a unique ID (`nodeId` in the JSON below) that is used to identify the device in API calls. -This ID can be found by going to the [**Machines**](https://login.tailscale.com/admin/machines) page in the admin console, -selecting the relevant device, then finding the ID in the Machine Details section. -You can also [list all devices in the tailnet](#list-tailnet-devices) to get their `nodeId` values. - -(A device's numeric `id` value can also be used in API calls, but `nodeId` is preferred.) - -### Attributes - -```jsonc -{ - // addresses (array of strings) is a list of Tailscale IP - // addresses for the device, including both IPv4 (formatted as 100.x.y.z) - // and IPv6 (formatted as fd7a:115c:a1e0:a:b:c:d:e) addresses. 
- "addresses": ["100.87.74.78", "fd7a:115c:a1e0:ac82:4843:ca90:697d:c36e"], - - // id (string) is the legacy identifier for a device; you - // can supply this value wherever {deviceId} is indicated in the - // endpoint. Note that although "id" is still accepted, "nodeId" is - // preferred. - "id": "393735751060", - - // nodeID (string) is the preferred identifier for a device; - // supply this value wherever {deviceId} is indicated in the endpoint. - "nodeId": "n5SUKe8CNTRL", - - // user (string) is the user who registered the node. For untagged nodes, - // this user is the device owner. - "user": "amelie@example.com", - - // name (string) is the MagicDNS name of the device. - // Learn more about MagicDNS at https://tailscale.com/kb/1081/. - "name": "pangolin.tailfe8c.ts.net", - - // hostname (string) is the machine name in the admin console - // Learn more about machine names at https://tailscale.com/kb/1098/. - "hostname": "pangolin", - - // clientVersion (string) is the version of the Tailscale client - // software; this is empty for external devices. - "clientVersion": "", - - // updateAvailable (boolean) is 'true' if a Tailscale client version - // upgrade is available. This value is empty for external devices. - "updateAvailable": false, - - // os (string) is the operating system that the device is running. - "os": "linux", - - // created (string) is the date on which the device was added - // to the tailnet; this is empty for external devices. - "created": "2022-12-01T05:23:30Z", - - // lastSeen (string) is when device was last active on the tailnet. - "lastSeen": "2022-12-01T05:23:30Z", - - // keyExpiryDisabled (boolean) is 'true' if the keys for the device - // will not expire. Learn more at https://tailscale.com/kb/1028/. - "keyExpiryDisabled": true, - - // expires (string) is the expiration date of the device's auth key. - // Learn more about key expiry at https://tailscale.com/kb/1028/. 
- "expires": "2023-05-30T04:44:05Z", - - // authorized (boolean) is 'true' if the device has been - // authorized to join the tailnet; otherwise, 'false'. Learn - // more about device authorization at https://tailscale.com/kb/1099/. - "authorized": true, - - // isExternal (boolean) if 'true', indicates that a device is not - // a member of the tailnet, but is shared in to the tailnet; - // if 'false', the device is a member of the tailnet. - // Learn more about node sharing at https://tailscale.com/kb/1084/. - "isExternal": true, - - // machineKey (string) is for internal use and is not required for - // any API operations. This value is empty for external devices. - "machineKey": "", - - // nodeKey (string) is mostly for internal use, required for select - // operations, such as adding a node to a locked tailnet. - // Learn about tailnet locks at https://tailscale.com/kb/1226/. - "nodeKey": "nodekey:01234567890abcdef", - - // blocksIncomingConnections (boolean) is 'true' if the device is not - // allowed to accept any connections over Tailscale, including pings. - // Learn more in the "Allow incoming connections" - // section of https://tailscale.com/kb/1072/. - "blocksIncomingConnections": false, - - // enabledRoutes (array of strings) are the subnet routes for this - // device that have been approved by the tailnet admin. - // Learn more about subnet routes at https://tailscale.com/kb/1019/. - "enabledRoutes": ["10.0.0.0/16", "192.168.1.0/24"], - - // advertisedRoutes (array of strings) are the subnets this device - // intends to expose. - // Learn more about subnet routes at https://tailscale.com/kb/1019/. - "advertisedRoutes": ["10.0.0.0/16", "192.168.1.0/24"], - - // clientConnectivity provides a report on the device's current physical - // network conditions. 
- "clientConnectivity": { - // endpoints (array of strings) Client's magicsock UDP IP:port - // endpoints (IPv4 or IPv6) - "endpoints": ["199.9.14.201:59128", "192.68.0.21:59128"], - - // mappingVariesByDestIP (boolean) is 'true' if the host's NAT mappings - // vary based on the destination IP. - "mappingVariesByDestIP": false, - - // latency (JSON object) lists DERP server locations and their current - // latency; "preferred" is 'true' for the node's preferred DERP - // server for incoming traffic. - "latency": { - "Dallas": { - "latencyMs": 60.463043 - }, - "New York City": { - "preferred": true, - "latencyMs": 31.323811 - } - }, - - // clientSupports (JSON object) identifies features supported by the client. - "clientSupports": { - // hairpinning (boolean) is 'true' if your router can route connections - // from endpoints on your LAN back to your LAN using those endpoints’ - // globally-mapped IPv4 addresses/ports - "hairPinning": false, - - // ipv6 (boolean) is 'true' if the device OS supports IPv6, - // regardless of whether IPv6 internet connectivity is available. - "ipv6": false, - - // pcp (boolean) is 'true' if PCP port-mapping service exists on - // your router. - "pcp": false, - - // pmp (boolean) is 'true' if NAT-PMP port-mapping service exists - // on your router. - "pmp": false, - - // udp (boolean) is 'true' if UDP traffic is enabled on the - // current network; if 'false', Tailscale may be unable to make - // direct connections, and will rely on our DERP servers. - "udp": true, - - // upnp (boolean) is 'true' if UPnP port-mapping service exists - // on your router. - "upnp": false - } - }, - - // tags (array of strings) let you assign an identity to a device that - // is separate from human users, and use it as part of an ACL to restrict - // access. Once a device is tagged, the tag is the owner of that device. - // A single node can have multiple tags assigned. This value is empty for - // external devices. 
- // Learn more about tags at https://tailscale.com/kb/1068/. - "tags": ["tag:golink"], - - // tailnetLockError (string) indicates an issue with the tailnet lock - // node-key signature on this device. - // This field is only populated when tailnet lock is enabled. - "tailnetLockError": "", - - // tailnetLockKey (string) is the node's tailnet lock key. Every node - // generates a tailnet lock key (so the value will be present) even if - // tailnet lock is not enabled. - // Learn more about tailnet lock at https://tailscale.com/kb/1226/. - "tailnetLockKey": "", - - // postureIdentity contains extra identifiers from the device when the tailnet - // it is connected to has device posture identification collection enabled. - // If the device has not opted-in to posture identification collection, this - // will contain {"disabled": true}. - // Learn more about posture identity at https://tailscale.com/kb/1326/device-identity - "postureIdentity": { - "serialNumbers": ["CP74LFQJXM"] - } -} -``` - -# APIs - -**[Device](#device)** - -- Get a device: [`GET /api/v2/device/{deviceid}`](#get-device) -- Delete a device: [`DELETE /api/v2/device/{deviceID}`](#delete-device) -- Expire device key: [`POST /api/v2/device/{deviceID}/expire`](#expire-device-key) -- [**Routes**](#routes) - - Get device routes: [`GET /api/v2/device/{deviceID}/routes`](#get-device-routes) - - Set device routes: [`POST /api/v2/device/{deviceID}/routes`](#set-device-routes) -- [**Authorize**](#authorize) - - Authorize a device: [`POST /api/v2/device/{deviceID}/authorized`](#authorize-device) -- [**Tags**](#tags) - - Update tags: [`POST /api/v2/device/{deviceID}/tags`](#update-device-tags) -- [**Keys**](#keys) - - Update device key: [`POST /api/v2/device/{deviceID}/key`](#update-device-key) -- [**IP Addresses**](#ip-addresses) - - Set device IPv4 address: [`POST /api/v2/device/{deviceID}/ip`](#set-device-ipv4-address) -- [**Device posture attributes**](#device-posture-attributes) - - Get device posture 
attributes: [`GET /api/v2/device/{deviceID}/attributes`](#get-device-posture-attributes) - - Set custom device posture attributes: [`POST /api/v2/device/{deviceID}/attributes/{attributeKey}`](#set-device-posture-attributes) - - Delete custom device posture attributes: [`DELETE /api/v2/device/{deviceID}/attributes/{attributeKey}`](#delete-custom-device-posture-attributes) -- [**Device invites**](#invites-to-a-device) - - List device invites: [`GET /api/v2/device/{deviceID}/device-invites`](#list-device-invites) - - Create device invites: [`POST /api/v2/device/{deviceID}/device-invites`](#create-device-invites) - -### Subnet routes - -Devices within a tailnet can be set up as subnet routers. -A subnet router acts as a gateway, relaying traffic from your Tailscale network onto your physical subnet. -Setting up subnet routers exposes routes to other devices in the tailnet. -Learn more about [subnet routers](https://tailscale.com/kb/1019). - -A device can act as a subnet router if its subnet routes are both advertised and enabled. -This is a two-step process, but the steps can occur in any order: - -- The device that intends to act as a subnet router exposes its routes by **advertising** them. - This is done in the Tailscale command-line interface. -- The tailnet admin must approve the routes by **enabling** them. - This is done in the [**Machines**](https://login.tailscale.com/admin/machines) page of the Tailscale admin console - or [via the API](#set-device-routes). - -If a device has advertised routes, they are not exposed to traffic until they are enabled by the tailnet admin. -Conversely, if a tailnet admin pre-approves certain routes by enabling them, they are not available for routing until the device in question has advertised them. 
- -The API exposes two methods for dealing with subnet routes: - -- Get routes: [`GET /api/v2/device/{deviceID}/routes`](#get-device-routes) to fetch lists of advertised and enabled routes for a device -- Set routes: [`POST /api/v2/device/{deviceID}/routes`](#set-device-routes) to set enabled routes for a device - -## Get device - -```http -GET /api/v2/device/{deviceid} -``` - -Retrieve the details for the specified device. -This returns a JSON `device` object listing device attributes. - -### Parameters - -#### `deviceid` (required in URL path) - -The ID of the device. - -#### `fields` (optional in query string) - -Controls whether the response returns **all** object fields or only a predefined subset of fields. -Currently, there are two supported options: - -- **`all`:** return all object fields in the response -- **`default`:** return all object fields **except**: - - `enabledRoutes` - - `advertisedRoutes` - - `clientConnectivity` (which contains the following fields: `mappingVariesByDestIP`, `derp`, `endpoints`, `latency`, and `clientSupports`) - - `postureIdentity` - -### Request example - -```sh -curl "https://api.tailscale.com/api/v2/device/12345?fields=all" \ - -u "tskey-api-xxxxx:" -``` - -### Response - -```jsonc -{ - "addresses":[ - "100.71.74.78", - "fd7a:115c:a1e0:ac82:4843:ca90:697d:c36e" - ], - "id":"12345", - - // Additional fields as documented in device "Attributes" section above -} -{ - "addresses":[ - "100.74.66.78", - "fd7a:115c:a1e0:ac82:4843:ca90:697d:c36f" - ], - "id":"67890", - - // Additional fields as documented in device "Attributes" section above -} -``` - -## Delete device - -```http -DELETE /api/v2/device/{deviceID} -``` - -Deletes the supplied device from its tailnet. -The device must belong to the user's tailnet. -Deleting shared/external devices is not supported. - -### Parameters - -#### `deviceid` (required in URL path) - -The ID of the device. 
- -### Request example - -```sh -curl -X DELETE 'https://api.tailscale.com/api/v2/device/12345' \ - -u "tskey-api-xxxxx:" -``` - -### Response - -If successful, the response should be empty: - -```http -HTTP/1.1 200 OK -``` - -If the device is not owned by your tailnet: - -```http -HTTP/1.1 501 Not Implemented -... -{"message":"cannot delete devices outside of your tailnet"} -``` - -## Expire a device's key - -```http -POST /api/v2/device/{deviceID}/expire -``` - -Mark a device's node key as expired. -This will require the device to re-authenticate in order to connect to the tailnet. -The device must belong to the requesting user's tailnet. - -### Parameters - -#### `deviceid` (required in URL path) - -The ID of the device. - -### Request example - -```sh -curl -X POST 'https://api.tailscale.com/api/v2/device/12345/expire' \ - -u "tskey-api-xxxxx:" \ - -H "Content-Type: application/json" -``` - -### Response - -If successful, the response should be empty: - -```http -HTTP/1.1 200 OK -``` - -## Routes - -## Get device routes - -```http -GET /api/v2/device/{deviceID}/routes -``` - -Retrieve the list of [subnet routes](#subnet-routes) that a device is advertising, as well as those that are enabled for it: - -- **Enabled routes:** The subnet routes for this device that have been approved by the tailnet admin. -- **Advertised routes:** The subnets this device intends to expose. - -### Parameters - -#### `deviceid` (required in URL path) - -The ID of the device. - -### Request example - -```sh -curl "https://api.tailscale.com/api/v2/device/11055/routes" \ --u "tskey-api-xxxxx:" -``` - -### Response - -Returns the enabled and advertised subnet routes for a device. 
- -```jsonc -{ - "advertisedRoutes": ["10.0.0.0/16", "192.168.1.0/24"], - "enabledRoutes": [] -} -``` - -## Set device routes - -```http -POST /api/v2/device/{deviceID}/routes -``` - -Sets a device's enabled [subnet routes](#subnet-routes) by replacing the existing list of subnet routes with the supplied parameters. -Advertised routes cannot be set through the API, since they must be set directly on the device. - -### Parameters - -#### `deviceid` (required in URL path) - -The ID of the device. - -#### `routes` (required in `POST` body) - -The new list of enabled subnet routes. - -```jsonc -{ - "routes": ["10.0.0.0/16", "192.168.1.0/24"] -} -``` - -### Request example - -```sh -curl "https://api.tailscale.com/api/v2/device/11055/routes" \ --u "tskey-api-xxxxx:" \ --H "Content-Type: application/json" \ ---data-binary '{"routes": ["10.0.0.0/16", "192.168.1.0/24"]}' -``` - -### Response - -Returns the enabled and advertised subnet routes for a device. - -```jsonc -{ - "advertisedRoutes": ["10.0.0.0/16", "192.168.1.0/24"], - "enabledRoutes": ["10.0.0.0/16", "192.168.1.0/24"] -} -``` - -## Authorize - -## Authorize device - -```http -POST /api/v2/device/{deviceID}/authorized -``` - -Authorize a device. -This call marks a device as authorized or revokes its authorization for tailnets where device authorization is required, according to the `authorized` field in the payload. - -This returns a successful 2xx response with an empty JSON object in the response body. - -### Parameters - -#### `deviceid` (required in URL path) - -The ID of the device. - -#### `authorized` (required in `POST` body) - -Specify whether the device is authorized. False to deauthorize an authorized device, and true to authorize a new device or to re-authorize a previously deauthorized device. 
- -```jsonc -{ - "authorized": true -} -``` - -### Request example - -```sh -curl "https://api.tailscale.com/api/v2/device/11055/authorized" \ --u "tskey-api-xxxxx:" \ --H "Content-Type: application/json" \ ---data-binary '{"authorized": true}' -``` - -### Response - -The response is 2xx on success. The response body is currently an empty JSON object. - -## Tags - -## Update device tags - -```http -POST /api/v2/device/{deviceID}/tags -``` - -Update the tags set on a device. -Tags let you assign an identity to a device that is separate from human users, and use that identity as part of an ACL to restrict access. -Tags are similar to role accounts, but more flexible. - -Tags are created in the tailnet policy file by defining the tag and an owner of the tag. -Once a device is tagged, the tag is the owner of that device. -A single node can have multiple tags assigned. - -Consult the policy file for your tailnet in the [admin console](https://login.tailscale.com/admin/acls) for the list of tags that have been created for your tailnet. -Learn more about [tags](https://tailscale.com/kb/1068/). - -This returns a 2xx code if successful, with an empty JSON object in the response body. - -### Parameters - -#### `deviceid` (required in URL path) - -The ID of the device. - -#### `tags` (required in `POST` body) - -The new list of tags for the device. - -```jsonc -{ - "tags": ["tag:foo", "tag:bar"] -} -``` - -### Request example - -```sh -curl "https://api.tailscale.com/api/v2/device/11055/tags" \ --u "tskey-api-xxxxx:" \ --H "Content-Type: application/json" \ ---data-binary '{"tags": ["tag:foo", "tag:bar"]}' -``` - -### Response - -The response is 2xx on success. The response body is currently an empty JSON object. 
- -If the tags supplied in the `POST` call do not exist in the tailnet policy file, the response is '400 Bad Request': - -```jsonc -{ - "message": "requested tags [tag:madeup tag:wrongexample] are invalid or not permitted" -} -``` - -## Keys - -## Update device key - -```http -POST /api/v2/device/{deviceID}/key -``` - -Update properties of the device key. - -### Parameters - -#### `deviceid` (required in URL path) - -The ID of the device. - -#### `keyExpiryDisabled` (optional in `POST` body) - -Disable or enable the expiry of the device's node key. - -When a device is added to a tailnet, its key expiry is set according to the tailnet's [key expiry](https://tailscale.com/kb/1028/) setting. -If the key is not refreshed and expires, the device can no longer communicate with other devices in the tailnet. - -Set `"keyExpiryDisabled": true` to disable key expiry for the device and allow it to rejoin the tailnet (for example to access an accidentally expired device). -You can then call this method again with `"keyExpiryDisabled": false` to re-enable expiry. - -```jsonc -{ - "keyExpiryDisabled": true -} -``` - -- If `true`, disable the device's key expiry. - The original key expiry time is still maintained. - Upon re-enabling, the key will expire at that original time. -- If `false`, enable the device's key expiry. - Sets the key to expire at the original expiry time prior to disabling. - The key may already have expired. In that case, the device must be re-authenticated. -- Empty value will not change the key expiry. - -This returns a 2xx code on success, with an empty JSON object in the response body. - -### Request example - -```sh -curl "https://api.tailscale.com/api/v2/device/11055/key" \ --u "tskey-api-xxxxx:" \ --H "Content-Type: application/json" \ ---data-binary '{"keyExpiryDisabled": true}' -``` - -### Response - -The response is 2xx on success. The response body is currently an empty JSON object. 
- -## IP Addresses - -## Set device IPv4 address - -```http -POST /api/v2/device/{deviceID}/ip -``` - -Set the Tailscale IPv4 address of the device. - -### Parameters - -#### `deviceid` (required in URL path) - -The ID of the device. - -#### `ipv4` (optional in `POST` body) - -Provide a new IPv4 address for the device. - -When a device is added to a tailnet, its Tailscale IPv4 address is set at random either from the CGNAT range, or a subset of the CGNAT range specified by an [ip pool](https://tailscale.com/kb/1304/ip-pool). -This endpoint can be used to replace the existing IPv4 address with a specific value. - -```jsonc -{ - "ipv4": "100.80.0.1" -} -``` - -This action will break any existing connections to this machine. -You will need to reconnect to this machine using the new IP address. -You may also need to flush your DNS cache. - -This returns a 2xx code on success, with an empty JSON object in the response body. - -### Request example - -```sh -curl "https://api.tailscale.com/api/v2/device/11055/ip" \ --u "tskey-api-xxxxx:" \ --H "Content-Type: application/json" \ ---data-binary '{"ipv4": "100.80.0.1"}' -``` - -### Response - -The response is 2xx on success. The response body is currently an empty JSON object. - -## Device posture attributes - -## Get device posture attributes - -The posture attributes API endpoints can be called with OAuth access tokens with -an `acl` or `devices` [scope](https://tailscale.com/kb/1215/oauth-clients#scopes), or personal access belonging to -[user roles](https://tailscale.com/kb/1138/user-roles) Owners, Admins, Network Admins, or IT Admins. - -``` -GET /api/v2/device/{deviceID}/attributes -``` - -Retrieve all posture attributes for the specified device. This returns a JSON object of all the key-value pairs of posture attributes for the device. - -### Parameters - -#### `deviceID` (required in URL path) - -The ID of the device to fetch posture attributes for. 
- -### Request example - -``` -curl "https://api.tailscale.com/api/v2/device/11055/attributes" \ --u "tskey-api-xxxxx:" -``` - -### Response - -The response is 200 on success. The response body is a JSON object containing all the posture attributes assigned to the node. Attribute values can be strings, numbers or booleans. - -```json -{ - "attributes": { - "custom:myScore": 87, - "custom:diskEncryption": true, - "custom:myAttribute": "my_value", - "node:os": "linux", - "node:osVersion": "5.19.0-42-generic", - "node:tsReleaseTrack": "stable", - "node:tsVersion": "1.40.0", - "node:tsAutoUpdate": false - } -} -``` - -## Set custom device posture attributes - -``` -POST /api/v2/device/{deviceID}/attributes/{attributeKey} -``` - -Create or update a custom posture attribute on the specified device. User-managed attributes must be in the `custom` namespace, which is indicated by prefixing the attribute key with `custom:`. - -Custom device posture attributes are available for the Personal and Enterprise plans. - -### Parameters - -#### `deviceID` (required in URL path) - -The ID of the device on which to set the custom posture attribute. - -#### `attributeKey` (required in URL path) - -The name of the posture attribute to set. This must be prefixed with `custom:`. - -Keys have a maximum length of 50 characters including the namespace, and can only contain letters, numbers, underscores, and colon. - -Keys are case-sensitive. Keys must be unique, but are checked for uniqueness in a case-insensitive manner. For example, `custom:MyAttribute` and `custom:myattribute` cannot both be set within a single tailnet. - -All values for a given key need to be of the same type, which is determined when the first value is written for a given key. For example, `custom:myattribute` cannot have a numeric value (`87`) for one node and a string value (`"78"`) for another node within the same tailnet. 
- -### Posture attribute `value` (required in POST body) - -```json -{ - "value": "foo" -} -``` - -A value can be either a string, number or boolean. - -A string value can have a maximum length of 50 characters, and can only contain letters, numbers, underscores, and periods. - -A number value is an integer and must be a JSON safe number (up to 2^53 - 1). - -### Request example - -``` -curl "https://api.tailscale.com/api/v2/device/11055/attributes/custom:my_attribute" \ --u "tskey-api-xxxxx:" \ --H "Content-Type: application/json" \ ---data-binary '{"value": "my_value"}' -``` - -### Response - -The response is 2xx on success. The response body is currently an empty JSON object. - -## Delete custom device posture attributes - -``` -DELETE /api/v2/device/{deviceID}/attributes/{attributeKey} -``` - -Delete a posture attribute from the specified device. This is only applicable to user-managed posture attributes in the `custom` namespace, which is indicated by prefixing the attribute key with `custom:`. - - - -### Parameters - -#### `deviceID` (required in URL path) - -The ID of the device from which to delete the posture attribute. - -#### `attributeKey` (required in URL path) - -The name of the posture attribute to delete. This must be prefixed with `custom:`. - -Keys have a maximum length of 50 characters including the namespace, and can only contain letters, numbers, underscores, and a delimiting colon. - -### Request example - -``` -curl -X DELETE "https://api.tailscale.com/api/v2/device/11055/attributes/custom:my_attribute" \ --u "tskey-api-xxxxx:" -``` - -### Response - -The response is 2xx on success. The response body is currently an empty JSON object. - -## Invites to a device - -The device sharing invite methods let you create and list [invites to share a device](https://tailscale.com/kb/1084/sharing). - -## List device invites - -```http -GET /api/v2/device/{deviceID}/device-invites -``` - -List all share invites for a device. 
- -### Parameters - -#### `deviceID` (required in URL path) - -The ID of the device. - -### Request example - -```sh -curl -X GET "https://api.tailscale.com/api/v2/device/11055/device-invites" \ --u "tskey-api-xxxxx:" -``` - -### Response - -```jsonc -[ - { - "id": "12345", - "created": "2024-05-08T20:19:51.777861756Z", - "tailnetId": 59954, - "deviceId": 11055, - "sharerId": 22011, - "allowExitNode": true, - "email": "user@example.com", - "lastEmailSentAt": "2024-05-08T20:19:51.777861756Z", - "inviteUrl": "https://login.tailscale.com/admin/invite/", - "accepted": false - }, - { - "id": "12346", - "created": "2024-04-03T21:38:49.333829261Z", - "tailnetId": 59954, - "deviceId": 11055, - "sharerId": 22012, - "inviteUrl": "https://login.tailscale.com/admin/invite/", - "accepted": true, - "acceptedBy": { - "id": 33223, - "loginName": "someone@example.com", - "profilePicUrl": "" - } - } -] -``` - -## Create device invites - -```http -POST /api/v2/device/{deviceID}/device-invites -``` - -Create new share invites for a device. - -### Parameters - -#### `deviceID` (required in URL path) - -The ID of the device. - -#### List of invite requests (required in `POST` body) - -Each invite request is an object with the following optional fields: - -- **`multiUse`:** (Optional) Specify whether the invite can be accepted more than once. When set to `true`, it results in an invite that can be accepted up to 1,000 times. -- **`allowExitNode`:** (Optional) Specify whether the invited user can use the device as an exit node when it advertises as one. -- **`email`:** (Optional) Specify the email to send the created invite. If not set, the endpoint generates and returns an invite URL (but doesn't send it out). 
- -### Request example - -```sh -curl -X POST "https://api.tailscale.com/api/v2/device/11055/device-invites" \ --u "tskey-api-xxxxx:" \ --H "Content-Type: application/json" \ ---data-binary '[{"multiUse": true, "allowExitNode": true, "email":"user@example.com"}]' -``` - -### Response - -```jsonc -[ - { - "id": "12347", - "created": "2024-05-08T20:29:45.842358533Z", - "tailnetId": 59954, - "deviceId": 11055, - "sharerId": 22012, - "multiUse": true, - "allowExitNode": true, - "email": "user@example.com", - "lastEmailSentAt": "2024-05-08T20:29:45.842358533Z", - "inviteUrl": "https://login.tailscale.com/admin/invite/", - "accepted": false - } -] -``` diff --git a/publicapi/deviceinvites.md b/publicapi/deviceinvites.md deleted file mode 100644 index 5a7587437fdd7..0000000000000 --- a/publicapi/deviceinvites.md +++ /dev/null @@ -1,224 +0,0 @@ -> [!IMPORTANT] -> The Tailscale API documentation has moved to https://tailscale.com/api - -# Device invites - -A device invite is an invitation that shares a device with an external user (a user not in the device's tailnet). - -Each device invite has a unique ID that is used to identify the invite in API calls. -You can find all device invite IDs for a particular device by [listing all device invites for a device](#list-device-invites). - -### Attributes - -```jsonc -{ - // id (strings) is the unique identifier for the invite. - // Supply this value wherever {deviceInviteId} is indicated in the endpoint. - "id": "12346", - - // created is the creation time of the invite. - "created": "2024-04-03T21:38:49.333829261Z", - - // tailnetId is the ID of the tailnet to which the shared device belongs. - "tailnetId": 59954, - - // deviceId is the ID of the device being shared. - "deviceId": 11055, - - // sharerId is the ID of the user who created the share invite. - "sharerId": 22012, - - // multiUse specifies whether this device invite can be accepted more than - // once. 
- "multiUse": false, - - // allowExitNode specifies whether the invited user is able to use the - // device as an exit node when the device is advertising as one. - "allowExitNode": true, - - // email is the email to which the invite was sent. - // If empty, the invite was not emailed to anyone, but the inviteUrl can be - // shared manually. - "email": "user@example.com", - - // lastEmailSentAt is the last time the invite was attempted to be sent to - // Email. Only ever set if Email is not empty. - "lastEmailSentAt": "2024-04-03T21:38:49.333829261Z", - - // inviteUrl is the link to accept the invite. - // Anyone with this link can accept the invite. - // It is not restricted to the person to which the invite was emailed. - "inviteUrl": "https://login.tailscale.com/admin/invite/", - - // accepted is true when share invite has been accepted. - "accepted": true, - - // acceptedBy is set when the invite has been accepted. - // It holds information about the user who accepted the share invite. - "acceptedBy": { - // id is the ID of the user who accepted the share invite. - "id": 33223, - - // loginName is the login name of the user who accepted the share invite. - "loginName": "someone@example.com", - - // profilePicUrl is optionally the profile pic URL for the user who accepted - // the share invite. - "profilePicUrl": "" - } -} -``` - -# API - -**[Device invites](#device-invites)** - -- Get device invite: [`GET /api/v2/device-invites/{deviceInviteId}`](#get-device-invite) -- Delete device invite: [`DELETE /api/v2/device-invites/{deviceInviteId}`](#delete-device-invite) -- Resend device invite (by email): [`POST /api/v2/device-invites/{deviceInviteId}/resend`](#resend-device-invite) -- Accept device invite [`POST /api/v2/device-invites/-/accept`](#accept-device-invite) - -## Get device invite - -```http -GET /api/v2/device-invites/{deviceInviteId} -``` - -Retrieve the specified device invite. 
- -### Parameters - -#### `deviceInviteId` (required in URL path) - -The ID of the device share invite. - -### Request example - -```sh -curl "https://api.tailscale.com/api/v2/device-invites/12346" \ - -u "tskey-api-xxxxx:" -``` - -### Response - -```jsonc -{ - "id": "12346", - "created": "2024-04-03T21:38:49.333829261Z", - "tailnetId": 59954, - "deviceId": 11055, - "sharerId": 22012, - "multiUse": true, - "allowExitNode": true, - "email": "user@example.com", - "lastEmailSentAt": "2024-04-03T21:38:49.333829261Z", - "inviteUrl": "https://login.tailscale.com/admin/invite/", - "accepted": false -} -``` - -## Delete device invite - -```http -DELETE /api/v2/device-invites/{deviceInviteId} -``` - -Delete the specified device invite. - -### Parameters - -#### `deviceInviteId` (required in URL path) - -The ID of the device share invite. - -### Request example - -```sh -curl -X DELETE "https://api.tailscale.com/api/v2/device-invites/12346" \ - -u "tskey-api-xxxxx:" -``` - -### Response - -The response is 2xx on success. The response body is an empty JSON object. - -## Resend device invite - -```http -POST /api/v2/device-invites/{deviceInviteId}/resend -``` - -Resend the specified device invite by email. You can only use this if the specified invite was originally created with an email specified. Refer to [creating device invites for a device](#create-device-invites). - -Note: Invite resends are rate limited to one per minute. - -### Parameters - -#### `deviceInviteId` (required in URL path) - -The ID of the device share invite. - -### Request example - -```sh -curl -X POST "https://api.tailscale.com/api/v2/device-invites/12346/resend" \ - -u "tskey-api-xxxxx:" -``` - -### Response - -The response is 2xx on success. The response body is an empty JSON object. - -## Accept device invite - -```http -POST /api/v2/device-invites/-/accept -``` - -Accept the specified device invite. 
The invite to accept is specified by its URL or code in the `POST` body rather than by its ID, which is why the endpoint path uses a dash (`-`) in place of a device invite ID. -See [creating device invites for a device](#create-device-invites). - -### Parameters - -#### `invite` (required in `POST` body) - -The URL of the invite (in the form "https://login.tailscale.com/admin/invite/{code}") or the "{code}" component of the URL. - -### Request example - -```sh -curl -X POST "https://api.tailscale.com/api/v2/device-invites/-/accept" \ - -u "tskey-api-xxxxx:" \ - -H "Content-Type: application/json" \ - --data-binary '[{"invite": "https://login.tailscale.com/admin/invite/xxxxxx"}]' -``` - -### Response - -```jsonc -{ - "device": { - "id": "11055", - "os": "iOS", - "name": "my-phone", - "fqdn": "my-phone.something.ts.net", - "ipv4": "100.x.y.z", - "ipv6": "fd7a:115c:x::y:z", - "includeExitNode": false - }, - "sharer": { - "id": "22012", - "displayName": "Some User", - "loginName": "someuser@example.com", - "profilePicURL": "" - }, - "acceptedBy": { - "id": "33233", - "displayName": "Another User", - "loginName": "anotheruser@example2.com", - "profilePicURL": "" - } -} -``` diff --git a/publicapi/readme.md b/publicapi/readme.md deleted file mode 100644 index 12b0ea2cee261..0000000000000 --- a/publicapi/readme.md +++ /dev/null @@ -1,121 +0,0 @@ -> [!IMPORTANT] -> The Tailscale API documentation has moved to https://tailscale.com/api - -# Tailscale API - -The Tailscale API is a (mostly) RESTful API. Typically, both `POST` bodies and responses are JSON-encoded. - -## Base URL - -The base URL for the Tailscale API is `https://api.tailscale.com/api/v2/`. - -Examples in this document may abbreviate this to `/api/v2/`. - -## Authentication - -Requests to the Tailscale API are authenticated with an API access token (sometimes called an API key). 
-Access tokens can be supplied as the username portion of HTTP Basic authentication (leave the password blank) or as an OAuth Bearer token: - -```sh -# passing token with basic auth -curl -u "tskey-api-xxxxx:" https://api.tailscale.com/api/v2/... - -# passing token as bearer token -curl -H "Authorization: Bearer tskey-api-xxxxx" https://api.tailscale.com/api/v2/... -``` - -Access tokens for individual users can be created and managed from the [**Keys**](https://login.tailscale.com/admin/settings/keys) page of the admin console. -These tokens will have the same permissions as the owning user, and can be set to expire in 1 to 90 days. -Access tokens are identifiable by the prefix `tskey-api-`. - -Alternatively, an OAuth client can be used to create short-lived access tokens with scoped permission. -OAuth clients don't expire, and can therefore be used to provide ongoing access to the API, creating access tokens as needed. -OAuth clients and the access tokens they create are not tied to an individual Tailscale user. -OAuth client secrets are identifiable by the prefix `tskey-client-`. -Learn more about [OAuth clients](https://tailscale.com/kb/1215/). - -## Errors - -The Tailscale API returns status codes consistent with [standard HTTP conventions](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status). -In addition to the status code, errors may include additional information in the response body: - -```jsonc -{ - "message": "additional error information" -} -``` - -## Pagination - -The Tailscale API does not currently support pagination. All results are returned at once. 
- -# APIs - -**[Device](./device.md#device)** - -- Get a device: [`GET /api/v2/device/{deviceid}`](./device.md#get-device) -- Delete a device: [`DELETE /api/v2/device/{deviceID}`](./device.md#delete-device) -- Expire device key: [`POST /api/v2/device/{deviceID}/expire`](./device.md#expire-device-key) -- [**Routes**](./device.md#routes) - - Get device routes: [`GET /api/v2/device/{deviceID}/routes`](./device.md#get-device-routes) - - Set device routes: [`POST /api/v2/device/{deviceID}/routes`](./device.md#set-device-routes) -- [**Authorize**](./device.md#authorize) - - Authorize a device: [`POST /api/v2/device/{deviceID}/authorized`](./device.md#authorize-device) -- [**Tags**](./device.md#tags) - - Update tags: [`POST /api/v2/device/{deviceID}/tags`](./device.md#update-device-tags) -- [**Keys**](./device.md#keys) - - Update device key: [`POST /api/v2/device/{deviceID}/key`](./device.md#update-device-key) -- [**IP Addresses**](./device.md#ip-addresses) - - Set device IPv4 address: [`POST /api/v2/device/{deviceID}/ip`](./device.md#set-device-ipv4-address) -- [**Device posture attributes**](./device.md#device-posture-attributes) - - Get device posture attributes: [`GET /api/v2/device/{deviceID}/attributes`](./device.md#get-device-posture-attributes) - - Set custom device posture attributes: [`POST /api/v2/device/{deviceID}/attributes/{attributeKey}`](./device.md#set-device-posture-attributes) - - Delete custom device posture attributes: [`DELETE /api/v2/device/{deviceID}/attributes/{attributeKey}`](./device.md#delete-custom-device-posture-attributes) -- [**Device invites**](./device.md#invites-to-a-device) - - List device invites: [`GET /api/v2/device/{deviceID}/device-invites`](./device.md#list-device-invites) - - Create device invites: [`POST /api/v2/device/{deviceID}/device-invites`](./device.md#create-device-invites) - -**[Tailnet](./tailnet.md#tailnet)** - -- [**Policy File**](./tailnet.md#policy-file) - - Get policy file: [`GET 
/api/v2/tailnet/{tailnet}/acl`](./tailnet.md#get-policy-file) - - Update policy file: [`POST /api/v2/tailnet/{tailnet}/acl`](./tailnet.md#update-policy-file) - - Preview rule matches: [`POST /api/v2/tailnet/{tailnet}/acl/preview`](./tailnet.md#preview-policy-file-rule-matches) - - Validate and test policy file: [`POST /api/v2/tailnet/{tailnet}/acl/validate`](./tailnet.md#validate-and-test-policy-file) -- [**Devices**](./tailnet.md#devices) - - List tailnet devices: [`GET /api/v2/tailnet/{tailnet}/devices`](./tailnet.md#list-tailnet-devices) -- [**Keys**](./tailnet.md#tailnet-keys) - - List tailnet keys: [`GET /api/v2/tailnet/{tailnet}/keys`](./tailnet.md#list-tailnet-keys) - - Create an auth key: [`POST /api/v2/tailnet/{tailnet}/keys`](./tailnet.md#create-auth-key) - - Get a key: [`GET /api/v2/tailnet/{tailnet}/keys/{keyid}`](./tailnet.md#get-key) - - Delete a key: [`DELETE /api/v2/tailnet/{tailnet}/keys/{keyid}`](./tailnet.md#delete-key) -- [**DNS**](./tailnet.md#dns) - - [**Nameservers**](./tailnet.md#nameservers) - - Get nameservers: [`GET /api/v2/tailnet/{tailnet}/dns/nameservers`](./tailnet.md#get-nameservers) - - Set nameservers: [`POST /api/v2/tailnet/{tailnet}/dns/nameservers`](./tailnet.md#set-nameservers) - - [**Preferences**](./tailnet.md#preferences) - - Get DNS preferences: [`GET /api/v2/tailnet/{tailnet}/dns/preferences`](./tailnet.md#get-dns-preferences) - - Set DNS preferences: [`POST /api/v2/tailnet/{tailnet}/dns/preferences`](./tailnet.md#set-dns-preferences) - - [**Search Paths**](./tailnet.md#search-paths) - - Get search paths: [`GET /api/v2/tailnet/{tailnet}/dns/searchpaths`](./tailnet.md#get-search-paths) - - Set search paths: [`POST /api/v2/tailnet/{tailnet}/dns/searchpaths`](./tailnet.md#set-search-paths) - - [**Split DNS**](./tailnet.md#split-dns) - - Get split DNS: [`GET /api/v2/tailnet/{tailnet}/dns/split-dns`](./tailnet.md#get-split-dns) - - Update split DNS: [`PATCH 
/api/v2/tailnet/{tailnet}/dns/split-dns`](./tailnet.md#update-split-dns) - - Set split DNS: [`PUT /api/v2/tailnet/{tailnet}/dns/split-dns`](./tailnet.md#set-split-dns) -- [**User invites**](./tailnet.md#tailnet-user-invites) - - List user invites: [`GET /api/v2/tailnet/{tailnet}/user-invites`](./tailnet.md#list-user-invites) - - Create user invites: [`POST /api/v2/tailnet/{tailnet}/user-invites`](./tailnet.md#create-user-invites) - -**[User invites](./userinvites.md#user-invites)** - -- Get user invite: [`GET /api/v2/user-invites/{userInviteId}`](./userinvites.md#get-user-invite) -- Delete user invite: [`DELETE /api/v2/user-invites/{userInviteId}`](./userinvites.md#delete-user-invite) -- Resend user invite (by email): [`POST /api/v2/user-invites/{userInviteId}/resend`](#resend-user-invite) - -**[Device invites](./deviceinvites.md#device-invites)** - -- Get device invite: [`GET /api/v2/device-invites/{deviceInviteId}`](./deviceinvites.md#get-device-invite) -- Delete device invite: [`DELETE /api/v2/device-invites/{deviceInviteId}`](./deviceinvites.md#delete-device-invite) -- Resend device invite (by email): [`POST /api/v2/device-invites/{deviceInviteId}/resend`](./deviceinvites.md#resend-device-invite) -- Accept device invite [`POST /api/v2/device-invites/-/accept`](#accept-device-invite) diff --git a/publicapi/tailnet.md b/publicapi/tailnet.md deleted file mode 100644 index e6fb27fbe8522..0000000000000 --- a/publicapi/tailnet.md +++ /dev/null @@ -1,1392 +0,0 @@ -> [!IMPORTANT] -> The Tailscale API documentation has moved to https://tailscale.com/api - -# Tailnet - -A tailnet is your private network, composed of all the devices on it and their configuration. -Learn more about [tailnets](https://tailscale.com/kb/1136/). - -When specifying a tailnet in the API, you can: - -- Provide a dash (`-`) to reference the default tailnet of the access token being used to make the API call. - This is the best option for most users. 
- Your API calls would start: - - ```sh - curl "https://api.tailscale.com/api/v2/tailnet/-/..." - ``` - -- Provide the **organization** name found on the **[General Settings](https://login.tailscale.com/admin/settings/general)** - page of the Tailscale admin console (not to be confused with the "tailnet name" found in the DNS tab). - - For example, if your organization name is `alice@gmail.com`, your API calls would start: - - ```sh - curl "https://api.tailscale.com/api/v2/tailnet/alice@gmail.com/..." - ``` - -# API - -**[Tailnet](#tailnet)** - -- [**Policy File**](#policy-file) - - Get policy file: [`GET /api/v2/tailnet/{tailnet}/acl`](#get-policy-file) - - Update policy file: [`POST /api/v2/tailnet/{tailnet}/acl`](#update-policy-file) - - Preview rule matches: [`POST /api/v2/tailnet/{tailnet}/acl/preview`](#preview-policy-file-rule-matches) - - Validate and test policy file: [`POST /api/v2/tailnet/{tailnet}/acl/validate`](#validate-and-test-policy-file) -- [**Devices**](#devices) - - List tailnet devices: [`GET /api/v2/tailnet/{tailnet}/devices`](#list-tailnet-devices) -- [**Keys**](#tailnet-keys) - - List tailnet keys: [`GET /api/v2/tailnet/{tailnet}/keys`](#list-tailnet-keys) - - Create an auth key: [`POST /api/v2/tailnet/{tailnet}/keys`](#create-auth-key) - - Get a key: [`GET /api/v2/tailnet/{tailnet}/keys/{keyid}`](#get-key) - - Delete a key: [`DELETE /api/v2/tailnet/{tailnet}/keys/{keyid}`](#delete-key) -- [**DNS**](#dns) - - [**Nameservers**](#nameservers) - - Get nameservers: [`GET /api/v2/tailnet/{tailnet}/dns/nameservers`](#get-nameservers) - - Set nameservers: [`POST /api/v2/tailnet/{tailnet}/dns/nameservers`](#set-nameservers) - - [**Preferences**](#preferences) - - Get DNS preferences: [`GET /api/v2/tailnet/{tailnet}/dns/preferences`](#get-dns-preferences) - - Set DNS preferences: [`POST /api/v2/tailnet/{tailnet}/dns/preferences`](#set-dns-preferences) - - [**Search Paths**](#search-paths) - - Get search paths: [`GET 
/api/v2/tailnet/{tailnet}/dns/searchpaths`](#get-search-paths) - - Set search paths: [`POST /api/v2/tailnet/{tailnet}/dns/searchpaths`](#set-search-paths) - - [**Split DNS**](#split-dns) - - Get split DNS: [`GET /api/v2/tailnet/{tailnet}/dns/split-dns`](#get-split-dns) - - Update split DNS: [`PATCH /api/v2/tailnet/{tailnet}/dns/split-dns`](#update-split-dns) - - Set split DNS: [`PUT /api/v2/tailnet/{tailnet}/dns/split-dns`](#set-split-dns) -- [**User invites**](#tailnet-user-invites) - - List user invites: [`GET /api/v2/tailnet/{tailnet}/user-invites`](#list-user-invites) - - Create user invites: [`POST /api/v2/tailnet/{tailnet}/user-invites`](#create-user-invites) - -## Policy File - -The tailnet policy file contains access control lists and related configuration. -The policy file is expressed using "[HuJSON](https://github.com/tailscale/hujson#readme)" -(human JSON, a superset of JSON that allows comments and trailing commas). -Most policy file API methods can also return regular JSON for compatibility with other tools. -Learn more about [network access controls](https://tailscale.com/kb/1018/). - -## Get Policy File - -```http -GET /api/v2/tailnet/{tailnet}/acl -``` - -Retrieves the current policy file for the given tailnet; this includes the ACL along with the rules and tests that have been defined. - -This method can return the policy file as JSON or HuJSON, depending on the `Accept` header. -The response also includes an `ETag` header, which can be optionally included when [updating the policy file](#update-policy-file) to avoid missed updates. - -### Parameters - -#### `tailnet` (required in URL path) - -The tailnet organization name. - -#### `Accept` (optional in request header) - -Response is encoded as JSON if `application/json` is requested, otherwise HuJSON will be returned. - -#### `details` (optional in query string) - -Request a detailed description of the tailnet policy file by providing `details=1` in the URL query string. 
-If using this, do not supply an `Accept` parameter in the header. - -The response will contain a JSON object with the fields: - -- **tailnet policy file:** a base64-encoded string representation of the huJSON format -- **warnings:** array of strings for syntactically valid but nonsensical entries -- **errors:** an array of strings for parsing failures - -### Request example (response in HuJSON format) - -```sh -curl "https://api.tailscale.com/api/v2/tailnet/example.com/acl" \ - -u "tskey-api-xxxxx:" -``` - -### Response in HuJSON format - -On success, returns a 200 status code and the tailnet policy file in HuJSON format. -No errors or warnings are returned. - -```jsonc -... -Content-Type: application/hujson -Etag: "e0b2816b418b3f266309d94426ac7668ab3c1fa87798785bf82f1085cc2f6d9c" -... - -// Example/default ACLs for unrestricted connections. -{ - // Declare static groups of users beyond those in the identity service. - "groups": { - "group:example": ["user1@example.com", "user2@example.com"], - }, - - // Declare convenient hostname aliases to use in place of IP addresses. - "hosts": { - "example-host-1": "100.100.100.100", - }, - - // Access control lists. - "acls": [ - // Match absolutely everything. - // Comment this section out if you want to define specific restrictions. - {"action": "accept", "src": ["*"], "dst": ["*:*"]}, - ], -} - -``` - -### Request example (response in JSON format) - -```sh -curl "https://api.tailscale.com/api/v2/tailnet/example.com/acl" \ - -u "tskey-api-xxxxx:" - -H "Accept: application/json" -``` - -### Response in JSON format - -On success, returns a 200 status code and the tailnet policy file in JSON format. -No errors or warnings are returned. - -```jsonc -... -Content-Type: application/json -Etag: "e0b2816b418b3f266309d94426ac7668ab3c1fa87798785bf82f1085cc2f6d9c" -... 
-{ - "acls" : [ - { - "action" : "accept", - "ports" : [ - "*:*" - ], - "users" : [ - "*" - ] - } - ], - "groups" : { - "group:example" : [ - "user1@example.com", - "user2@example.com" - ] - }, - "hosts" : { - "example-host-1" : "100.100.100.100" - } -} -``` - -### Request example (with details) - -```sh -curl "https://api.tailscale.com/api/v2/tailnet/example.com/acl?details=1" \ - -u "tskey-api-xxxxx:" -``` - -### Response (with details) - -On success, returns a 200 status code and the tailnet policy file in a base64-encoded string representation of the huJSON format. -In addition, errors and warnings are returned. - -```sh -{ - "acl": "Ly8gUG9raW5nIGFyb3VuZCBpbiB0aGUgQVBJIGRvY3MsIGhvcGluZyB5b3UnZCBmaW5kIHNvbWV0aGluZyBnb29kLCBlaD8KLy8gV2UgbGlrZSB5b3VyIHN0eWxlISAgR28gZ3JhYiB5b3Vyc2VsZiBhIFRhaWxzY2FsZSB0LXNoaXJ0IGlmIHRoZXJlIGFyZQovLyBzdGlsbCBzb21lIGF2YWlsYWJsZS4gQnV0IHNoaGguLi4gZG9uJ3QgdGVsbCBhbnlvbmUhCi8vCi8vICAgICAgICAgICAgIGh0dHBzOi8vc3dhZy5jb20vZ2lmdC82a29mNGs1Z3B1ZW95ZDB2NXd6MHJkYmMKewoJLy8gRGVjbGFyZSBzdGF0aWMgZ3JvdXBzIG9mIHVzZXJzIGJleW9uZCB0aG9zZSBpbiB0aGUgaWRlbnRpdHkgc2VydmljZS4KCSJncm91cHMiOiB7CgkJImdyb3VwOmV4YW1wbGUiOiBbInVzZXIxQGV4YW1wbGUuY29tIiwgInVzZXIyQGV4YW1wbGUuY29tIl0sCgl9LAoKCS8vIERlY2xhcmUgY29udmVuaWVudCBob3N0bmFtZSBhbGlhc2VzIHRvIHVzZSBpbiBwbGFjZSBvZiBJUCBhZGRyZXNzZXMuCgkiaG9zdHMiOiB7CgkJImV4YW1wbGUtaG9zdC0xIjogIjEwMC4xMDAuMTAwLjEwMCIsCgl9LAoKCS8vIEFjY2VzcyBjb250cm9sIGxpc3RzLgoJImFjbHMiOiBbCgkJLy8gTWF0Y2ggYWJzb2x1dGVseSBldmVyeXRoaW5nLgoJCS8vIENvbW1lbnQgdGhpcyBzZWN0aW9uIG91dCBpZiB5b3Ugd2FudCB0byBkZWZpbmUgc3BlY2lmaWMgcmVzdHJpY3Rpb25zLgoJCXsiYWN0aW9uIjogImFjY2VwdCIsICJ1c2VycyI6IFsiKiJdLCAicG9ydHMiOiBbIio6KiJdfSwKCV0sCn0K", - "warnings": [ - "\"group:example\": user not found: \"user1@example.com\"", - "\"group:example\": user not found: \"user2@example.com\"" - ], - "errors": null -} -``` - -## Update policy file - -```http -POST /api/v2/tailnet/{tailnet}/acl` -``` - -Sets the ACL for the given tailnet. -HuJSON and JSON are both accepted inputs. 
-An `If-Match` header can be set to avoid missed updates. - -On success, returns the updated ACL in JSON or HuJSON according to the `Accept` header. -Otherwise, errors are returned for incorrectly defined ACLs, ACLs with failing tests on attempted updates, and mismatched `If-Match` header and ETag. - -### Parameters - -#### tailnet (required in URL path) - -The tailnet organization name. - -#### `If-Match` (optional in request header) - -This is a safety mechanism to avoid overwriting other users' updates to the tailnet policy file. - -- Set the `If-Match` value to that of the ETag header returned in a `GET` request to `/api/v2/tailnet/{tailnet}/acl`. - Tailscale compares the ETag value in your request to that of the current tailnet file and only replaces the file if there's a match. - (A mismatch indicates that another update has been made to the file.) - For example: `-H "If-Match: \"e0b2816b418\""` -- Alternately, set the `If-Match` value to `ts-default` to ensure that the policy file is replaced - _only if the current policy file is still the untouched default_ created automatically for each tailnet. - For example: `-H "If-Match: \"ts-default\""` - -#### `Accept` (optional in request header) - -Sets the return type of the updated tailnet policy file. -Response is encoded as JSON if `application/json` is requested, otherwise HuJSON will be returned. - -#### Tailnet policy file entries (required in `POST` body) - -Define the policy file in the `POST` body. -Include the entire policy file. -Note that the supplied object fully replaces your existing tailnet policy file. - -The `POST` body should be formatted as JSON or HuJSON. -Learn about the [ACL policy properties you can include in the request](https://tailscale.com/kb/1018/#tailscale-policy-syntax). 
- -### Request example - -```sh -POST /api/v2/tailnet/example.com/acl -curl "https://api.tailscale.com/api/v2/tailnet/example.com/acl" \ - -u "tskey-api-xxxxx:" \ - -H "Content-Type: application/json" \ - -H "If-Match: \"e0b2816b418b3f266309d94426ac7668ab3c1fa87798785bf82f1085cc2f6d9c\"" - --data-binary '// Example/default ACLs for unrestricted connections. -{ - // Declare tests to check functionality of ACL rules. User must be a valid user with registered machines. - "tests": [ - // {"src": "user1@example.com", "accept": ["example-host-1:22"], "deny": ["example-host-2:100"]}, - ], - // Declare static groups of users beyond those in the identity service. - "groups": { - "group:example": [ "user1@example.com", "user2@example.com" ], - }, - // Declare convenient hostname aliases to use in place of IP addresses. - "hosts": { - "example-host-1": "100.100.100.100", - }, - // Access control lists. - "acls": [ - // Match absolutely everything. Comment out this section if you want - // to define specific ACL restrictions. - { "action": "accept", "users": ["*"], "ports": ["*:*"] }, - ] -}' -``` - -### Response - -A successful response returns an HTTP status of '200' and the modified tailnet policy file in JSON or HuJSON format, depending on the request header. - -```jsonc -// Example/default ACLs for unrestricted connections. -{ - // Declare tests to check functionality of ACL rules. User must be a valid user with registered machines. - "tests": [ - // {"src": "user1@example.com", "accept": ["example-host-1:22"], "deny": ["example-host-2:100"]}, - ], - // Declare static groups of users beyond those in the identity service. - "groups": { - "group:example": ["user1@example.com", "user2@example.com"] - }, - // Declare convenient hostname aliases to use in place of IP addresses. - "hosts": { - "example-host-1": "100.100.100.100" - }, - // Access control lists. - "acls": [ - // Match absolutely everything. 
Comment out this section if you want - // to define specific ACL restrictions. - { "action": "accept", "users": ["*"], "ports": ["*:*"] } - ] -} -``` - -### Response: failed test error - -``` -{ - "message": "test(s) failed", - "data": [ - { - "user": "user1@example.com", - "errors": [ - "address \"user2@example.com:400\": want: Accept, got: Drop" - ] - } - ] -} -``` - -## Preview policy file rule matches - -```http -POST /api/v2/tailnet/{tailnet}/acl/preview -``` - -When given a user or IP port to match against, returns the tailnet policy rules that -apply to that resource without saving the policy file to the server. - -### Parameters - -#### `tailnet` (required in URL path) - -The tailnet organization name. - -#### `type` (required in query string) - -Specify for which type of resource (user or IP port) matching rules are to be fetched. -Read about [previewing changes in the admin console](https://tailscale.com/kb/1018/#previewing-changes). - -- `user`: Specify `user` if the `previewFor` value is a user's email. - Note that `user` remains in the API for compatibility purposes, but has been replaced by `src` in policy files. -- `ipport`: Specify `ipport` if the `previewFor` value is an IP address and port. - Note that `ipport` remains in the API for compatibility purposes, but has been replaced by `dst` in policy files. - -#### `previewFor` (required in query string) - -- If `type=user`, provide the email of a valid user with registered machines. -- If `type=ipport`, provide an IP address + port: `10.0.0.1:80`. - -The supplied policy file is queried with this parameter to determine which rules match. - -#### Tailnet policy file (required in `POST` body) - -Provide the tailnet policy file in the `POST` body in JSON or HuJSON format. -Learn about [tailnet policy file entries](https://tailscale.com/kb/1018). 
- -### Request example - -```sh -curl "https://api.tailscale.com/api/v2/tailnet/example.com/acl/preview?previewFor=user1@example.com&type=user" \ - -u "tskey-api-xxxxx:" \ - -H "Content-Type: application/json" \ - --data-binary '// Example/default ACLs for unrestricted connections. -{ - // Declare tests to check functionality of ACL rules. User must be a valid user with registered machines. - "tests": [ - // {"src": "user1@example.com", "accept": ["example-host-1:22"], "deny": ["example-host-2:100"]}, - ], - // Declare static groups of users beyond those in the identity service. - "groups": { - "group:example": [ "user1@example.com", "user2@example.com" ], - }, - // Declare convenient hostname aliases to use in place of IP addresses. - "hosts": { - "example-host-1": "100.100.100.100", - }, - // Access control lists. - "acls": [ - // Match absolutely everything. Comment out this section if you want - // to define specific ACL restrictions. - { "action": "accept", "users": ["*"], "ports": ["*:*"] }, - ] -}' -``` - -### Response - -A successful response returns an HTTP status of '200' and a list of rules that apply to the resource supplied as a list of matches as JSON objects. -Each match object includes: - -- `users`: array of strings indicating source entities affected by the rule -- `ports`: array of strings representing destinations that can be accessed -- `lineNumber`: integer indicating the rule's location in the policy file - -The response also echoes the `type` and `previewFor` values supplied in the request. 
- -```jsonc -{ - "matches": [ - { - "users": ["*"], - "ports": ["*:*"], - "lineNumber": 19 - } - ], - "type": "user", - "previewFor: "user1@example.com" -} -``` - -## Validate and test policy file - -```http -POST /api/v2/tailnet/{tailnet}/acl/validate -``` - -This method works in one of two modes, neither of which modifies your current tailnet policy file: - -- **Run ACL tests:** When the **request body contains ACL tests as a JSON array**, - Tailscale runs ACL tests against the tailnet's current policy file. - Learn more about [ACL tests](https://tailscale.com/kb/1018/#tests). -- **Validate a new policy file:** When the **request body is a JSON object**, - Tailscale interprets the body as a hypothetical new tailnet policy file with new ACLs, including any new rules and tests. - It validates that the policy file is parsable and runs tests to validate the existing rules. - -In either case, this method does not modify the tailnet policy file in any way. - -### Parameters for "Run ACL tests" mode - -#### `tailnet` (required in URL path) - -The tailnet organization name. - -#### ACL tests (required in `POST` body) - -The `POST` body should be a JSON formatted array of ACL Tests. -Learn more about [tailnet policy file tests](https://tailscale.com/kb/1018/#tests). - -### Request example to run ACL tests - -```sh -curl "https://api.tailscale.com/api/v2/tailnet/example.com/acl/validate" \ - -u "tskey-api-xxxxx:" \ - -H "Content-Type: application/json" \ - --data-binary ' - [ - {"src": "user1@example.com", "accept": ["example-host-1:22"], "deny": ["example-host-2:100"]} - ]' -``` - -### Parameters for "Validate a new policy file" mode - -#### `tailnet` (required in URL path) - -The tailnet organization name. - -#### Entire tailnet policy file (required in `POST` body) - -The `POST` body should be a JSON object with a JSON or HuJSON representation of a tailnet policy file. 
- -### Request example to validate a policy file - -```sh -curl "https://api.tailscale.com/api/v2/tailnet/example.com/acl/validate" \ - -u "tskey-api-xxxxx:" \ - -H "Content-Type: application/json" \ - --data-binary ' - { - "acls": [ - { "action": "accept", "src": ["100.105.106.107"], "dst": ["1.2.3.4:*"] }, - ], - "tests", [ - {"src": "100.105.106.107", "allow": ["1.2.3.4:80"]} - ], - }' -``` - -### Response - -The HTTP status code will be '200' if the request was well formed and there were no server errors, even in the case of failing tests or an invalid ACL. -Look at the response body to determine whether there was a problem within your ACL or tests: - -- If the tests are valid, an empty body or a JSON object with no `message` is returned. -- If there's a problem, the response body will be a JSON object with a non-empty `message` property and optionally additional details in `data`: - - ```jsonc - { - "message": "test(s) failed", - "data": [ - { - "user": "user1@example.com", - "errors": ["address \"2.2.2.2:22\": want: Drop, got: Accept"] - } - ] - } - ``` - -If your tailnet has [user and group provisioning](https://tailscale.com/kb/1180/sso-okta-scim/) turned on, we will also warn you about -any groups that are used in the policy file that are not being synced from SCIM. Explicitly defined groups will not trigger this warning. - -```jsonc -{ - "message": "warning(s) found", - "data": [ - { - "user": "group:unknown@example.com", - "warnings": [ - "group is not syncing from SCIM and will be ignored by rules in the policy file" - ] - } - ] -} -``` - -## Devices - -## List tailnet devices - -```http -GET /api/v2/tailnet/{tailnet}/devices -``` - -Lists the devices in a tailnet. -Optionally use the `fields` query parameter to explicitly indicate which fields are returned. - -### Parameters - -#### `tailnet` (required in URL path) - -The tailnet organization name. 
- -#### `fields` (optional in query string) - -Controls whether the response returns **all** fields or only a predefined subset of fields. -Currently, there are two supported options: - -- **`all`:** return all fields in the response -- **`default`:** return all fields **except**: - - `enabledRoutes` - - `advertisedRoutes` - - `clientConnectivity` (which contains the following fields: `mappingVariesByDestIP`, `derp`, `endpoints`, `latency`, and `clientSupports`) - -If the `fields` parameter is not supplied, then the default (limited fields) option is used. - -### Request example for default set of fields - -```sh -curl "https://api.tailscale.com/api/v2/tailnet/example.com/devices" \ - -u "tskey-api-xxxxx:" -``` - -### Request example for all fields - -```sh -curl "https://api.tailscale.com/api/v2/tailnet/example.com/devices?fields=all" \ - -u "tskey-api-xxxxx:" -``` - -### Response - -On success, returns a 200 status code and a JSON array of the tailnet devices and their details. - -## Tailnet keys - -These methods operate primarily on auth keys, and in some cases on [API access tokens](#authentication). - -- Auth keys: Pre-authentication keys (or "auth keys") let you register new devices on a tailnet without needing to sign in via a web browser. - Auth keys are identifiable by the prefix `tskey-auth-`. Learn more about [auth keys](https://tailscale.com/kb/1085/). - -- API access tokens: used to [authenticate API requests](#authentication). - -If you authenticate with a user-owned API access token, all the methods on tailnet keys operate on _keys owned by that user_. -If you authenticate with an access token derived from an OAuth client, then these methods operate on _keys owned by the tailnet_. -Learn more about [OAuth clients](https://tailscale.com/kb/1215). - -The `POST /api/v2/tailnet/{tailnet}/keys` method is used to create auth keys only. -The remaining three methods operate on auth keys and API access tokens. 
- -### Attributes - -```jsonc -{ - // capabilities (JSON object) is a mapping of resources to permissible - // actions. - "capabilities": { - - // devices (JSON object) specifies the key's permissions over devices. - "devices": { - - // create (JSON object) specifies the key's permissions when - // creating devices. - "create": { - - // reusable (boolean) for auth keys only; reusable auth keys - // can be used multiple times to register different devices. - // Learn more about reusable auth keys at - // https://tailscale.com/kb/1085/#types-of-auth-keys - "reusable": false, - - // ephemeral (boolean) for auth keys only; ephemeral keys are - // used to connect and then clean up short-lived devices. - // Learn about ephemeral nodes at https://tailscale.com/kb/1111/. - "ephemeral": false, - - // preauthorized (boolean) for auth keys only; these are also - // referred to as "pre-approved" keys. 'true' means that devices - // registered with this key won't require additional approval from a - // tailnet admin. - // Learn about device approval at https://tailscale.com/kb/1099/. - "preauthorized": false, - - // tags (string) are the tags that will be set on devices registered - // with this key. - // Learn about tags at https://tailscale.com/kb/1068/. - "tags": [ - "tag:example" - ] - } - } - } - - // expirySeconds (int) is the duration in seconds a new key is valid. - "expirySeconds": 86400 - - // description (string) is an optional short phrase that describes what - // this key is used for. It can be a maximum of 50 alphanumeric characters. - // Hyphens and underscores are also allowed. - "description": "short description of key purpose" -} -``` - -## List tailnet keys - -```http -GET /api/v2/tailnet/{tailnet}/keys -``` - -Returns a list of active auth keys and API access tokens. The set of keys returned depends on the access token used to make the request: - -- If the API call is made with a user-owned API access token, this returns only the keys owned by that user. 
-- If the API call is made with an access token derived from an OAuth client, this returns all keys owned directly by the tailnet. - -### Parameters - -#### `tailnet` (required in URL path) - -The tailnet organization name. - -### Request example - -```sh -curl "https://api.tailscale.com/api/v2/tailnet/example.com/keys" \ - -u "tskey-api-xxxxx:" -``` - -### Response - -Returns a JSON object with the IDs of all active keys. - -```jsonc -{ - "keys": [ - { "id": "XXXX14CNTRL" }, - { "id": "XXXXZ3CNTRL" }, - { "id": "XXXX43CNTRL" }, - { "id": "XXXXgj1CNTRL" } - ] -} -``` - -## Create auth key - -```http -POST /api/v2/tailnet/{tailnet}/keys -``` - -Creates a new auth key in the specified tailnet. -The key will be associated with the user who owns the API access token used to make this call, -or, if the call is made with an access token derived from an OAuth client, the key will be owned by the tailnet. - -Returns a JSON object with the supplied capabilities in addition to the generated key. -The key should be recorded and kept safe and secure because it wields the capabilities specified in the request. -The identity of the key is embedded in the key itself and can be used to perform operations on the key (e.g., revoking it or retrieving information about it). -The full key can no longer be retrieved after the initial response. - -### Parameters - -#### `tailnet` (required in URL path) - -The tailnet organization name. - -#### Tailnet key object (required in `POST` body) - -Supply the tailnet key attributes as a JSON object in the `POST` body following the request example below. - -At minimum, the request `POST` body must have a `capabilities` object (see below). -With nothing else supplied, such a request generates a single-use key with no tags. - -Note the following about required vs. optional values: - -- **`capabilities`:** A `capabilities` object is required and must contain `devices`. 
- -- **`devices`:** A `devices` object is required within `capabilities`, but can be an empty JSON object. - -- **`tags`:** Whether tags are required or optional depends on the owner of the auth key: - - - When creating an auth key _owned by the tailnet_ (using OAuth), it must have tags. - The auth tags specified for that new auth key must exactly match the tags that are on the OAuth client used to create that auth key (or they must be tags that are owned by the tags that are on the OAuth client used to create the auth key). - - When creating an auth key _owned by a user_ (using a user's access token), tags are optional. - -- **`expirySeconds`:** Optional in `POST` body. - Specifies the duration in seconds until the key should expire. - Defaults to 90 days if not supplied. - -- **`description`:** Optional in `POST` body. - A short string specifying the purpose of the key. Can be a maximum of 50 alphanumeric characters. Hyphens and spaces are also allowed. - -### Request example - -```jsonc -curl "https://api.tailscale.com/api/v2/tailnet/example.com/keys" \ - -u "tskey-api-xxxxx:" \ - -H "Content-Type: application/json" \ - --data-binary ' -{ - "capabilities": { - "devices": { - "create": { - "reusable": false, - "ephemeral": false, - "preauthorized": false, - "tags": [ "tag:example" ] - } - } - }, - "expirySeconds": 86400, - "description": "dev access" -}' -``` - -### Response - -The response is a JSON object that includes the `key` value, which will only be returned once. -Record and safely store the `key` returned. -It holds the capabilities specified in the request and can no longer be retrieved by the server. 
- -```jsonc -{ - "id": "k123456CNTRL", - "key": "tskey-auth-k123456CNTRL-abcdefghijklmnopqrstuvwxyz", - "created": "2021-12-09T23:22:39Z", - "expires": "2022-03-09T23:22:39Z", - "revoked": "2022-03-12T23:22:39Z", - "capabilities": { - "devices": { - "create": { - "reusable": false, - "ephemeral": false, - "preauthorized": false, - "tags": ["tag:example"] - } - } - }, - "description": "dev access" -} -``` - -## Get key - -```http -GET /api/v2/tailnet/{tailnet}/keys/{keyid} -``` - -Returns a JSON object with information about a specific key, such as its creation and expiration dates and its capabilities. - -### Parameters - -#### `tailnet` (required in URL path) - -The tailnet organization name. - -#### `keyId` (required in URL path) - -The ID of the key. - -### Request example - -```sh -curl "https://api.tailscale.com/api/v2/tailnet/example.com/keys/k123456CNTRL" \ - -u "tskey-api-xxxxx:" -``` - -### Response - -The response is a JSON object with information about the key supplied. - -```jsonc -{ - "id": "abc123456CNTRL", - "created": "2022-05-05T18:55:44Z", - "expires": "2022-08-03T18:55:44Z", - "capabilities": { - "devices": { - "create": { - "reusable": false, - "ephemeral": true, - "preauthorized": false, - "tags": ["tag:bar", "tag:foo"] - } - } - }, - "description": "dev access" -} -``` - -Response for a revoked (deleted) or expired key will have an `invalid` field set to `true`: - -```jsonc -{ - "id": "abc123456CNTRL", - "created": "2022-05-05T18:55:44Z", - "expires": "2022-08-03T18:55:44Z", - "revoked": "2023-04-01T20:50:00Z", - "invalid": true -} -``` - -## Delete key - -```http -DELETE /api/v2/tailnet/{tailnet}/keys/{keyid} -``` - -Deletes a specific key. - -### Parameters - -#### `tailnet` (required in URL path) - -The tailnet organization name. - -#### `keyId` (required in URL path) - -The ID of the key. The key ID can be found in the [admin console](https://login.tailscale.com/admin/settings/keys). 
- -### Request example - -```sh -curl -X DELETE 'https://api.tailscale.com/api/v2/tailnet/example.com/keys/k123456CNTRL' \ - -u "tskey-api-xxxxx:" -``` - -### Response - -This returns status 200 upon success. - -## DNS - -The tailnet DNS methods are provided for fetching and modifying various DNS settings for a tailnet. -These include nameservers, DNS preferences, and search paths. -Learn more about [DNS in Tailscale](https://tailscale.com/kb/1054/). - -## Nameservers - -## Get nameservers - -```http -GET /api/v2/tailnet/{tailnet}/dns/nameservers -``` - -Lists the global DNS nameservers for a tailnet. - -### Parameters - -#### `tailnet` (required in URL path) - -The tailnet organization name. - -### Request example - -```sh -curl "https://api.tailscale.com/api/v2/tailnet/example.com/dns/nameservers" \ - -u "tskey-api-xxxxx:" -``` - -### Response - -```jsonc -{ - "dns": ["8.8.8.8"] -} -``` - -## Set nameservers - -```http -POST /api/v2/tailnet/{tailnet}/dns/nameservers -``` - -Replaces the list of global DNS nameservers for the given tailnet with the list supplied in the request. -Note that changing the list of DNS nameservers may also affect the status of MagicDNS (if MagicDNS is on; learn about [MagicDNS](https://tailscale.com/kb/1081). -If all nameservers have been removed, MagicDNS will be automatically disabled (until explicitly turned back on by the user). - -### Parameters - -#### `tailnet` (required in URL path) - -The tailnet organization name. - -#### `dns` (required in `POST` body) - -The new list of DNS nameservers in JSON. 
- -```jsonc -{ - "dns": ["8.8.8.8"] -} -``` - -### Request example: adding DNS nameservers with MagicDNS on - -Adding DNS nameservers with the MagicDNS on: - -```sh -curl "https://api.tailscale.com/api/v2/tailnet/example.com/dns/nameservers" \ - -u "tskey-api-xxxxx:" \ - -H "Content-Type: application/json" \ - --data-binary '{"dns": ["8.8.8.8"]}' -``` - -### Response example: adding DNS nameservers, MagicDNS on - -The response is a JSON object containing the new list of nameservers and the status of MagicDNS. - -```jsonc -{ - "dns": ["8.8.8.8"], - "magicDNS": true -} -``` - -### Request example: removing all DNS nameservers, MagicDNS on - -```sh -curl "https://api.tailscale.com/api/v2/tailnet/example.com/dns/nameservers" \ - -u "tskey-api-xxxxx:" \ - -H "Content-Type: application/json" \ - --data-binary '{"dns": []}' -``` - -### Response example: removing all DNS nameservers with MagicDNS on - -The response is a JSON object containing the new list of nameservers and the status of MagicDNS. - -```jsonc -{ - "dns": [], - "magicDNS": false -} -``` - -## Preferences - -## Get DNS preferences - -```http -GET /api/v2/tailnet/{tailnet}/dns/preferences` -``` - -Retrieves the DNS preferences that are currently set for the given tailnet. - -### Parameters - -#### `tailnet` (required in URL path) - -The tailnet organization name. - -### Request example - -```sh -curl "https://api.tailscale.com/api/v2/tailnet/example.com/dns/preferences" \ - -u "tskey-api-xxxxx:" -``` - -### Response - -```jsonc -{ - "magicDNS": false -} -``` - -## Set DNS preferences - -```http -POST /api/v2/tailnet/{tailnet}/dns/preferences -``` - -Set the DNS preferences for a tailnet; specifically, the MagicDNS setting. -Note that MagicDNS is dependent on DNS servers. -Learn about [MagicDNS](https://tailscale.com/kb/1081). - -If there is at least one DNS server, then MagicDNS can be enabled. -Otherwise, it returns an error. - -Note that removing all nameservers will turn off MagicDNS. 
-To reenable it, nameservers must be added back, and MagicDNS must be explicitly turned on. - -### Parameters - -#### `tailnet` (required in URL path) - -The tailnet organization name. - -#### DNS preference (required in `POST` body) - -The DNS preferences in JSON. Currently, MagicDNS is the only setting available: - -- **`magicDNS`:** Automatically registers DNS names for devices in your tailnet. - -```jsonc -{ - "magicDNS": true -} -``` - -### Request example - -```sh -curl "https://api.tailscale.com/api/v2/tailnet/example.com/dns/preferences" \ - -u "tskey-api-xxxxx:" \ - -H "Content-Type: application/json" \ - --data-binary '{"magicDNS": true}' -``` - -### Response - -If there are no DNS servers, this returns an error message: - -```jsonc -{ - "message": "need at least one nameserver to enable MagicDNS" -} -``` - -If there are DNS servers, this returns the MagicDNS status: - -```jsonc -{ - "magicDNS": true -} -``` - -## Search Paths - -## Get search paths - -```http -GET /api/v2/tailnet/{tailnet}/dns/searchpaths -``` - -Retrieves the list of search paths, also referred to as _search domains_, that is currently set for the given tailnet. - -### Parameters - -#### `tailnet` (required in URL path) - -The tailnet organization name. - -### Request example - -```sh -curl "https://api.tailscale.com/api/v2/tailnet/example.com/dns/searchpaths" \ - -u "tskey-api-xxxxx:" -``` - -### Response - -```jsonc -{ - "searchPaths": ["user1.example.com"] -} -``` - -## Set search paths - -```http -POST /api/v2/tailnet/{tailnet}/dns/searchpaths -``` - -Replaces the list of search paths with the list supplied by the user and returns an error otherwise. - -### Parameters - -#### `tailnet` (required in URL path) - -The tailnet organization name. 
- -#### `searchPaths` (required in `POST` body) - -Specify a list of search paths in a JSON object: - -```jsonc -{ - "searchPaths": ["user1.example.com", "user2.example.com"] -} -``` - -### Request example - -```sh -curl "https://api.tailscale.com/api/v2/tailnet/example.com/dns/searchpaths" \ - -u "tskey-api-xxxxx:" \ - -H "Content-Type: application/json" \ - --data-binary '{"searchPaths": ["user1.example.com", "user2.example.com"]}' -``` - -### Response - -The response is a JSON object containing the new list of search paths. - -```jsonc -{ - "searchPaths": ["user1.example.com", "user2.example.com"] -} -``` - -## Split DNS - -## Get split DNS - -```http -GET /api/v2/tailnet/{tailnet}/dns/split-dns -``` - -Retrieves the split DNS settings, which is a map from domains to lists of nameservers, that is currently set for the given tailnet. - -### Parameters - -#### `tailnet` (required in URL path) - -The tailnet organization name. - -### Request example - -```sh -curl "https://api.tailscale.com/api/v2/tailnet/example.com/dns/split-dns" \ - -u "tskey-api-xxxxx:" -``` - -### Response - -```jsonc -{ - "example.com": ["1.1.1.1", "1.2.3.4"], - "other.com": ["2.2.2.2"] -} -``` - -## Update split DNS - -```http -PATCH /api/v2/tailnet/{tailnet}/dns/split-dns -``` - -Performs partial updates of the split DNS settings for a given tailnet. Only domains specified in the request map will be modified. Setting the value of a mapping to "null" clears the nameservers for that domain. - -### Parameters - -#### `tailnet` (required in URL path) - -The tailnet organization name. 
- -#### `PATCH` body format - -Specify mappings from domain name to a list of nameservers in a JSON object: - -```jsonc -{ - "example.com": ["1.1.1.1", "1.2.3.4"], - "other.com": ["2.2.2.2"] -} -``` - -### Request example: updating split DNS settings for multiple domains - -```sh -curl -X PATCH "https://api.tailscale.com/api/v2/tailnet/example.com/dns/split-dns" \ - -u "tskey-api-xxxxx:" \ - -H "Content-Type: application/json" \ - --data-binary '{"example.com": ["1.1.1.1", "1.2.3.4"], "other.com": ["2.2.2.2"]}' -``` - -### Response: updating split DNS settings for multiple domains - -The response is a JSON object containing the updated map of split DNS settings. - -```jsonc -{ - "example.com": ["1.1.1.1", "1.2.3.4"], - "other.com": ["2.2.2.2"], - -} -``` - -### Request example: unsetting nameservers for a domain - -```sh -curl -X PATCH "https://api.tailscale.com/api/v2/tailnet/example.com/dns/split-dns" \ - -u "tskey-api-xxxxx:" \ - -H "Content-Type: application/json" \ - --data-binary '{"example.com": null}' -``` - -### Response: unsetting nameservers for a domain - -The response is a JSON object containing the updated map of split DNS settings. - -```jsonc -{ - -} -``` - -## Set split DNS - -```http -PUT /api/v2/tailnet/{tailnet}/dns/split-dns -``` - -Replaces the split DNS settings for a given tailnet. Setting the value of a mapping to "null" clears the nameservers for that domain. Sending an empty object clears nameservers for all domains. - -### Parameters - -#### `tailnet` (required in URL path) - -The tailnet organization name. 
- -#### `PUT` body format - -Specify mappings from domain name to a list of nameservers in a JSON object: - -```jsonc -{ - "example.com": ["1.2.3.4"], - "other.com": ["2.2.2.2"] -} -``` - -### Request example: setting multiple domains - -```sh -curl -X PUT "https://api.tailscale.com/api/v2/tailnet/example.com/dns/split-dns" \ - -u "tskey-api-xxxxx:" \ - -H "Content-Type: application/json" \ - --data-binary '{"example.com": ["1.2.3.4"], "other.com": ["2.2.2.2"]}' -``` - -### Response: unsetting nameservers for a domain - -The response is a JSON object containing the updated map of split DNS settings. - -```jsonc -{ - "example.com": ["1.2.3.4"], - "other.com": ["2.2.2.2"] -} -``` - -### Request example: unsetting all domains - -```sh -curl -X PUT "https://api.tailscale.com/api/v2/tailnet/example.com/dns/split-dns" \ - -u "tskey-api-xxxxx:" \ - -H "Content-Type: application/json" \ - --data-binary '{}' -``` - -### Response: unsetting nameservers for a domain - -The response is a JSON object containing the updated map of split DNS settings. - -```jsonc -{} -``` - -## Tailnet user invites - -The tailnet user invite methods let you create and list [invites](https://tailscale.com/kb/1371/invite-users). - -## List user invites - -```http -GET /api/v2/tailnet/{tailnet}/user-invites -``` - -List all user invites that haven't been accepted. - -### Parameters - -#### `tailnet` (required in URL path) - -The tailnet organization name. 
- -### Request example - -```sh -curl -X GET "https://api.tailscale.com/api/v2/tailnet/example.com/user-invites" \ --u "tskey-api-xxxxx:" -``` - -### Response - -```jsonc -[ - { - "id": "29214", - "role": "member", - "tailnetId": 12345, - "inviterId": 34567, - "email": "user@example.com", - "lastEmailSentAt": "2024-05-09T16:13:16.084568545Z", - "inviteUrl": "https://login.tailscale.com/uinv/" - }, - { - "id": "29215", - "role": "admin", - "tailnetId": 12345, - "inviterId": 34567, - "inviteUrl": "https://login.tailscale.com/uinv/" - } -] -``` - -## Create user invites - -```http -POST /api/v2/tailnet/{tailnet}/user-invites -``` - -Create new user invites to join the tailnet. - -### Parameters - -#### `tailnet` (required in URL path) - -The tailnet organization name. - -#### List of invite requests (required in `POST` body) - -Each invite request is an object with the following optional fields: - -- **`role`:** (Optional) Specify a [user role](https://tailscale.com/kb/1138/user-roles) to assign the invited user. Defaults to the `"member"` role. Valid options are: - - `"member"`: Assign the Member role. - - `"admin"`: Assign the Admin role. - - `"it-admin"`: Assign the IT admin role. - - `"network-admin"`: Assign the Network admin role. - - `"billing-admin"`: Assign the Billing admin role. - - `"auditor"`: Assign the Auditor role. -- **`email`:** (Optional) Specify the email to send the created invite. If not set, the endpoint generates and returns an invite URL (but doesn't send it out). 
- -### Request example - -```sh -curl -X POST "https://api.tailscale.com/api/v2/tailnet/example.com/user-invites" \ --u "tskey-api-xxxxx:" \ --H "Content-Type: application/json" \ ---data-binary '[{"role": "admin", "email":"user@example.com"}]' -``` - -### Response - -```jsonc -[ - { - "id": "29214", - "role": "admin", - "tailnetId": 12345, - "inviterId": 34567, - "email": "user@example.com", - "lastEmailSentAt": "2024-05-09T16:23:26.91778771Z", - "inviteUrl": "https://login.tailscale.com/uinv/" - } -] -``` diff --git a/publicapi/userinvites.md b/publicapi/userinvites.md deleted file mode 100644 index f0b463edd788c..0000000000000 --- a/publicapi/userinvites.md +++ /dev/null @@ -1,147 +0,0 @@ -> [!IMPORTANT] -> The Tailscale API documentation has moved to https://tailscale.com/api - -# User invites - -A user invite is an active invitation that lets a user join a tailnet with a pre-assigned [user role](https://tailscale.com/kb/1138/user-roles). - -Each user invite has a unique ID that is used to identify the invite in API calls. -You can find all user invite IDs for a particular tailnet by [listing user invites](#list-user-invites). - -### Attributes - -```jsonc -{ - // id (string) is the unique identifier for the invite. - // Supply this value wherever {userInviteId} is indicated in the endpoint. - "id": "12346", - - // role is the tailnet user role to assign to the invited user upon accepting - // the invite. Value options are "member", "admin", "it-admin", "network-admin", - // "billing-admin", and "auditor". - "role": "admin", - - // tailnetId is the ID of the tailnet to which the user was invited. - "tailnetId": 59954, - - // inviterId is the ID of the user who created the invite. - "inviterId": 22012, - - // email is the email to which the invite was sent. - // If empty, the invite was not emailed to anyone, but the inviteUrl can be - // shared manually. 
- "email": "user@example.com", - - // lastEmailSentAt is the last time the invite was attempted to be sent to - // Email. Only ever set if `email` is not empty. - "lastEmailSentAt": "2024-04-03T21:38:49.333829261Z", - - // inviteUrl is included when `email` is not part of the tailnet's domain, - // or when `email` is empty. It is the link to accept the invite. - // - // When included, anyone with this link can accept the invite. - // It is not restricted to the person to which the invite was emailed. - // - // When `email` is part of the tailnet's domain (has the same @domain.com - // suffix as the tailnet), the user can join the tailnet automatically by - // logging in with their domain email at https://login.tailscale.com/start. - // They'll be assigned the specified `role` upon signing in for the first - // time. - "inviteUrl": "https://login.tailscale.com/admin/invite/" -} -``` - -# API - -**[User invites](#user-invites)** - -- Get user invite: [`GET /api/v2/user-invites/{userInviteId}`](#get-user-invite) -- Delete user invite: [`DELETE /api/v2/user-invites/{userInviteId}`](#delete-user-invite) -- Resend user invite (by email): [`POST /api/v2/user-invites/{userInviteId}/resend`](#resend-user-invite) - -## Get user invite - -```http -GET /api/v2/user-invites/{userInviteId} -``` - -Retrieve the specified user invite. - -### Parameters - -#### `userInviteId` (required in URL path) - -The ID of the user invite. - -### Request example - -```sh -curl "https://api.tailscale.com/api/v2/user-invites/29214" \ - -u "tskey-api-xxxxx:" -``` - -### Response - -```jsonc -{ - "id": "29214", - "role": "admin", - "tailnetId": 12345, - "inviterId": 34567, - "email": "user@example.com", - "lastEmailSentAt": "2024-05-09T16:23:26.91778771Z", - "inviteUrl": "https://login.tailscale.com/uinv/" -} -``` - -## Delete user invite - -```http -DELETE /api/v2/user-invites/{userInviteId} -``` - -Delete the specified user invite. 
- -### Parameters - -#### `userInviteId` (required in URL path) - -The ID of the user invite. - -### Request example - -```sh -curl -X DELETE "https://api.tailscale.com/api/v2/user-invites/29214" \ - -u "tskey-api-xxxxx:" -``` - -### Response - -The response is 2xx on success. The response body is an empty JSON object. - -## Resend user invite - -```http -POST /api/v2/user-invites/{userInviteId}/resend -``` - -Resend the specified user invite by email. You can only use this if the specified invite was originally created with an email specified. Refer to [creating user invites for a tailnet](#create-user-invites). - -Note: Invite resends are rate limited to one per minute. - -### Parameters - -#### `userInviteId` (required in URL path) - -The ID of the user invite. - -### Request example - -```sh -curl -X POST "https://api.tailscale.com/api/v2/user-invites/29214/resend" \ - -u "tskey-api-xxxxx:" -``` - -### Response - -The response is 2xx on success. The response body is an empty JSON object. diff --git a/release/dist/synology/pkgs.go b/release/dist/synology/pkgs.go index d6df06d961cc8..7802470e167fe 100644 --- a/release/dist/synology/pkgs.go +++ b/release/dist/synology/pkgs.go @@ -24,13 +24,14 @@ import ( type target struct { filenameArch string dsmMajorVersion int + dsmMinorVersion int goenv map[string]string packageCenter bool signer dist.Signer } func (t *target) String() string { - return fmt.Sprintf("synology/dsm%d/%s", t.dsmMajorVersion, t.filenameArch) + return fmt.Sprintf("synology/dsm%s/%s", t.dsmVersionString(), t.filenameArch) } func (t *target) Build(b *dist.Build) ([]string, error) { @@ -42,9 +43,31 @@ func (t *target) Build(b *dist.Build) ([]string, error) { return t.buildSPK(b, inner) } +// dsmVersionInt combines major and minor version info into an int +// representation. +// +// Version 7.2 becomes 72 as an example. 
+func (t *target) dsmVersionInt() int { + return t.dsmMajorVersion*10 + t.dsmMinorVersion +} + +// dsmVersionString returns a string representation of the version +// including minor version information if it exists. +// +// If dsmMinorVersion is 0 this returns dsmMajorVersion as a string, +// otherwise it returns "dsmMajorVersion-dsmMinorVersion". +func (t *target) dsmVersionString() string { + dsmVersionString := fmt.Sprintf("%d", t.dsmMajorVersion) + if t.dsmMinorVersion != 0 { + dsmVersionString = fmt.Sprintf("%s-%d", dsmVersionString, t.dsmMinorVersion) + } + + return dsmVersionString +} + func (t *target) buildSPK(b *dist.Build, inner *innerPkg) ([]string, error) { - synoVersion := b.Version.Synology[t.dsmMajorVersion] - filename := fmt.Sprintf("tailscale-%s-%s-%d-dsm%d.spk", t.filenameArch, b.Version.Short, synoVersion, t.dsmMajorVersion) + synoVersion := b.Version.Synology[t.dsmVersionInt()] + filename := fmt.Sprintf("tailscale-%s-%s-%d-dsm%s.spk", t.filenameArch, b.Version.Short, synoVersion, t.dsmVersionString()) out := filepath.Join(b.Out, filename) if t.packageCenter { log.Printf("Building %s (for package center)", filename) @@ -117,7 +140,7 @@ func (t *target) mkInfo(b *dist.Build, uncompressedSz int64) []byte { fmt.Fprintf(&ret, "%s=%q\n", k, v) } f("package", "Tailscale") - f("version", fmt.Sprintf("%s-%d", b.Version.Short, b.Version.Synology[t.dsmMajorVersion])) + f("version", fmt.Sprintf("%s-%d", b.Version.Short, b.Version.Synology[t.dsmVersionInt()])) f("arch", t.filenameArch) f("description", "Connect all your devices using WireGuard, without the hassle.") f("displayname", "Tailscale") diff --git a/release/dist/synology/targets.go b/release/dist/synology/targets.go index cea372e1a4d17..bc7b20afca5d3 100644 --- a/release/dist/synology/targets.go +++ b/release/dist/synology/targets.go @@ -28,11 +28,22 @@ var v7Models = []string{ func Targets(forPackageCenter bool, signer dist.Signer) []dist.Target { var ret []dist.Target - for _, dsmVersion := 
range []int{6, 7} { + for _, dsmVersion := range []struct { + major int + minor int + }{ + // DSM6 + {major: 6}, + // DSM7 + {major: 7}, + // DSM7.2 + {major: 7, minor: 2}, + } { ret = append(ret, &target{ filenameArch: "x86_64", - dsmMajorVersion: dsmVersion, + dsmMajorVersion: dsmVersion.major, + dsmMinorVersion: dsmVersion.minor, goenv: map[string]string{ "GOOS": "linux", "GOARCH": "amd64", @@ -42,7 +53,8 @@ func Targets(forPackageCenter bool, signer dist.Signer) []dist.Target { }, &target{ filenameArch: "i686", - dsmMajorVersion: dsmVersion, + dsmMajorVersion: dsmVersion.major, + dsmMinorVersion: dsmVersion.minor, goenv: map[string]string{ "GOOS": "linux", "GOARCH": "386", @@ -52,7 +64,8 @@ func Targets(forPackageCenter bool, signer dist.Signer) []dist.Target { }, &target{ filenameArch: "armv8", - dsmMajorVersion: dsmVersion, + dsmMajorVersion: dsmVersion.major, + dsmMinorVersion: dsmVersion.minor, goenv: map[string]string{ "GOOS": "linux", "GOARCH": "arm64", @@ -67,7 +80,8 @@ func Targets(forPackageCenter bool, signer dist.Signer) []dist.Target { for _, v5Arch := range v5Models { ret = append(ret, &target{ filenameArch: v5Arch, - dsmMajorVersion: dsmVersion, + dsmMajorVersion: dsmVersion.major, + dsmMinorVersion: dsmVersion.minor, goenv: map[string]string{ "GOOS": "linux", "GOARCH": "arm", @@ -80,7 +94,8 @@ func Targets(forPackageCenter bool, signer dist.Signer) []dist.Target { for _, v7Arch := range v7Models { ret = append(ret, &target{ filenameArch: v7Arch, - dsmMajorVersion: dsmVersion, + dsmMajorVersion: dsmVersion.major, + dsmMinorVersion: dsmVersion.minor, goenv: map[string]string{ "GOOS": "linux", "GOARCH": "arm", diff --git a/safesocket/safesocket_ps.go b/safesocket/safesocket_ps.go index f7d97f7fdba63..18197846d307f 100644 --- a/safesocket/safesocket_ps.go +++ b/safesocket/safesocket_ps.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux || windows || darwin || freebsd +//go:build 
linux || windows || (darwin && !ios) || freebsd package safesocket diff --git a/safeweb/http.go b/safeweb/http.go index c2787611e2457..14c61336ac311 100644 --- a/safeweb/http.go +++ b/safeweb/http.go @@ -94,6 +94,10 @@ var defaultCSP = strings.Join([]string{ `object-src 'self'`, // disallow embedding of resources from other origins }, "; ") +// The default Strict-Transport-Security header. This header tells the browser +// to exclusively use HTTPS for all requests to the origin for the next year. +var DefaultStrictTransportSecurityOptions = "max-age=31536000" + // Config contains the configuration for a safeweb server. type Config struct { // SecureContext specifies whether the Server is running in a secure (HTTPS) context. @@ -134,6 +138,18 @@ type Config struct { // CookiesSameSiteLax specifies whether to use SameSite=Lax in cookies. The // default is to set SameSite=Strict. CookiesSameSiteLax bool + + // StrictTransportSecurityOptions specifies optional directives for the + // Strict-Transport-Security header sent in response to requests made to the + // BrowserMux when SecureContext is true. + // If empty, it defaults to max-age of 1 year. + StrictTransportSecurityOptions string + + // HTTPServer, if specified, is the underlying http.Server that safeweb will + // use to serve requests. If nil, a new http.Server will be created. + // Do not use the Handler field of http.Server, as it will be ignored. + // Instead, set your handlers using APIMux and BrowserMux. 
+ HTTPServer *http.Server } func (c *Config) setDefaults() error { @@ -193,7 +209,11 @@ func NewServer(config Config) (*Server, error) { if config.CSPAllowInlineStyles { s.csp = defaultCSP + `; style-src 'self' 'unsafe-inline'` } - s.h = &http.Server{Handler: s} + s.h = cmp.Or(config.HTTPServer, &http.Server{}) + if s.h.Handler != nil { + return nil, fmt.Errorf("use safeweb.Config.APIMux and safeweb.Config.BrowserMux instead of http.Server.Handler") + } + s.h.Handler = s return s, nil } @@ -274,6 +294,9 @@ func (s *Server) serveBrowser(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Security-Policy", s.csp) w.Header().Set("X-Content-Type-Options", "nosniff") w.Header().Set("Referer-Policy", "same-origin") + if s.SecureContext { + w.Header().Set("Strict-Transport-Security", cmp.Or(s.StrictTransportSecurityOptions, DefaultStrictTransportSecurityOptions)) + } s.csrfProtect(s.BrowserMux).ServeHTTP(w, r) } @@ -301,6 +324,19 @@ func (s *Server) Serve(ln net.Listener) error { return s.h.Serve(ln) } +// ListenAndServe listens on the TCP network address addr and then calls Serve +// to handle requests on incoming connections. If addr == "", ":http" is used. +func (s *Server) ListenAndServe(addr string) error { + if addr == "" { + addr = ":http" + } + lst, err := net.Listen("tcp", addr) + if err != nil { + return err + } + return s.Serve(lst) +} + // Close closes all client connections and stops accepting new ones. 
func (s *Server) Close() error { return s.h.Close() diff --git a/safeweb/http_test.go b/safeweb/http_test.go index f48aa64a79b7a..cec14b2b9bb8b 100644 --- a/safeweb/http_test.go +++ b/safeweb/http_test.go @@ -10,7 +10,9 @@ import ( "strconv" "strings" "testing" + "time" + "github.com/google/go-cmp/cmp" "github.com/gorilla/csrf" ) @@ -561,3 +563,73 @@ func TestGetMoreSpecificPattern(t *testing.T) { }) } } + +func TestStrictTransportSecurityOptions(t *testing.T) { + tests := []struct { + name string + options string + secureContext bool + expect string + }{ + { + name: "off by default", + }, + { + name: "default HSTS options in the secure context", + secureContext: true, + expect: DefaultStrictTransportSecurityOptions, + }, + { + name: "custom options sent in the secure context", + options: DefaultStrictTransportSecurityOptions + "; includeSubDomains", + secureContext: true, + expect: DefaultStrictTransportSecurityOptions + "; includeSubDomains", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + h := &http.ServeMux{} + h.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("ok")) + })) + s, err := NewServer(Config{BrowserMux: h, SecureContext: tt.secureContext, StrictTransportSecurityOptions: tt.options}) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + req := httptest.NewRequest("GET", "/", nil) + w := httptest.NewRecorder() + s.h.Handler.ServeHTTP(w, req) + resp := w.Result() + + if cmp.Diff(tt.expect, resp.Header.Get("Strict-Transport-Security")) != "" { + t.Fatalf("HSTS want: %q; got: %q", tt.expect, resp.Header.Get("Strict-Transport-Security")) + } + }) + } +} + +func TestOverrideHTTPServer(t *testing.T) { + s, err := NewServer(Config{}) + if err != nil { + t.Fatalf("NewServer: %v", err) + } + if s.h.IdleTimeout != 0 { + t.Fatalf("got %v; want 0", s.h.IdleTimeout) + } + + c := http.Server{ + IdleTimeout: 10 * time.Second, + } + + s, err = NewServer(Config{HTTPServer: &c}) + if err != 
nil { + t.Fatalf("NewServer: %v", err) + } + + if s.h.IdleTimeout != c.IdleTimeout { + t.Fatalf("got %v; want %v", s.h.IdleTimeout, c.IdleTimeout) + } +} diff --git a/ssh/tailssh/accept_env.go b/ssh/tailssh/accept_env.go new file mode 100644 index 0000000000000..6461a79a3408b --- /dev/null +++ b/ssh/tailssh/accept_env.go @@ -0,0 +1,119 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tailssh + +import ( + "fmt" + "slices" + "strings" +) + +// filterEnv filters a passed in environ string slice (a slice with strings +// representing environment variables in the form "key=value") based on +// the supplied slice of acceptEnv values. +// +// acceptEnv is a slice of environment variable names that are allowlisted +// for the SSH rule in the policy file. +// +// acceptEnv values may contain * and ? wildcard characters which match against +// zero or one or more characters and a single character respectively. +func filterEnv(acceptEnv []string, environ []string) ([]string, error) { + var acceptedPairs []string + + // Quick return if we have an empty list. + if acceptEnv == nil || len(acceptEnv) == 0 { + return acceptedPairs, nil + } + + for _, envPair := range environ { + variableName, _, ok := strings.Cut(envPair, "=") + if !ok { + return nil, fmt.Errorf(`invalid environment variable: %q. Variables must be in "KEY=VALUE" format`, envPair) + } + + // Short circuit if we have a direct match between the environment + // variable and an AcceptEnv value. + if slices.Contains(acceptEnv, variableName) { + acceptedPairs = append(acceptedPairs, envPair) + continue + } + + // Otherwise check if we have a wildcard pattern that matches. 
+ if matchAcceptEnv(acceptEnv, variableName) { + acceptedPairs = append(acceptedPairs, envPair) + continue + } + } + + return acceptedPairs, nil +} + +// matchAcceptEnv is a convenience function that wraps calling matchAcceptEnvPattern +// with every value in acceptEnv for a given env that is being matched against. +func matchAcceptEnv(acceptEnv []string, env string) bool { + for _, pattern := range acceptEnv { + if matchAcceptEnvPattern(pattern, env) { + return true + } + } + + return false +} + +// matchAcceptEnvPattern returns true if the pattern matches against the target string. +// Patterns may include * and ? wildcard characters which match against zero or one or +// more characters and a single character respectively. +func matchAcceptEnvPattern(pattern string, target string) bool { + patternIdx := 0 + targetIdx := 0 + + for { + // If we are at the end of the pattern we can only have a match if we + // are also at the end of the target. + if patternIdx >= len(pattern) { + return targetIdx >= len(target) + } + + if pattern[patternIdx] == '*' { + // Optimization to skip through any repeated asterisks as they + // have the same net effect on our search. + for patternIdx < len(pattern) { + if pattern[patternIdx] != '*' { + break + } + + patternIdx++ + } + + // We are at the end of the pattern after matching the asterisk, + // implying a match. + if patternIdx >= len(pattern) { + return true + } + + // Search through the target sequentially for the next character + // from the pattern string, recursing into matchAcceptEnvPattern + // to try and find a match. + for ; targetIdx < len(target); targetIdx++ { + if matchAcceptEnvPattern(pattern[patternIdx:], target[targetIdx:]) { + return true + } + } + + // No match after searching through the entire target. + return false + } + + if targetIdx >= len(target) { + return false + } + + if pattern[patternIdx] != '?' 
&& pattern[patternIdx] != target[targetIdx] { + return false + } + + patternIdx++ + targetIdx++ + } +} diff --git a/ssh/tailssh/accept_env_test.go b/ssh/tailssh/accept_env_test.go new file mode 100644 index 0000000000000..b54c980978ece --- /dev/null +++ b/ssh/tailssh/accept_env_test.go @@ -0,0 +1,156 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tailssh + +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestMatchAcceptEnvPattern(t *testing.T) { + testCases := []struct { + pattern string + target string + match bool + }{ + {pattern: "*", target: "EXAMPLE_ENV", match: true}, + {pattern: "***", target: "123456", match: true}, + + {pattern: "?", target: "A", match: true}, + {pattern: "?", target: "123", match: false}, + + {pattern: "?*", target: "EXAMPLE_2", match: true}, + {pattern: "?*", target: "", match: false}, + + {pattern: "*?", target: "A", match: true}, + {pattern: "*?", target: "", match: false}, + + {pattern: "??", target: "CC", match: true}, + {pattern: "??", target: "123", match: false}, + + {pattern: "*?*", target: "ABCDEFG", match: true}, + {pattern: "*?*", target: "C", match: true}, + {pattern: "*?*", target: "", match: false}, + + {pattern: "?*?", target: "ABCDEFG", match: true}, + {pattern: "?*?", target: "A", match: false}, + + {pattern: "**?TEST", target: "_TEST", match: true}, + {pattern: "**?TEST", target: "_TESTING", match: false}, + + {pattern: "TEST**?", target: "TEST_", match: true}, + {pattern: "TEST**?", target: "A_TEST_", match: false}, + + {pattern: "TEST_*", target: "TEST_A", match: true}, + {pattern: "TEST_*", target: "TEST_A_LONG_ENVIRONMENT_VARIABLE_NAME", match: true}, + {pattern: "TEST_*", target: "TEST", match: false}, + + {pattern: "EXAMPLE_?_ENV", target: "EXAMPLE_A_ENV", match: true}, + {pattern: "EXAMPLE_?_ENV", target: "EXAMPLE_ENV", match: false}, + + {pattern: "EXAMPLE_*_ENV", target: "EXAMPLE_aBcd2231---_ENV", match: true}, + {pattern: 
"EXAMPLE_*_ENV", target: "EXAMPLEENV", match: false}, + + {pattern: "COMPLICA?ED_PATTERN*", target: "COMPLICATED_PATTERN_REST", match: true}, + {pattern: "COMPLICA?ED_PATTERN*", target: "COMPLICATED_PATT", match: false}, + + {pattern: "COMPLICAT???ED_PATT??ERN", target: "COMPLICAT123ED_PATTggERN", match: true}, + {pattern: "COMPLICAT???ED_PATT??ERN", target: "COMPLICATED_PATTERN", match: false}, + + {pattern: "DIRECT_MATCH", target: "DIRECT_MATCH", match: true}, + {pattern: "DIRECT_MATCH", target: "MISS", match: false}, + + // OpenSSH compatibility cases + // See https://github.com/openssh/openssh-portable/blob/master/regress/unittests/match/tests.c + {pattern: "", target: "", match: true}, + {pattern: "aaa", target: "", match: false}, + {pattern: "", target: "aaa", match: false}, + {pattern: "aaaa", target: "aaa", match: false}, + {pattern: "aaa", target: "aaaa", match: false}, + {pattern: "*", target: "", match: true}, + {pattern: "?", target: "a", match: true}, + {pattern: "a?", target: "aa", match: true}, + {pattern: "*", target: "a", match: true}, + {pattern: "a*", target: "aa", match: true}, + {pattern: "?*", target: "aa", match: true}, + {pattern: "**", target: "aa", match: true}, + {pattern: "?a", target: "aa", match: true}, + {pattern: "*a", target: "aa", match: true}, + {pattern: "a?", target: "ba", match: false}, + {pattern: "a*", target: "ba", match: false}, + {pattern: "?a", target: "ab", match: false}, + {pattern: "*a", target: "ab", match: false}, + } + + for _, tc := range testCases { + name := fmt.Sprintf("pattern_%s_target_%s", tc.pattern, tc.target) + if tc.match { + name += "_should_match" + } else { + name += "_should_not_match" + } + + t.Run(name, func(t *testing.T) { + match := matchAcceptEnvPattern(tc.pattern, tc.target) + if match != tc.match { + t.Errorf("got %v, want %v", match, tc.match) + } + }) + } +} + +func TestFilterEnv(t *testing.T) { + testCases := []struct { + name string + acceptEnv []string + environ []string + expectedFiltered 
[]string + wantErrMessage string + }{ + { + name: "simple direct matches", + acceptEnv: []string{"FOO", "FOO2", "FOO_3"}, + environ: []string{"FOO=BAR", "FOO2=BAZ", "FOO_3=123", "FOOOO4-2=AbCdEfG"}, + expectedFiltered: []string{"FOO=BAR", "FOO2=BAZ", "FOO_3=123"}, + }, + { + name: "bare wildcard", + acceptEnv: []string{"*"}, + environ: []string{"FOO=BAR", "FOO2=BAZ", "FOO_3=123", "FOOOO4-2=AbCdEfG"}, + expectedFiltered: []string{"FOO=BAR", "FOO2=BAZ", "FOO_3=123", "FOOOO4-2=AbCdEfG"}, + }, + { + name: "complex matches", + acceptEnv: []string{"FO?", "FOOO*", "FO*5?7"}, + environ: []string{"FOO=BAR", "FOO2=BAZ", "FOO_3=123", "FOOOO4-2=AbCdEfG", "FO1-kmndGamc79567=ABC", "FO57=BAR2"}, + expectedFiltered: []string{"FOO=BAR", "FOOOO4-2=AbCdEfG", "FO1-kmndGamc79567=ABC"}, + }, + { + name: "environ format invalid", + acceptEnv: []string{"FO?", "FOOO*", "FO*5?7"}, + environ: []string{"FOOBAR"}, + expectedFiltered: nil, + wantErrMessage: `invalid environment variable: "FOOBAR". Variables must be in "KEY=VALUE" format`, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + filtered, err := filterEnv(tc.acceptEnv, tc.environ) + if err == nil && tc.wantErrMessage != "" { + t.Errorf("wanted error with message %q but error was nil", tc.wantErrMessage) + } + + if err != nil && err.Error() != tc.wantErrMessage { + t.Errorf("err = %v; want %v", err, tc.wantErrMessage) + } + + if diff := cmp.Diff(tc.expectedFiltered, filtered); diff != "" { + t.Errorf("unexpected filter result (-got,+want): \n%s", diff) + } + }) + } +} diff --git a/ssh/tailssh/incubator.go b/ssh/tailssh/incubator.go index 37f2a54343ed2..7748376b2548b 100644 --- a/ssh/tailssh/incubator.go +++ b/ssh/tailssh/incubator.go @@ -12,6 +12,7 @@ package tailssh import ( + "encoding/json" "errors" "flag" "fmt" @@ -154,6 +155,22 @@ func (ss *sshSession) newIncubatorCommand(logf logger.Logf) (cmd *exec.Cmd, err incubatorArgs = append(incubatorArgs, "--cmd="+ss.RawCommand()) } + allowSendEnv := 
nm.HasCap(tailcfg.NodeAttrSSHEnvironmentVariables) + if allowSendEnv { + env, err := filterEnv(ss.conn.acceptEnv, ss.Session.Environ()) + if err != nil { + return nil, err + } + + if len(env) > 0 { + encoded, err := json.Marshal(env) + if err != nil { + return nil, fmt.Errorf("failed to encode environment: %w", err) + } + incubatorArgs = append(incubatorArgs, fmt.Sprintf("--encoded-env=%q", encoded)) + } + } + return exec.CommandContext(ss.ctx, ss.conn.srv.tailscaledPath, incubatorArgs...), nil } @@ -192,6 +209,9 @@ type incubatorArgs struct { forceV1Behavior bool debugTest bool isSELinuxEnforcing bool + encodedEnv string + allowListEnvKeys string + forwardedEnviron []string } func parseIncubatorArgs(args []string) (incubatorArgs, error) { @@ -215,6 +235,7 @@ func parseIncubatorArgs(args []string) (incubatorArgs, error) { flags.BoolVar(&ia.forceV1Behavior, "force-v1-behavior", false, "allow falling back to the su command if login is unavailable") flags.BoolVar(&ia.debugTest, "debug-test", false, "should debug in test mode") flags.BoolVar(&ia.isSELinuxEnforcing, "is-selinux-enforcing", false, "whether SELinux is in enforcing mode") + flags.StringVar(&ia.encodedEnv, "encoded-env", "", "JSON encoded array of environment variables in '['key=value']' format") flags.Parse(args) for _, g := range strings.Split(groups, ",") { @@ -225,6 +246,30 @@ func parseIncubatorArgs(args []string) (incubatorArgs, error) { ia.gids = append(ia.gids, gid) } + ia.forwardedEnviron = os.Environ() + // pass through SSH_AUTH_SOCK environment variable to support ssh agent forwarding + ia.allowListEnvKeys = "SSH_AUTH_SOCK" + + if ia.encodedEnv != "" { + unquoted, err := strconv.Unquote(ia.encodedEnv) + if err != nil { + return ia, fmt.Errorf("unable to parse encodedEnv %q: %w", ia.encodedEnv, err) + } + + var extraEnviron []string + + err = json.Unmarshal([]byte(unquoted), &extraEnviron) + if err != nil { + return ia, fmt.Errorf("unable to parse encodedEnv %q: %w", ia.encodedEnv, err) + } + + 
ia.forwardedEnviron = append(ia.forwardedEnviron, extraEnviron...) + + for _, v := range extraEnviron { + ia.allowListEnvKeys = fmt.Sprintf("%s,%s", ia.allowListEnvKeys, strings.Split(v, "=")[0]) + } + } + return ia, nil } @@ -406,7 +451,7 @@ func tryExecLogin(dlogf logger.Logf, ia incubatorArgs) error { dlogf("logging in with %+v", loginArgs) // If Exec works, the Go code will not proceed past this: - err = unix.Exec(loginCmdPath, loginArgs, os.Environ()) + err = unix.Exec(loginCmdPath, loginArgs, ia.forwardedEnviron) // If we made it here, Exec failed. return err @@ -441,7 +486,7 @@ func trySU(dlogf logger.Logf, ia incubatorArgs) (handled bool, err error) { loginArgs := []string{ su, - "-w", "SSH_AUTH_SOCK", // pass through SSH_AUTH_SOCK environment variable to support ssh agent forwarding + "-w", ia.allowListEnvKeys, "-l", ia.localUser, } @@ -453,7 +498,7 @@ func trySU(dlogf logger.Logf, ia incubatorArgs) (handled bool, err error) { dlogf("logging in with %+v", loginArgs) // If Exec works, the Go code will not proceed past this: - err = unix.Exec(su, loginArgs, os.Environ()) + err = unix.Exec(su, loginArgs, ia.forwardedEnviron) // If we made it here, Exec failed. return true, err @@ -482,11 +527,11 @@ func findSU(dlogf logger.Logf, ia incubatorArgs) string { return "" } - // First try to execute su -w SSH_AUTH_SOCK -l -c true + // First try to execute su -w -l -c true // to make sure su supports the necessary arguments. 
err = exec.Command( su, - "-w", "SSH_AUTH_SOCK", + "-w", ia.allowListEnvKeys, "-l", ia.localUser, "-c", "true", @@ -515,7 +560,7 @@ func handleSSHInProcess(dlogf logger.Logf, ia incubatorArgs) error { args := shellArgs(ia.isShell, ia.cmd) dlogf("running %s %q", ia.loginShell, args) - cmd := newCommand(ia.hasTTY, ia.loginShell, args) + cmd := newCommand(ia.hasTTY, ia.loginShell, ia.forwardedEnviron, args) err := cmd.Run() if ee, ok := err.(*exec.ExitError); ok { ps := ee.ProcessState @@ -532,12 +577,12 @@ func handleSSHInProcess(dlogf logger.Logf, ia incubatorArgs) error { return err } -func newCommand(hasTTY bool, cmdPath string, cmdArgs []string) *exec.Cmd { +func newCommand(hasTTY bool, cmdPath string, cmdEnviron []string, cmdArgs []string) *exec.Cmd { cmd := exec.Command(cmdPath, cmdArgs...) cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr - cmd.Env = os.Environ() + cmd.Env = cmdEnviron if hasTTY { // If we were launched with a tty then we should mark that as the ctty @@ -747,8 +792,10 @@ func (ss *sshSession) launchProcess() error { func resizeWindow(fd int, winCh <-chan ssh.Window) { for win := range winCh { unix.IoctlSetWinsize(fd, syscall.TIOCSWINSZ, &unix.Winsize{ - Row: uint16(win.Height), - Col: uint16(win.Width), + Row: uint16(win.Height), + Col: uint16(win.Width), + Xpixel: uint16(win.WidthPixels), + Ypixel: uint16(win.HeightPixels), }) } } diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index 7187b5b595ff7..9ade1847e6b27 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -238,6 +238,7 @@ type conn struct { localUser *userMeta // set by doPolicyAuth userGroupIDs []string // set by doPolicyAuth pubKey gossh.PublicKey // set by doPolicyAuth + acceptEnv []string // mu protects the following fields. 
// @@ -377,7 +378,7 @@ func (c *conn) doPolicyAuth(ctx ssh.Context, pubKey ssh.PublicKey) error { c.logf("failed to get conninfo: %v", err) return errDenied } - a, localUser, err := c.evaluatePolicy(pubKey) + a, localUser, acceptEnv, err := c.evaluatePolicy(pubKey) if err != nil { if pubKey == nil && c.havePubKeyPolicy() { return errPubKeyRequired @@ -387,6 +388,7 @@ func (c *conn) doPolicyAuth(ctx ssh.Context, pubKey ssh.PublicKey) error { c.action0 = a c.currentAction = a c.pubKey = pubKey + c.acceptEnv = acceptEnv if a.Message != "" { if err := ctx.SendAuthBanner(a.Message); err != nil { return fmt.Errorf("SendBanner: %w", err) @@ -619,16 +621,16 @@ func (c *conn) setInfo(ctx ssh.Context) error { // evaluatePolicy returns the SSHAction and localUser after evaluating // the SSHPolicy for this conn. The pubKey may be nil for "none" auth. -func (c *conn) evaluatePolicy(pubKey gossh.PublicKey) (_ *tailcfg.SSHAction, localUser string, _ error) { +func (c *conn) evaluatePolicy(pubKey gossh.PublicKey) (_ *tailcfg.SSHAction, localUser string, acceptEnv []string, _ error) { pol, ok := c.sshPolicy() if !ok { - return nil, "", fmt.Errorf("tailssh: rejecting connection; no SSH policy") + return nil, "", nil, fmt.Errorf("tailssh: rejecting connection; no SSH policy") } - a, localUser, ok := c.evalSSHPolicy(pol, pubKey) + a, localUser, acceptEnv, ok := c.evalSSHPolicy(pol, pubKey) if !ok { - return nil, "", fmt.Errorf("tailssh: rejecting connection; no matching policy") + return nil, "", nil, fmt.Errorf("tailssh: rejecting connection; no matching policy") } - return a, localUser, nil + return a, localUser, acceptEnv, nil } // pubKeyCacheEntry is the cache value for an HTTPS URL of public keys (like @@ -892,7 +894,7 @@ func (c *conn) newSSHSession(s ssh.Session) *sshSession { // isStillValid reports whether the conn is still valid. 
func (c *conn) isStillValid() bool { - a, localUser, err := c.evaluatePolicy(c.pubKey) + a, localUser, _, err := c.evaluatePolicy(c.pubKey) c.vlogf("stillValid: %+v %v %v", a, localUser, err) if err != nil { return false @@ -1275,13 +1277,13 @@ func (c *conn) ruleExpired(r *tailcfg.SSHRule) bool { return r.RuleExpires.Before(c.srv.now()) } -func (c *conn) evalSSHPolicy(pol *tailcfg.SSHPolicy, pubKey gossh.PublicKey) (a *tailcfg.SSHAction, localUser string, ok bool) { +func (c *conn) evalSSHPolicy(pol *tailcfg.SSHPolicy, pubKey gossh.PublicKey) (a *tailcfg.SSHAction, localUser string, acceptEnv []string, ok bool) { for _, r := range pol.Rules { - if a, localUser, err := c.matchRule(r, pubKey); err == nil { - return a, localUser, true + if a, localUser, acceptEnv, err := c.matchRule(r, pubKey); err == nil { + return a, localUser, acceptEnv, true } } - return nil, "", false + return nil, "", nil, false } // internal errors for testing; they don't escape to callers or logs. @@ -1294,26 +1296,26 @@ var ( errInvalidConn = errors.New("invalid connection state") ) -func (c *conn) matchRule(r *tailcfg.SSHRule, pubKey gossh.PublicKey) (a *tailcfg.SSHAction, localUser string, err error) { +func (c *conn) matchRule(r *tailcfg.SSHRule, pubKey gossh.PublicKey) (a *tailcfg.SSHAction, localUser string, acceptEnv []string, err error) { defer func() { c.vlogf("matchRule(%+v): %v", r, err) }() if c == nil { - return nil, "", errInvalidConn + return nil, "", nil, errInvalidConn } if c.info == nil { c.logf("invalid connection state") - return nil, "", errInvalidConn + return nil, "", nil, errInvalidConn } if r == nil { - return nil, "", errNilRule + return nil, "", nil, errNilRule } if r.Action == nil { - return nil, "", errNilAction + return nil, "", nil, errNilAction } if c.ruleExpired(r) { - return nil, "", errRuleExpired + return nil, "", nil, errRuleExpired } if !r.Action.Reject { // For all but Reject rules, SSHUsers is required. 
@@ -1321,15 +1323,15 @@ func (c *conn) matchRule(r *tailcfg.SSHRule, pubKey gossh.PublicKey) (a *tailcfg // empty string anyway. localUser = mapLocalUser(r.SSHUsers, c.info.sshUser) if localUser == "" { - return nil, "", errUserMatch + return nil, "", nil, errUserMatch } } if ok, err := c.anyPrincipalMatches(r.Principals, pubKey); err != nil { - return nil, "", err + return nil, "", nil, err } else if !ok { - return nil, "", errPrincipalMatch + return nil, "", nil, errPrincipalMatch } - return r.Action, localUser, nil + return r.Action, localUser, r.AcceptEnv, nil } func mapLocalUser(ruleSSHUsers map[string]string, reqSSHUser string) (localUser string) { diff --git a/ssh/tailssh/tailssh_integration_test.go b/ssh/tailssh/tailssh_integration_test.go index 485c13fdbd1b2..1799d340019cb 100644 --- a/ssh/tailssh/tailssh_integration_test.go +++ b/ssh/tailssh/tailssh_integration_test.go @@ -108,6 +108,7 @@ func TestIntegrationSSH(t *testing.T) { want []string forceV1Behavior bool skip bool + allowSendEnv bool }{ { cmd: "id", @@ -131,6 +132,18 @@ func TestIntegrationSSH(t *testing.T) { skip: os.Getenv("SKIP_FILE_OPS") == "1" || !fallbackToSUAvailable(), forceV1Behavior: false, }, + { + cmd: `echo "${GIT_ENV_VAR:-unset1} ${EXACT_MATCH:-unset2} ${TESTING:-unset3} ${NOT_ALLOWED:-unset4}"`, + want: []string{"working1 working2 working3 unset4"}, + forceV1Behavior: false, + allowSendEnv: true, + }, + { + cmd: `echo "${GIT_ENV_VAR:-unset1} ${EXACT_MATCH:-unset2} ${TESTING:-unset3} ${NOT_ALLOWED:-unset4}"`, + want: []string{"unset1 unset2 unset3 unset4"}, + forceV1Behavior: false, + allowSendEnv: false, + }, } for _, test := range tests { @@ -151,7 +164,13 @@ func TestIntegrationSSH(t *testing.T) { } t.Run(fmt.Sprintf("%s_%s_%s", test.cmd, shellQualifier, versionQualifier), func(t *testing.T) { - s := testSession(t, test.forceV1Behavior) + sendEnv := map[string]string{ + "GIT_ENV_VAR": "working1", + "EXACT_MATCH": "working2", + "TESTING": "working3", + "NOT_ALLOWED": "working4", + 
} + s := testSession(t, test.forceV1Behavior, test.allowSendEnv, sendEnv) if shell { err := s.RequestPty("xterm", 40, 80, ssh.TerminalModes{ @@ -201,7 +220,7 @@ func TestIntegrationSFTP(t *testing.T) { } wantText := "hello world" - cl := testClient(t, forceV1Behavior) + cl := testClient(t, forceV1Behavior, false) scl, err := sftp.NewClient(cl) if err != nil { t.Fatalf("can't get sftp client: %s", err) @@ -233,7 +252,7 @@ func TestIntegrationSFTP(t *testing.T) { t.Fatalf("unexpected file contents (-got +want):\n%s", diff) } - s := testSessionFor(t, cl) + s := testSessionFor(t, cl, nil) got := s.run(t, "ls -l "+filePath, false) if !strings.Contains(got, "testuser") { t.Fatalf("unexpected file owner user: %s", got) @@ -262,7 +281,7 @@ func TestIntegrationSCP(t *testing.T) { } wantText := "hello world" - cl := testClient(t, forceV1Behavior) + cl := testClient(t, forceV1Behavior, false) scl, err := scp.NewClientBySSH(cl) if err != nil { t.Fatalf("can't get sftp client: %s", err) @@ -291,7 +310,7 @@ func TestIntegrationSCP(t *testing.T) { t.Fatalf("unexpected file contents (-got +want):\n%s", diff) } - s := testSessionFor(t, cl) + s := testSessionFor(t, cl, nil) got := s.run(t, "ls -l "+filePath, false) if !strings.Contains(got, "testuser") { t.Fatalf("unexpected file owner user: %s", got) @@ -349,7 +368,7 @@ func TestSSHAgentForwarding(t *testing.T) { // Run tailscale SSH server and connect to it username := "testuser" - tailscaleAddr := testServer(t, username, false) + tailscaleAddr := testServer(t, username, false, false) tcl, err := ssh.Dial("tcp", tailscaleAddr, &ssh.ClientConfig{ HostKeyCallback: ssh.InsecureIgnoreHostKey(), }) @@ -465,11 +484,11 @@ readLoop: return string(_got) } -func testClient(t *testing.T, forceV1Behavior bool, authMethods ...ssh.AuthMethod) *ssh.Client { +func testClient(t *testing.T, forceV1Behavior bool, allowSendEnv bool, authMethods ...ssh.AuthMethod) *ssh.Client { t.Helper() username := "testuser" - addr := testServer(t, username, 
forceV1Behavior) + addr := testServer(t, username, forceV1Behavior, allowSendEnv) cl, err := ssh.Dial("tcp", addr, &ssh.ClientConfig{ HostKeyCallback: ssh.InsecureIgnoreHostKey(), @@ -483,9 +502,9 @@ func testClient(t *testing.T, forceV1Behavior bool, authMethods ...ssh.AuthMetho return cl } -func testServer(t *testing.T, username string, forceV1Behavior bool) string { +func testServer(t *testing.T, username string, forceV1Behavior bool, allowSendEnv bool) string { srv := &server{ - lb: &testBackend{localUser: username, forceV1Behavior: forceV1Behavior}, + lb: &testBackend{localUser: username, forceV1Behavior: forceV1Behavior, allowSendEnv: allowSendEnv}, logf: log.Printf, tailscaledPath: os.Getenv("TAILSCALED_PATH"), timeNow: time.Now, @@ -509,16 +528,20 @@ func testServer(t *testing.T, username string, forceV1Behavior bool) string { return l.Addr().String() } -func testSession(t *testing.T, forceV1Behavior bool) *session { - cl := testClient(t, forceV1Behavior) - return testSessionFor(t, cl) +func testSession(t *testing.T, forceV1Behavior bool, allowSendEnv bool, sendEnv map[string]string) *session { + cl := testClient(t, forceV1Behavior, allowSendEnv) + return testSessionFor(t, cl, sendEnv) } -func testSessionFor(t *testing.T, cl *ssh.Client) *session { +func testSessionFor(t *testing.T, cl *ssh.Client, sendEnv map[string]string) *session { s, err := cl.NewSession() if err != nil { t.Fatal(err) } + for k, v := range sendEnv { + s.Setenv(k, v) + } + t.Cleanup(func() { s.Close() }) stdinReader, stdinWriter := io.Pipe() @@ -564,6 +587,7 @@ func generateClientKey(t *testing.T, privateKeyFile string) (ssh.Signer, *rsa.Pr type testBackend struct { localUser string forceV1Behavior bool + allowSendEnv bool } func (tb *testBackend) GetSSH_HostKeys() ([]gossh.Signer, error) { @@ -597,6 +621,9 @@ func (tb *testBackend) NetMap() *netmap.NetworkMap { if tb.forceV1Behavior { capMap[tailcfg.NodeAttrSSHBehaviorV1] = struct{}{} } + if tb.allowSendEnv { + 
capMap[tailcfg.NodeAttrSSHEnvironmentVariables] = struct{}{} + } return &netmap.NetworkMap{ SSHPolicy: &tailcfg.SSHPolicy{ Rules: []*tailcfg.SSHRule{ @@ -604,6 +631,7 @@ func (tb *testBackend) NetMap() *netmap.NetworkMap { Principals: []*tailcfg.SSHPrincipal{{Any: true}}, Action: &tailcfg.SSHAction{Accept: true, AllowAgentForwarding: true}, SSHUsers: map[string]string{"*": tb.localUser}, + AcceptEnv: []string{"GIT_*", "EXACT_MATCH", "TEST?NG"}, }, }, }, diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index bfc67081400fd..9e4f5ffd3d481 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -24,6 +24,7 @@ import ( "os/user" "reflect" "runtime" + "slices" "strconv" "strings" "sync" @@ -56,11 +57,12 @@ import ( func TestMatchRule(t *testing.T) { someAction := new(tailcfg.SSHAction) tests := []struct { - name string - rule *tailcfg.SSHRule - ci *sshConnInfo - wantErr error - wantUser string + name string + rule *tailcfg.SSHRule + ci *sshConnInfo + wantErr error + wantUser string + wantAcceptEnv []string }{ { name: "invalid-conn", @@ -153,6 +155,21 @@ func TestMatchRule(t *testing.T) { ci: &sshConnInfo{sshUser: "alice"}, wantUser: "thealice", }, + { + name: "ok-with-accept-env", + rule: &tailcfg.SSHRule{ + Action: someAction, + Principals: []*tailcfg.SSHPrincipal{{Any: true}}, + SSHUsers: map[string]string{ + "*": "ubuntu", + "alice": "thealice", + }, + AcceptEnv: []string{"EXAMPLE", "?_?", "TEST_*"}, + }, + ci: &sshConnInfo{sshUser: "alice"}, + wantUser: "thealice", + wantAcceptEnv: []string{"EXAMPLE", "?_?", "TEST_*"}, + }, { name: "no-users-for-reject", rule: &tailcfg.SSHRule{ @@ -210,7 +227,7 @@ func TestMatchRule(t *testing.T) { info: tt.ci, srv: &server{logf: t.Logf}, } - got, gotUser, err := c.matchRule(tt.rule, nil) + got, gotUser, gotAcceptEnv, err := c.matchRule(tt.rule, nil) if err != tt.wantErr { t.Errorf("err = %v; want %v", err, tt.wantErr) } @@ -220,6 +237,128 @@ func TestMatchRule(t *testing.T) { if err == nil 
&& got == nil { t.Errorf("expected non-nil action on success") } + if !slices.Equal(gotAcceptEnv, tt.wantAcceptEnv) { + t.Errorf("acceptEnv = %v; want %v", gotAcceptEnv, tt.wantAcceptEnv) + } + }) + } +} + +func TestEvalSSHPolicy(t *testing.T) { + someAction := new(tailcfg.SSHAction) + tests := []struct { + name string + policy *tailcfg.SSHPolicy + ci *sshConnInfo + wantMatch bool + wantUser string + wantAcceptEnv []string + }{ + { + name: "multiple-matches-picks-first-match", + policy: &tailcfg.SSHPolicy{ + Rules: []*tailcfg.SSHRule{ + { + Action: someAction, + Principals: []*tailcfg.SSHPrincipal{{Any: true}}, + SSHUsers: map[string]string{ + "other": "other1", + }, + }, + { + Action: someAction, + Principals: []*tailcfg.SSHPrincipal{{Any: true}}, + SSHUsers: map[string]string{ + "*": "ubuntu", + "alice": "thealice", + }, + AcceptEnv: []string{"EXAMPLE", "?_?", "TEST_*"}, + }, + { + Action: someAction, + Principals: []*tailcfg.SSHPrincipal{{Any: true}}, + SSHUsers: map[string]string{ + "other2": "other3", + }, + }, + { + Action: someAction, + Principals: []*tailcfg.SSHPrincipal{{Any: true}}, + SSHUsers: map[string]string{ + "*": "ubuntu", + "alice": "thealice", + "mark": "markthe", + }, + AcceptEnv: []string{"*"}, + }, + }, + }, + ci: &sshConnInfo{sshUser: "alice"}, + wantUser: "thealice", + wantAcceptEnv: []string{"EXAMPLE", "?_?", "TEST_*"}, + wantMatch: true, + }, + { + name: "no-matches-returns-failure", + policy: &tailcfg.SSHPolicy{ + Rules: []*tailcfg.SSHRule{ + { + Action: someAction, + Principals: []*tailcfg.SSHPrincipal{{Any: true}}, + SSHUsers: map[string]string{ + "other": "other1", + }, + }, + { + Action: someAction, + Principals: []*tailcfg.SSHPrincipal{{Any: true}}, + SSHUsers: map[string]string{ + "fedora": "ubuntu", + }, + AcceptEnv: []string{"EXAMPLE", "?_?", "TEST_*"}, + }, + { + Action: someAction, + Principals: []*tailcfg.SSHPrincipal{{Any: true}}, + SSHUsers: map[string]string{ + "other2": "other3", + }, + }, + { + Action: someAction, + 
Principals: []*tailcfg.SSHPrincipal{{Any: true}}, + SSHUsers: map[string]string{ + "mark": "markthe", + }, + AcceptEnv: []string{"*"}, + }, + }, + }, + ci: &sshConnInfo{sshUser: "alice"}, + wantUser: "", + wantAcceptEnv: nil, + wantMatch: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &conn{ + info: tt.ci, + srv: &server{logf: t.Logf}, + } + got, gotUser, gotAcceptEnv, match := c.evalSSHPolicy(tt.policy, nil) + if match != tt.wantMatch { + t.Errorf("match = %v; want %v", match, tt.wantMatch) + } + if gotUser != tt.wantUser { + t.Errorf("user = %q; want %q", gotUser, tt.wantUser) + } + if tt.wantMatch == true && got == nil { + t.Errorf("expected non-nil action on success") + } + if !slices.Equal(gotAcceptEnv, tt.wantAcceptEnv) { + t.Errorf("acceptEnv = %v; want %v", gotAcceptEnv, tt.wantAcceptEnv) + } }) } } @@ -826,7 +965,7 @@ func TestSSHAuthFlow(t *testing.T) { func TestSSH(t *testing.T) { var logf logger.Logf = t.Logf sys := &tsd.System{} - eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker()) + eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry()) if err != nil { t.Fatal(err) } diff --git a/syncs/syncs.go b/syncs/syncs.go index 0d40204d254cc..acc0c88f2991e 100644 --- a/syncs/syncs.go +++ b/syncs/syncs.go @@ -6,6 +6,7 @@ package syncs import ( "context" + "iter" "sync" "sync/atomic" @@ -252,16 +253,47 @@ func (m *Map[K, V]) Delete(key K) { delete(m.m, key) } -// Range iterates over the map in an undefined order calling f for each entry. -// Iteration stops if f returns false. Map changes are blocked during iteration. +// Keys iterates over all keys in the map in an undefined order. // A read lock is held for the entire duration of the iteration. // Use the [WithLock] method instead to mutate the map during iteration. 
-func (m *Map[K, V]) Range(f func(key K, value V) bool) { - m.mu.RLock() - defer m.mu.RUnlock() - for k, v := range m.m { - if !f(k, v) { - return +func (m *Map[K, V]) Keys() iter.Seq[K] { + return func(yield func(K) bool) { + m.mu.RLock() + defer m.mu.RUnlock() + for k := range m.m { + if !yield(k) { + return + } + } + } +} + +// Values iterates over all values in the map in an undefined order. +// A read lock is held for the entire duration of the iteration. +// Use the [WithLock] method instead to mutate the map during iteration. +func (m *Map[K, V]) Values() iter.Seq[V] { + return func(yield func(V) bool) { + m.mu.RLock() + defer m.mu.RUnlock() + for _, v := range m.m { + if !yield(v) { + return + } + } + } +} + +// All iterates over all entries in the map in an undefined order. +// A read lock is held for the entire duration of the iteration. +// Use the [WithLock] method instead to mutate the map during iteration. +func (m *Map[K, V]) All() iter.Seq2[K, V] { + return func(yield func(K, V) bool) { + m.mu.RLock() + defer m.mu.RUnlock() + for k, v := range m.m { + if !yield(k, v) { + return + } } } } @@ -272,6 +304,9 @@ func (m *Map[K, V]) Range(f func(key K, value V) bool) { func (m *Map[K, V]) WithLock(f func(m2 map[K]V)) { m.mu.Lock() defer m.mu.Unlock() + if m.m == nil { + m.m = make(map[K]V) + } f(m.m) } diff --git a/syncs/syncs_test.go b/syncs/syncs_test.go index 0748dcb72d6e3..ee3711e76587b 100644 --- a/syncs/syncs_test.go +++ b/syncs/syncs_test.go @@ -160,10 +160,9 @@ func TestMap(t *testing.T) { } got := map[string]int{} want := map[string]int{"one": 1, "two": 2, "three": 3} - m.Range(func(k string, v int) bool { + for k, v := range m.All() { got[k] = v - return true - }) + } if d := cmp.Diff(got, want); d != "" { t.Errorf("Range mismatch (-got +want):\n%s", d) } @@ -178,10 +177,9 @@ func TestMap(t *testing.T) { m.Delete("noexist") got = map[string]int{} want = map[string]int{} - m.Range(func(k string, v int) bool { + for k, v := range m.All() { got[k] 
= v - return true - }) + } if d := cmp.Diff(got, want); d != "" { t.Errorf("Range mismatch (-got +want):\n%s", d) } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 3cf486b0dee56..df50a860311d1 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -677,6 +677,16 @@ const ( PeerAPIDNS = ServiceProto("peerapi-dns-proxy") ) +// IsKnownServiceProto checks whether sp represents a known-valid value of +// ServiceProto. +func IsKnownServiceProto(sp ServiceProto) bool { + switch sp { + case TCP, UDP, PeerAPI4, PeerAPI6, PeerAPIDNS, ServiceProto("egg"): + return true + } + return false +} + // Service represents a service running on a node. type Service struct { _ structs.Incomparable @@ -2342,6 +2352,10 @@ const ( // NodeAttrDisableCaptivePortalDetection instructs the client to not perform captive portal detection // automatically when the network state changes. NodeAttrDisableCaptivePortalDetection NodeCapability = "disable-captive-portal-detection" + + // NodeAttrSSHEnvironmentVariables enables logic for handling environment variables sent + // via SendEnv in the SSH server and applying them to the SSH session. + NodeAttrSSHEnvironmentVariables NodeCapability = "ssh-env-vars" ) // SetDNSRequest is a request to add a DNS record. @@ -2447,6 +2461,13 @@ type SSHRule struct { // Action is the outcome to task. // A nil or invalid action means to deny. Action *SSHAction `json:"action"` + + // AcceptEnv is a slice of environment variable names that are allowlisted + // for the SSH rule in the policy file. + // + // AcceptEnv values may contain * and ? wildcard characters which match against + // an arbitrary number of characters or a single character respectively. + AcceptEnv []string `json:"acceptEnv,omitempty"` } // SSHPrincipal is either a particular node or a user on any node. 
diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index a98efe4d193b4..61564f3f8bfd4 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -505,6 +505,7 @@ func (src *SSHRule) Clone() *SSHRule { } dst.SSHUsers = maps.Clone(src.SSHUsers) dst.Action = src.Action.Clone() + dst.AcceptEnv = append(src.AcceptEnv[:0:0], src.AcceptEnv...) return dst } @@ -514,6 +515,7 @@ var _SSHRuleCloneNeedsRegeneration = SSHRule(struct { Principals []*SSHPrincipal SSHUsers map[string]string Action *SSHAction + AcceptEnv []string }{}) // Clone makes a deep copy of SSHAction. diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index 3bc57ec29edb4..a3e19b0dcec7a 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -1126,6 +1126,7 @@ func (v SSHRuleView) Principals() views.SliceView[*SSHPrincipal, SSHPrincipalVie func (v SSHRuleView) SSHUsers() views.Map[string, string] { return views.MapOf(v.ж.SSHUsers) } func (v SSHRuleView) Action() SSHActionView { return v.ж.Action.View() } +func (v SSHRuleView) AcceptEnv() views.Slice[string] { return views.SliceOf(v.ж.AcceptEnv) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _SSHRuleViewNeedsRegeneration = SSHRule(struct { @@ -1133,6 +1134,7 @@ var _SSHRuleViewNeedsRegeneration = SSHRule(struct { Principals []*SSHPrincipal SSHUsers map[string]string Action *SSHAction + AcceptEnv []string }{}) // View returns a readonly view of SSHAction. diff --git a/taildrop/taildrop.go b/taildrop/taildrop.go index 9ad0e1a7e1f6b..4d14787afbf54 100644 --- a/taildrop/taildrop.go +++ b/taildrop/taildrop.go @@ -226,9 +226,8 @@ func (m *Manager) IncomingFiles() []ipn.PartialFile { // in JSON to clients. They distinguish between empty and non-nil // to know whether a Notify should be able about files. 
files := make([]ipn.PartialFile, 0) - m.incomingFiles.Range(func(k incomingFileKey, f *incomingFile) bool { + for k, f := range m.incomingFiles.All() { f.mu.Lock() - defer f.mu.Unlock() files = append(files, ipn.PartialFile{ Name: k.name, Started: f.started, @@ -238,8 +237,8 @@ func (m *Manager) IncomingFiles() []ipn.PartialFile { FinalPath: f.finalPath, Done: f.done, }) - return true - }) + f.mu.Unlock() + } return files } diff --git a/tool/gocross/gocross-wrapper.sh b/tool/gocross/gocross-wrapper.sh index 1bfd67092af6c..366011fefdd6b 100755 --- a/tool/gocross/gocross-wrapper.sh +++ b/tool/gocross/gocross-wrapper.sh @@ -26,40 +26,29 @@ repo_root="${BASH_SOURCE%/*}/../.." # being invoked from somewhere else. cd "$repo_root" -toolchain="$HOME/.cache/tailscale-go" +# toolchain, set below, is the root of the Go toolchain we'll use to build +# gocross. +# +# It's set to either an explicit Go toolchain directory (if go.toolchain.rev has +# a value with a leading slash, for testing new toolchains), or otherwise in the +# common case it'll be "$HOME/.cache/tsgo/GITHASH" where GITHASH is the contents +# of the go.toolchain.rev file and the git commit of the +# https://github.com/tailscale/go release artifact to download. +toolchain="" -if [[ -d "$toolchain" ]]; then - # A toolchain exists, but is it recent enough to compile gocross? If not, - # wipe it out so that the next if block fetches a usable one. - want_go_minor=$(grep -E '^go ' "go.mod" | cut -f2 -d'.') - have_go_minor="" - if [[ -f "$toolchain/VERSION" ]]; then - have_go_minor=$(head -1 "$toolchain/VERSION" | cut -f2 -d'.') - fi - # Shortly before stable releases, we run release candidate - # toolchains, which have a non-numeric suffix on the version - # number. Remove the rc qualifier, we just care about the minor - # version. 
- have_go_minor="${have_go_minor%rc*}" - if [[ -z "$have_go_minor" || "$have_go_minor" -lt "$want_go_minor" ]]; then +read -r REV &2 - case "$REV" in - /*) - toolchain="$REV" - ;; - *) # This works for linux and darwin, which is sufficient # (we do not build tailscale-go for other targets). HOST_OS=$(uname -s | tr A-Z a-z) @@ -71,15 +60,38 @@ if [[ ! -d "$toolchain" ]]; then # Go uses the name "amd64". HOST_ARCH="amd64" fi - - rm -rf "$toolchain" "$toolchain.extracted" curl -f -L -o "$toolchain.tar.gz" "https://github.com/tailscale/go/releases/download/build-${REV}/${HOST_OS}-${HOST_ARCH}.tar.gz" mkdir -p "$toolchain" (cd "$toolchain" && tar --strip-components=1 -xf "$toolchain.tar.gz") echo "$REV" >"$toolchain.extracted" rm -f "$toolchain.tar.gz" - ;; - esac + + # Do some cleanup of old toolchains while we're here. + for hash in $(find "$HOME/.cache/tsgo" -maxdepth 1 -type f -name '*.extracted' -mtime 90 -exec basename {} \; | sed 's/.extracted$//'); do + echo "# Cleaning up old Go toolchain $hash" >&2 + rm -rf "$HOME/.cache/tsgo/$hash" + rm -rf "$HOME/.cache/tsgo/$hash.extracted" + done + fi + ;; +esac + +if [[ -d "$toolchain" ]]; then + # A toolchain exists, but is it recent enough to compile gocross? If not, + # wipe it out so that the next if block fetches a usable one. + want_go_minor="$(grep -E '^go ' "go.mod" | cut -f2 -d'.')" + have_go_minor="" + if [[ -f "$toolchain/VERSION" ]]; then + have_go_minor="$(head -1 "$toolchain/VERSION" | cut -f2 -d'.')" + fi + # Shortly before stable releases, we run release candidate + # toolchains, which have a non-numeric suffix on the version + # number. Remove the rc qualifier, we just care about the minor + # version. 
+ have_go_minor="${have_go_minor%rc*}" + if [[ -z "$have_go_minor" || "$have_go_minor" -lt "$want_go_minor" ]]; then + rm -rf "$toolchain" "$toolchain.extracted" + fi fi # Binaries run with `gocross run` can reinvoke gocross, resulting in a diff --git a/tool/gocross/toolchain.go b/tool/gocross/toolchain.go index 5980dff046268..e701662f5b1e8 100644 --- a/tool/gocross/toolchain.go +++ b/tool/gocross/toolchain.go @@ -55,8 +55,13 @@ func readRevFile(path string) (string, error) { } func getToolchain() (toolchainDir, gorootDir string, err error) { + rev, err := toolchainRev() + if err != nil { + return "", "", err + } + cache := filepath.Join(os.Getenv("HOME"), ".cache") - toolchainDir = filepath.Join(cache, "tailscale-go") + toolchainDir = filepath.Join(cache, "tsgo", rev) gorootDir = filepath.Join(toolchainDir, "gocross-goroot") // You might wonder why getting the toolchain also provisions and returns a diff --git a/tsd/tsd.go b/tsd/tsd.go index 2b5e656267749..acd09560c7601 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -32,6 +32,7 @@ import ( "tailscale.com/net/tstun" "tailscale.com/proxymap" "tailscale.com/types/netmap" + "tailscale.com/util/usermetric" "tailscale.com/wgengine" "tailscale.com/wgengine/magicsock" "tailscale.com/wgengine/router" @@ -65,7 +66,8 @@ type System struct { controlKnobs controlknobs.Knobs proxyMap proxymap.Mapper - healthTracker health.Tracker + healthTracker health.Tracker + userMetricsRegistry usermetric.Registry } // NetstackImpl is the interface that *netstack.Impl implements. @@ -142,6 +144,11 @@ func (s *System) HealthTracker() *health.Tracker { return &s.healthTracker } +// UserMetricsRegistry returns the system usermetrics. +func (s *System) UserMetricsRegistry() *usermetric.Registry { + return &s.userMetricsRegistry +} + // SubSystem represents some subsystem of the Tailscale node daemon. // // A subsystem can be set to a value, and then later retrieved. 
A subsystem diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index ca6c44ea7f1a9..0be33ba8a5d37 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -536,12 +536,14 @@ func (s *Server) start() (reterr error) { SetSubsystem: sys.Set, ControlKnobs: sys.ControlKnobs(), HealthTracker: sys.HealthTracker(), + Metrics: sys.UserMetricsRegistry(), }) if err != nil { return err } closePool.add(s.dialer) sys.Set(eng) + sys.HealthTracker().SetMetricsRegistry(sys.UserMetricsRegistry()) // TODO(oxtoacart): do we need to support Taildrive on tsnet, and if so, how? ns, err := netstack.Create(tsLogf, sys.Tun.Get(), eng, sys.MagicSock.Get(), s.dialer, sys.DNSManager.Get(), sys.ProxyMapper(), nil) diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 7f6fb00c0d1be..255baf618c0b3 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -5,6 +5,7 @@ package tsnet import ( "bufio" + "bytes" "context" "crypto/ecdsa" "crypto/elliptic" @@ -25,13 +26,15 @@ import ( "os" "path/filepath" "reflect" + "runtime" "strings" "sync" "sync/atomic" "testing" "time" - "github.com/google/go-cmp/cmp" + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" "golang.org/x/net/proxy" "tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/health" @@ -507,7 +510,7 @@ func TestStartStopStartGetsSameIP(t *testing.T) { Dir: tmps1, ControlURL: controlURL, Hostname: "s1", - Logf: logger.TestLogger(t), + Logf: tstest.WhileTestRunningLogger(t), } } s1 := newServer() @@ -818,65 +821,226 @@ func TestUDPConn(t *testing.T) { } } +// testWarnable is a Warnable that is used within this package for testing purposes only. 
+var testWarnable = health.Register(&health.Warnable{ + Code: "test-warnable-tsnet", + Title: "Test warnable", + Severity: health.SeverityLow, + Text: func(args health.Args) string { + return args[health.ArgError] + }, +}) + +func parseMetrics(m []byte) (map[string]float64, error) { + metrics := make(map[string]float64) + + var parser expfmt.TextParser + mf, err := parser.TextToMetricFamilies(bytes.NewReader(m)) + if err != nil { + return nil, err + } + + for _, f := range mf { + for _, ff := range f.Metric { + val := float64(0) + + switch f.GetType() { + case dto.MetricType_COUNTER: + val = ff.GetCounter().GetValue() + case dto.MetricType_GAUGE: + val = ff.GetGauge().GetValue() + } + + metrics[f.GetName()+promMetricLabelsStr(ff.GetLabel())] = val + } + } + + return metrics, nil +} + +func promMetricLabelsStr(labels []*dto.LabelPair) string { + if len(labels) == 0 { + return "" + } + var b strings.Builder + b.WriteString("{") + for i, l := range labels { + if i > 0 { + b.WriteString(",") + } + b.WriteString(fmt.Sprintf("%s=%q", l.GetName(), l.GetValue())) + } + b.WriteString("}") + return b.String() +} + func TestUserMetrics(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/13420") tstest.ResourceCheck(t) - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) defer cancel() - // testWarnable is a Warnable that is used within this package for testing purposes only. 
- var testWarnable = health.Register(&health.Warnable{ - Code: "test-warnable-tsnet", - Title: "Test warnable", - Severity: health.SeverityLow, - Text: func(args health.Args) string { - return args[health.ArgError] - }, - }) - controlURL, c := startControl(t) - s1, _, s1PubKey := startServer(t, ctx, controlURL, "s1") + s1, s1ip, s1PubKey := startServer(t, ctx, controlURL, "s1") + s2, _, _ := startServer(t, ctx, controlURL, "s2") s1.lb.EditPrefs(&ipn.MaskedPrefs{ Prefs: ipn.Prefs{ AdvertiseRoutes: []netip.Prefix{ netip.MustParsePrefix("192.0.2.0/24"), netip.MustParsePrefix("192.0.3.0/24"), + netip.MustParsePrefix("192.0.5.1/32"), + netip.MustParsePrefix("0.0.0.0/0"), }, }, AdvertiseRoutesSet: true, }) - c.SetSubnetRoutes(s1PubKey, []netip.Prefix{netip.MustParsePrefix("192.0.2.0/24")}) + c.SetSubnetRoutes(s1PubKey, []netip.Prefix{ + netip.MustParsePrefix("192.0.2.0/24"), + netip.MustParsePrefix("192.0.5.1/32"), + netip.MustParsePrefix("0.0.0.0/0"), + }) lc1, err := s1.LocalClient() if err != nil { t.Fatal(err) } + lc2, err := s2.LocalClient() + if err != nil { + t.Fatal(err) + } + + // ping to make sure the connection is up. + res, err := lc2.Ping(ctx, s1ip, tailcfg.PingICMP) + if err != nil { + t.Fatalf("pinging: %s", err) + } + t.Logf("ping success: %#+v", res) + ht := s1.lb.HealthTracker() ht.SetUnhealthy(testWarnable, health.Args{"Text": "Hello world 1"}) - metrics1, err := lc1.UserMetrics(ctx) + // Force an update to the netmap to ensure that the metrics are up-to-date. + s1.lb.DebugForceNetmapUpdate() + s2.lb.DebugForceNetmapUpdate() + + wantRoutes := float64(2) + if runtime.GOOS == "windows" { + wantRoutes = 0 + } + + // Wait for the routes to be propagated to node 1 to ensure + // that the metrics are up-to-date. 
+ waitForCondition(t, "primary routes available for node1", 90*time.Second, func() bool { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + status1, err := lc1.Status(ctx) + if err != nil { + t.Logf("getting status: %s", err) + return false + } + if runtime.GOOS == "windows" { + // Windows does not seem to support or report back routes when running in + // userspace via tsnet. So, we skip this check on Windows. + // TODO(kradalby): Figure out if this is correct. + return true + } + // Wait for the primary routes to reach our desired routes, which is wantRoutes + 1, because + // the PrimaryRoutes list will contain a exit node route, which the metric does not count. + return status1.Self.PrimaryRoutes != nil && status1.Self.PrimaryRoutes.Len() == int(wantRoutes)+1 + }) + + ctxLc, cancelLc := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelLc() + metrics1, err := lc1.UserMetrics(ctxLc) + if err != nil { + t.Fatal(err) + } + + status1, err := lc1.Status(ctxLc) + if err != nil { + t.Fatal(err) + } + + parsedMetrics1, err := parseMetrics(metrics1) + if err != nil { + t.Fatal(err) + } + + t.Logf("Metrics1:\n%s\n", metrics1) + + // The node is advertising 4 routes: + // - 192.0.2.0/24 + // - 192.0.3.0/24 + // - 192.0.5.1/32 + if got, want := parsedMetrics1["tailscaled_advertised_routes"], 3.0; got != want { + t.Errorf("metrics1, tailscaled_advertised_routes: got %v, want %v", got, want) + } + + // The control has approved 2 routes: + // - 192.0.2.0/24 + // - 192.0.5.1/32 + if got, want := parsedMetrics1["tailscaled_approved_routes"], wantRoutes; got != want { + t.Errorf("metrics1, tailscaled_approved_routes: got %v, want %v", got, want) + } + + // Validate the health counter metric against the status of the node + if got, want := parsedMetrics1[`tailscaled_health_messages{type="warning"}`], float64(len(status1.Health)); got != want { + t.Errorf("metrics1, tailscaled_health_messages: got %v, want %v", got, 
want) + } + + // The node is the primary subnet router for 2 routes: + // - 192.0.2.0/24 + // - 192.0.5.1/32 + if got, want := parsedMetrics1["tailscaled_primary_routes"], wantRoutes; got != want { + t.Errorf("metrics1, tailscaled_primary_routes: got %v, want %v", got, want) + } + + metrics2, err := lc2.UserMetrics(ctx) if err != nil { t.Fatal(err) } - // Note that this test will check for two warnings because the health - // tracker will have two warnings: one from the testWarnable, added in - // this test, and one because we are running the dev/unstable version - // of tailscale. - want := `# TYPE tailscaled_advertised_routes gauge -# HELP tailscaled_advertised_routes Number of advertised network routes (e.g. by a subnet router) -tailscaled_advertised_routes 2 -# TYPE tailscaled_health_messages gauge -# HELP tailscaled_health_messages Number of health messages broken down by type. -tailscaled_health_messages{type="warning"} 2 -# TYPE tailscaled_inbound_dropped_packets_total counter -# HELP tailscaled_inbound_dropped_packets_total Counts the number of dropped packets received by the node from other peers -# TYPE tailscaled_outbound_dropped_packets_total counter -# HELP tailscaled_outbound_dropped_packets_total Counts the number of packets dropped while being sent to other peers -` + status2, err := lc2.Status(ctx) + if err != nil { + t.Fatal(err) + } - if diff := cmp.Diff(want, string(metrics1)); diff != "" { - t.Fatalf("unexpected metrics (-want +got):\n%s", diff) + parsedMetrics2, err := parseMetrics(metrics2) + if err != nil { + t.Fatal(err) + } + + t.Logf("Metrics2:\n%s\n", metrics2) + + // The node is advertising 0 routes + if got, want := parsedMetrics2["tailscaled_advertised_routes"], 0.0; got != want { + t.Errorf("metrics2, tailscaled_advertised_routes: got %v, want %v", got, want) + } + + // The control has approved 0 routes + if got, want := parsedMetrics2["tailscaled_approved_routes"], 0.0; got != want { + t.Errorf("metrics2, tailscaled_approved_routes: 
got %v, want %v", got, want) + } + + // Validate the health counter metric against the status of the node + if got, want := parsedMetrics2[`tailscaled_health_messages{type="warning"}`], float64(len(status2.Health)); got != want { + t.Errorf("metrics2, tailscaled_health_messages: got %v, want %v", got, want) + } + + // The node is the primary subnet router for 0 routes + if got, want := parsedMetrics2["tailscaled_primary_routes"], 0.0; got != want { + t.Errorf("metrics2, tailscaled_primary_routes: got %v, want %v", got, want) + } +} + +func waitForCondition(t *testing.T, msg string, waitTime time.Duration, f func() bool) { + t.Helper() + for deadline := time.Now().Add(waitTime); time.Now().Before(deadline); time.Sleep(1 * time.Second) { + if f() { + return + } } + t.Fatalf("waiting for condition: %s", msg) } diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index ecb655fe9f474..70c5d68c336e0 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -1791,6 +1791,7 @@ func (n *testNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon { cmd.Args = append(cmd.Args, "--config="+n.configFile) } cmd.Env = append(os.Environ(), + "TS_CONTROL_IS_PLAINTEXT_HTTP=1", "TS_DEBUG_PERMIT_HTTP_C2N=1", "TS_LOG_TARGET="+n.env.LogCatcherServer.URL, "HTTP_PROXY="+n.env.TrafficTrapServer.URL, diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 44ed2da060c99..bbcf277d171e1 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -366,6 +366,7 @@ func (s *Server) serveMachine(w http.ResponseWriter, r *http.Request) { func (s *Server) SetSubnetRoutes(nodeKey key.NodePublic, routes []netip.Prefix) { s.mu.Lock() defer s.mu.Unlock() + s.logf("Setting subnet routes for %s: %v", nodeKey.ShortString(), routes) mak.Set(&s.nodeSubnetRoutes, nodeKey, routes) } @@ -1018,6 +1019,7 @@ func (s *Server) 
MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, s.mu.Lock() defer s.mu.Unlock() + res.Node.PrimaryRoutes = s.nodeSubnetRoutes[nk] res.Node.AllowedIPs = append(res.Node.Addresses, s.nodeSubnetRoutes[nk]...) // Consume a PingRequest while protected by mutex if it exists diff --git a/tstest/iosdeps/iosdeps_test.go b/tstest/iosdeps/iosdeps_test.go index 08df9a9304ead..ab69f1c2b0649 100644 --- a/tstest/iosdeps/iosdeps_test.go +++ b/tstest/iosdeps/iosdeps_test.go @@ -14,9 +14,16 @@ func TestDeps(t *testing.T) { GOOS: "ios", GOARCH: "arm64", BadDeps: map[string]string{ - "testing": "do not use testing package in production code", - "text/template": "linker bloat (MethodByName)", - "html/template": "linker bloat (MethodByName)", + "testing": "do not use testing package in production code", + "text/template": "linker bloat (MethodByName)", + "html/template": "linker bloat (MethodByName)", + "tailscale.com/net/wsconn": "https://github.com/tailscale/tailscale/issues/13762", + "github.com/coder/websocket": "https://github.com/tailscale/tailscale/issues/13762", + "github.com/mitchellh/go-ps": "https://github.com/tailscale/tailscale/pull/13759", + "database/sql/driver": "iOS doesn't use an SQL database", + "github.com/google/uuid": "see tailscale/tailscale#13760", + "tailscale.com/clientupdate/distsign": "downloads via AppStore, not distsign", + "github.com/tailscale/hujson": "no config file support on iOS", }, }.Check(t) } diff --git a/tstest/natlab/vnet/vnet.go b/tstest/natlab/vnet/vnet.go index 919ae1fa163be..e7991b3e6ef5d 100644 --- a/tstest/natlab/vnet/vnet.go +++ b/tstest/natlab/vnet/vnet.go @@ -974,13 +974,12 @@ func (n *network) writeEth(res []byte) bool { if dstMAC.IsBroadcast() || (n.v6 && etherType == layers.EthernetTypeIPv6 && dstMAC == macAllNodes) { num := 0 - n.writers.Range(func(mac MAC, nw networkWriter) bool { + for mac, nw := range n.writers.All() { if mac != srcMAC { num++ nw.write(res) } - return true - }) + } return num > 0 } if srcMAC == 
dstMAC { diff --git a/tstest/test-wishlist.md b/tstest/test-wishlist.md new file mode 100644 index 0000000000000..eb4601b929650 --- /dev/null +++ b/tstest/test-wishlist.md @@ -0,0 +1,20 @@ +# Testing wishlist + +This is a list of tests we'd like to add one day, as our e2e/natlab/VM +testing infrastructure matures. + +We're going to start collecting ideas as we develop PRs (updating this +wishlist in the same PR that adds something that could be better +tested) and then use this list to inform the order we build out our +future testing machinery. + +For each item, try to include a `#nnn` or `tailscale/corp#nnn` +reference to an issue or PR about the feature. + +# The list + +- Link-local multicast, and mDNS/LLMNR specifically, when an exit node is used, + both with and without the "Allow local network access" option enabled. + When the option is disabled, we should still permit it for internal interfaces, + such as Hyper-V/WSL2 on Windows. + diff --git a/types/dnstype/messagetypes-string.go b/types/dnstype/messagetypes-string.go new file mode 100644 index 0000000000000..34abea1ba947b --- /dev/null +++ b/types/dnstype/messagetypes-string.go @@ -0,0 +1,84 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package dnstype + +import ( + "errors" + "strings" + + "golang.org/x/net/dns/dnsmessage" +) + +// StringForDNSMessageType returns the string representation of a dnsmessage.Type. +// For example, StringForDNSMessageType(dnsmessage.TypeA) returns "A". 
+func StringForDNSMessageType(t dnsmessage.Type) string { + switch t { + case dnsmessage.TypeAAAA: + return "AAAA" + case dnsmessage.TypeALL: + return "ALL" + case dnsmessage.TypeA: + return "A" + case dnsmessage.TypeCNAME: + return "CNAME" + case dnsmessage.TypeHINFO: + return "HINFO" + case dnsmessage.TypeMINFO: + return "MINFO" + case dnsmessage.TypeMX: + return "MX" + case dnsmessage.TypeNS: + return "NS" + case dnsmessage.TypeOPT: + return "OPT" + case dnsmessage.TypePTR: + return "PTR" + case dnsmessage.TypeSOA: + return "SOA" + case dnsmessage.TypeSRV: + return "SRV" + case dnsmessage.TypeTXT: + return "TXT" + case dnsmessage.TypeWKS: + return "WKS" + } + return "UNKNOWN" +} + +// DNSMessageTypeForString returns the dnsmessage.Type for the given string. +// For example, DNSMessageTypeForString("A") returns dnsmessage.TypeA. +func DNSMessageTypeForString(s string) (t dnsmessage.Type, err error) { + s = strings.TrimSpace(strings.ToUpper(s)) + switch s { + case "AAAA": + return dnsmessage.TypeAAAA, nil + case "ALL": + return dnsmessage.TypeALL, nil + case "A": + return dnsmessage.TypeA, nil + case "CNAME": + return dnsmessage.TypeCNAME, nil + case "HINFO": + return dnsmessage.TypeHINFO, nil + case "MINFO": + return dnsmessage.TypeMINFO, nil + case "MX": + return dnsmessage.TypeMX, nil + case "NS": + return dnsmessage.TypeNS, nil + case "OPT": + return dnsmessage.TypeOPT, nil + case "PTR": + return dnsmessage.TypePTR, nil + case "SOA": + return dnsmessage.TypeSOA, nil + case "SRV": + return dnsmessage.TypeSRV, nil + case "TXT": + return dnsmessage.TypeTXT, nil + case "WKS": + return dnsmessage.TypeWKS, nil + } + return 0, errors.New("unknown DNS message type: " + s) +} diff --git a/types/key/nl.go b/types/key/nl.go index e0b4e5ca61943..50caed98c2d0b 100644 --- a/types/key/nl.go +++ b/types/key/nl.go @@ -131,10 +131,10 @@ func NLPublicFromEd25519Unsafe(public ed25519.PublicKey) NLPublic { // is able to decode both the CLI form (tlpub:) & the // regular form 
(nlpub:). func (k *NLPublic) UnmarshalText(b []byte) error { - if mem.HasPrefix(mem.B(b), mem.S(nlPublicHexPrefixCLI)) { - return parseHex(k.k[:], mem.B(b), mem.S(nlPublicHexPrefixCLI)) + if mem.HasPrefix(mem.B(b), mem.S(nlPublicHexPrefix)) { + return parseHex(k.k[:], mem.B(b), mem.S(nlPublicHexPrefix)) } - return parseHex(k.k[:], mem.B(b), mem.S(nlPublicHexPrefix)) + return parseHex(k.k[:], mem.B(b), mem.S(nlPublicHexPrefixCLI)) } // AppendText implements encoding.TextAppender. diff --git a/types/lazy/deferred.go b/types/lazy/deferred.go new file mode 100644 index 0000000000000..964553cef6524 --- /dev/null +++ b/types/lazy/deferred.go @@ -0,0 +1,98 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package lazy + +import ( + "sync" + "sync/atomic" + + "tailscale.com/types/ptr" +) + +// DeferredInit allows one or more funcs to be deferred +// until [DeferredInit.Do] is called for the first time. +// +// DeferredInit is safe for concurrent use. +type DeferredInit struct { + DeferredFuncs +} + +// DeferredFuncs allows one or more funcs to be deferred +// until the owner's [DeferredInit.Do] method is called +// for the first time. +// +// DeferredFuncs is safe for concurrent use. +type DeferredFuncs struct { + m sync.Mutex + funcs []func() error + + // err is either: + // * nil, if deferred init has not yet been completed + // * nilErrPtr, if initialization completed successfully + // * non-nil and not nilErrPtr, if there was an error + // + // It is an atomic.Pointer so it can be read without m held. + err atomic.Pointer[error] +} + +// Defer adds a function to be called when [DeferredInit.Do] +// is called for the first time. It returns true on success, +// or false if [DeferredInit.Do] has already been called. 
+func (d *DeferredFuncs) Defer(f func() error) bool { + d.m.Lock() + defer d.m.Unlock() + if d.err.Load() != nil { + return false + } + d.funcs = append(d.funcs, f) + return true +} + +// MustDefer is like [DeferredFuncs.Defer], but panics +// if [DeferredInit.Do] has already been called. +func (d *DeferredFuncs) MustDefer(f func() error) { + if !d.Defer(f) { + panic("deferred init already completed") + } +} + +// Do calls previously deferred init functions if it is being called +// for the first time on this instance of [DeferredInit]. +// It stops and returns an error if any init function returns an error. +// +// It is safe for concurrent use, and the deferred init is guaranteed +// to have been completed, either successfully or with an error, +// when Do() returns. +func (d *DeferredInit) Do() error { + err := d.err.Load() + if err == nil { + err = d.doSlow() + } + return *err +} + +func (d *DeferredInit) doSlow() (err *error) { + d.m.Lock() + defer d.m.Unlock() + if err := d.err.Load(); err != nil { + return err + } + defer func() { + d.err.Store(err) + d.funcs = nil // do not keep funcs alive after invoking + }() + for _, f := range d.funcs { + if err := f(); err != nil { + return ptr.To(err) + } + } + return nilErrPtr +} + +// Funcs is a shorthand for &d.DeferredFuncs. +// The returned value can safely be passed to external code, +// allowing to defer init funcs without also exposing [DeferredInit.Do]. +func (d *DeferredInit) Funcs() *DeferredFuncs { + return &d.DeferredFuncs +} diff --git a/types/lazy/deferred_test.go b/types/lazy/deferred_test.go new file mode 100644 index 0000000000000..9de16c67a6067 --- /dev/null +++ b/types/lazy/deferred_test.go @@ -0,0 +1,277 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package lazy + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + "testing" +) + +func ExampleDeferredInit() { + // DeferredInit allows both registration and invocation of the + // deferred funcs. 
It should remain internal to the code that "owns" it. + var di DeferredInit + // Deferred funcs will not be executed until [DeferredInit.Do] is called. + deferred := di.Defer(func() error { + fmt.Println("Internal init") + return nil + }) + // [DeferredInit.Defer] reports whether the function was successfully deferred. + // A func can only fail to defer if [DeferredInit.Do] has already been called. + if deferred { + fmt.Printf("Internal init has been deferred\n\n") + } + + // If necessary, the value returned by [DeferredInit.Funcs] + // can be shared with external code to facilitate deferring + // funcs without allowing it to call [DeferredInit.Do]. + df := di.Funcs() + // If a certain init step must be completed for the program + // to function correctly, and failure to defer it indicates + // a coding error, use [DeferredFuncs.MustDefer] instead of + // [DeferredFuncs.Defer]. It panics if Do() has already been called. + df.MustDefer(func() error { + fmt.Println("External init - 1") + return nil + }) + // A deferred func may return an error to indicate a failed init. + // If a deferred func returns an error, execution stops + // and the error is propagated to the caller. + df.Defer(func() error { + fmt.Println("External init - 2") + return errors.New("bang!") + }) + // The deferred function below won't be executed. + df.Defer(func() error { + fmt.Println("Unreachable") + return nil + }) + + // When [DeferredInit]'s owner needs initialization to be completed, + // it can call [DeferredInit.Do]. When called for the first time, + // it invokes the deferred funcs. + err := di.Do() + if err != nil { + fmt.Printf("Deferred init failed: %v\n", err) + } + // [DeferredInit.Do] is safe for concurrent use and can be called + // multiple times by the same or different goroutines. + // However, the deferred functions are never invoked more than once. + // If the deferred init fails on the first attempt, all subsequent + // [DeferredInit.Do] calls will return the same error. 
+ if err = di.Do(); err != nil { + fmt.Printf("Deferred init failed: %v\n\n", err) + } + + // Additionally, all subsequent attempts to defer a function will fail + // after [DeferredInit.Do] has been called. + deferred = di.Defer(func() error { + fmt.Println("Unreachable") + return nil + }) + if !deferred { + fmt.Println("Cannot defer a func once init has been completed") + } + + // Output: + // Internal init has been deferred + // + // Internal init + // External init - 1 + // External init - 2 + // Deferred init failed: bang! + // Deferred init failed: bang! + // + // Cannot defer a func once init has been completed +} + +func TestDeferredInit(t *testing.T) { + tests := []struct { + name string + numFuncs int + }{ + { + name: "no-funcs", + numFuncs: 0, + }, + { + name: "one-func", + numFuncs: 1, + }, + { + name: "many-funcs", + numFuncs: 1000, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var di DeferredInit + + calls := make([]atomic.Bool, tt.numFuncs) // whether N-th func has been called + checkCalls := func() { + t.Helper() + for i := range calls { + if !calls[i].Load() { + t.Errorf("Func #%d has never been called", i) + } + } + } + + // Defer funcs concurrently across multiple goroutines. + var wg sync.WaitGroup + wg.Add(tt.numFuncs) + for i := range tt.numFuncs { + go func() { + f := func() error { + if calls[i].Swap(true) { + t.Errorf("Func #%d has already been called", i) + } + return nil + } + if !di.Defer(f) { + t.Errorf("Func #%d cannot be deferred", i) + return + } + wg.Done() + }() + } + // Wait for all funcs to be deferred. + wg.Wait() + + // Call [DeferredInit.Do] concurrently. 
+ const N = 10000 + for range N { + wg.Add(1) + go func() { + gotErr := di.Do() + checkError(t, gotErr, nil, false) + checkCalls() + wg.Done() + }() + } + wg.Wait() + }) + } +} + +func TestDeferredErr(t *testing.T) { + tests := []struct { + name string + funcs []func() error + wantErr error + }{ + { + name: "no-funcs", + wantErr: nil, + }, + { + name: "no-error", + funcs: []func() error{func() error { return nil }}, + wantErr: nil, + }, + { + name: "error", + funcs: []func() error{ + func() error { return nil }, + func() error { return errors.New("bang!") }, + func() error { return errors.New("unreachable") }, + }, + wantErr: errors.New("bang!"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var di DeferredInit + for _, f := range tt.funcs { + di.MustDefer(f) + } + + var wg sync.WaitGroup + N := 10000 + for range N { + wg.Add(1) + go func() { + gotErr := di.Do() + checkError(t, gotErr, tt.wantErr, false) + wg.Done() + }() + } + wg.Wait() + }) + } +} + +func TestDeferAfterDo(t *testing.T) { + var di DeferredInit + var deferred, called atomic.Int32 + + deferOnce := func() bool { + ok := di.Defer(func() error { + called.Add(1) + return nil + }) + if ok { + deferred.Add(1) + } + return ok + } + + // Deferring a func before calling [DeferredInit.Do] should always succeed. + if !deferOnce() { + t.Fatal("Failed to defer a func") + } + + // Defer up to N funcs concurrently while [DeferredInit.Do] is being called by the main goroutine. + // Since we'll likely attempt to defer some funcs after [DeferredInit.Do] has been called, + // we expect these late defers to fail, and the funcs will not be deferred or executed. + // However, the number of the deferred and called funcs should always be equal when [DeferredInit.Do] exits. 
+ const N = 10000 + var wg sync.WaitGroup + for range N { + wg.Add(1) + go func() { + deferOnce() + wg.Done() + }() + } + + if err := di.Do(); err != nil { + t.Fatalf("DeferredInit.Do() failed: %v", err) + } + wantDeferred, wantCalled := deferred.Load(), called.Load() + + if deferOnce() { + t.Error("An init func was deferred after DeferredInit.Do() returned") + } + + // Wait for the goroutines deferring init funcs to exit. + // No funcs should be deferred after DeferredInit.Do() has returned, + // so the deferred and called counters should remain unchanged. + wg.Wait() + if gotDeferred := deferred.Load(); gotDeferred != wantDeferred { + t.Errorf("An init func was deferred after DeferredInit.Do() returned. Got %d, want %d", gotDeferred, wantDeferred) + } + if gotCalled := called.Load(); gotCalled != wantCalled { + t.Errorf("An init func was called after DeferredInit.Do() returned. Got %d, want %d", gotCalled, wantCalled) + } + if deferred, called := deferred.Load(), called.Load(); deferred != called { + t.Errorf("Deferred: %d; Called: %d", deferred, called) + } +} + +func checkError(tb testing.TB, got, want error, fatal bool) { + tb.Helper() + f := tb.Errorf + if fatal { + f = tb.Fatalf + } + if (want == nil && got != nil) || + (want != nil && got == nil) || + (want != nil && got != nil && want.Error() != got.Error()) { + f("gotErr: %v; wantErr: %v", got, want) + } +} diff --git a/types/views/views.go b/types/views/views.go index b99a20a488bbd..19aa69d4a8edb 100644 --- a/types/views/views.go +++ b/types/views/views.go @@ -147,6 +147,17 @@ type SliceView[T ViewCloner[T, V], V StructView[T]] struct { ж []T } +// All returns an iterator over v. +func (v SliceView[T, V]) All() iter.Seq2[int, V] { + return func(yield func(int, V) bool) { + for i := range v.ж { + if !yield(i, v.ж[i].View()) { + return + } + } + } +} + // MarshalJSON implements json.Marshaler. 
func (v SliceView[T, V]) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } @@ -429,6 +440,17 @@ func (m MapSlice[K, V]) AsMap() map[K][]V { return out } +// All returns an iterator iterating over the keys and values of m. +func (m MapSlice[K, V]) All() iter.Seq2[K, Slice[V]] { + return func(yield func(K, Slice[V]) bool) { + for k, v := range m.ж { + if !yield(k, SliceOf(v)) { + return + } + } + } +} + // Map provides a read-only view of a map. It is the caller's responsibility to // make sure V is immutable. type Map[K comparable, V any] struct { @@ -515,6 +537,18 @@ func (m Map[K, V]) Range(f MapRangeFn[K, V]) { } } +// All returns an iterator iterating over the keys +// and values of m. +func (m Map[K, V]) All() iter.Seq2[K, V] { + return func(yield func(K, V) bool) { + for k, v := range m.ж { + if !yield(k, v) { + return + } + } + } +} + // MapFnOf returns a MapFn for m. func MapFnOf[K comparable, T any, V any](m map[K]T, f func(T) V) MapFn[K, T, V] { return MapFn[K, T, V]{ @@ -576,6 +610,17 @@ func (m MapFn[K, T, V]) Range(f MapRangeFn[K, V]) { } } +// All returns an iterator iterating over the keys and value views of m. +func (m MapFn[K, T, V]) All() iter.Seq2[K, V] { + return func(yield func(K, V) bool) { + for k, v := range m.ж { + if !yield(k, m.wrapv(v)) { + return + } + } + } +} + // ContainsPointers reports whether T contains any pointers, // either explicitly or implicitly. 
// It has special handling for some types that contain pointers diff --git a/types/views/views_test.go b/types/views/views_test.go index 24118d0997078..8a1ff3fddfc9e 100644 --- a/types/views/views_test.go +++ b/types/views/views_test.go @@ -426,3 +426,78 @@ func TestSliceRange(t *testing.T) { t.Errorf("got %q; want %q", got, want) } } + +type testStruct struct{ value string } + +func (p *testStruct) Clone() *testStruct { + if p == nil { + return p + } + return &testStruct{p.value} +} +func (p *testStruct) View() testStructView { return testStructView{p} } + +type testStructView struct{ p *testStruct } + +func (v testStructView) Valid() bool { return v.p != nil } +func (v testStructView) AsStruct() *testStruct { + if v.p == nil { + return nil + } + return v.p.Clone() +} +func (v testStructView) ValueForTest() string { return v.p.value } + +func TestSliceViewRange(t *testing.T) { + vs := SliceOfViews([]*testStruct{{value: "foo"}, {value: "bar"}}) + var got []string + for i, v := range vs.All() { + got = append(got, fmt.Sprintf("%d-%s", i, v.AsStruct().value)) + } + want := []string{"0-foo", "1-bar"} + if !slices.Equal(got, want) { + t.Errorf("got %q; want %q", got, want) + } +} + +func TestMapIter(t *testing.T) { + m := MapOf(map[string]int{"foo": 1, "bar": 2}) + var got []string + for k, v := range m.All() { + got = append(got, fmt.Sprintf("%s-%d", k, v)) + } + slices.Sort(got) + want := []string{"bar-2", "foo-1"} + if !slices.Equal(got, want) { + t.Errorf("got %q; want %q", got, want) + } +} + +func TestMapSliceIter(t *testing.T) { + m := MapSliceOf(map[string][]int{"foo": {3, 4}, "bar": {1, 2}}) + var got []string + for k, v := range m.All() { + got = append(got, fmt.Sprintf("%s-%d", k, v)) + } + slices.Sort(got) + want := []string{"bar-{[1 2]}", "foo-{[3 4]}"} + if !slices.Equal(got, want) { + t.Errorf("got %q; want %q", got, want) + } +} + +func TestMapFnIter(t *testing.T) { + m := MapFnOf[string, *testStruct, testStructView](map[string]*testStruct{ + "foo": 
{value: "fooVal"}, + "bar": {value: "barVal"}, + }, func(p *testStruct) testStructView { return testStructView{p} }) + var got []string + for k, v := range m.All() { + got = append(got, fmt.Sprintf("%v-%v", k, v.ValueForTest())) + } + slices.Sort(got) + want := []string{"bar-barVal", "foo-fooVal"} + if !slices.Equal(got, want) { + t.Errorf("got %q; want %q", got, want) + } +} diff --git a/util/linuxfw/iptables_for_svcs.go b/util/linuxfw/iptables_for_svcs.go new file mode 100644 index 0000000000000..8e0f5d48d0d0a --- /dev/null +++ b/util/linuxfw/iptables_for_svcs.go @@ -0,0 +1,79 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package linuxfw + +import ( + "fmt" + "net/netip" +) + +// This file contains functionality to insert portmapping rules for a 'service'. +// These are currently only used by the Kubernetes operator proxies. +// An iptables rule for such a service contains a comment with the service name. + +// EnsurePortMapRuleForSvc adds a prerouting rule that forwards traffic received +// on match port and NOT on the provided interface to target IP and target port. +// Rule will only be added if it does not already exist. +func (i *iptablesRunner) EnsurePortMapRuleForSvc(svc, tun string, targetIP netip.Addr, pm PortMap) error { + table := i.getIPTByAddr(targetIP) + args := argsForPortMapRule(svc, tun, targetIP, pm) + exists, err := table.Exists("nat", "PREROUTING", args...) + if err != nil { + return fmt.Errorf("error checking if rule exists: %w", err) + } + if !exists { + return table.Append("nat", "PREROUTING", args...) + } + return nil +} + +// DeletePortMapRuleForSvc constructs a prerouting rule as would be created by +// EnsurePortMapRuleForSvc with the provided args and, if such a rule exists, +// deletes it. 
+func (i *iptablesRunner) DeletePortMapRuleForSvc(svc, excludeI string, targetIP netip.Addr, pm PortMap) error { + table := i.getIPTByAddr(targetIP) + args := argsForPortMapRule(svc, excludeI, targetIP, pm) + exists, err := table.Exists("nat", "PREROUTING", args...) + if err != nil { + return fmt.Errorf("error checking if rule exists: %w", err) + } + if exists { + return table.Delete("nat", "PREROUTING", args...) + } + return nil +} + +// DeleteSvc constructs all possible rules that would have been created by +// EnsurePortMapRuleForSvc from the provided args and ensures that each one that +// exists is deleted. +func (i *iptablesRunner) DeleteSvc(svc, tun string, targetIPs []netip.Addr, pms []PortMap) error { + for _, tip := range targetIPs { + for _, pm := range pms { + if err := i.DeletePortMapRuleForSvc(svc, tun, tip, pm); err != nil { + return fmt.Errorf("error deleting rule: %w", err) + } + } + } + return nil +} + +func argsForPortMapRule(svc, excludeI string, targetIP netip.Addr, pm PortMap) []string { + c := commentForSvc(svc, pm) + return []string{ + "!", "-i", excludeI, + "-p", pm.Protocol, + "--dport", fmt.Sprintf("%d", pm.MatchPort), + "-m", "comment", "--comment", c, + "-j", "DNAT", + "--to-destination", fmt.Sprintf("%v:%v", targetIP, pm.TargetPort), + } +} + +// commentForSvc generates a comment to be added to an iptables DNAT rule for a +// service. This is for iptables debugging/readability purposes only. 
+func commentForSvc(svc string, pm PortMap) string { + return fmt.Sprintf("%s:%s:%d -> %s:%d", svc, pm.Protocol, pm.MatchPort, pm.Protocol, pm.TargetPort) +} diff --git a/util/linuxfw/iptables_for_svcs_test.go b/util/linuxfw/iptables_for_svcs_test.go new file mode 100644 index 0000000000000..99b2f517f1eaf --- /dev/null +++ b/util/linuxfw/iptables_for_svcs_test.go @@ -0,0 +1,196 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package linuxfw + +import ( + "net/netip" + "testing" +) + +func Test_iptablesRunner_EnsurePortMapRuleForSvc(t *testing.T) { + v4Addr := netip.MustParseAddr("10.0.0.4") + v6Addr := netip.MustParseAddr("fd7a:115c:a1e0::701:b62a") + testPM := PortMap{Protocol: "tcp", MatchPort: 4003, TargetPort: 80} + testPM2 := PortMap{Protocol: "udp", MatchPort: 4004, TargetPort: 53} + v4Rule := argsForPortMapRule("test-svc", "tailscale0", v4Addr, testPM) + tests := []struct { + name string + targetIP netip.Addr + svc string + pm PortMap + precreateSvcRules [][]string + }{ + { + name: "pm_for_ipv4", + targetIP: v4Addr, + svc: "test-svc", + pm: testPM, + }, + { + name: "pm_for_ipv6", + targetIP: v6Addr, + svc: "test-svc-2", + pm: testPM2, + }, + { + name: "add_existing_rule", + targetIP: v4Addr, + svc: "test-svc", + pm: testPM, + precreateSvcRules: [][]string{v4Rule}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + iptr := NewFakeIPTablesRunner() + table := iptr.getIPTByAddr(tt.targetIP) + for _, ruleset := range tt.precreateSvcRules { + mustPrecreatePortMapRule(t, ruleset, table) + } + if err := iptr.EnsurePortMapRuleForSvc(tt.svc, "tailscale0", tt.targetIP, tt.pm); err != nil { + t.Errorf("[unexpected error] iptablesRunner.EnsurePortMapRuleForSvc() = %v", err) + } + args := argsForPortMapRule(tt.svc, "tailscale0", tt.targetIP, tt.pm) + exists, err := table.Exists("nat", "PREROUTING", args...) 
+ if err != nil { + t.Fatalf("error checking if rule exists: %v", err) + } + if !exists { + t.Errorf("expected rule was not created") + } + }) + } +} + +func Test_iptablesRunner_DeletePortMapRuleForSvc(t *testing.T) { + v4Addr := netip.MustParseAddr("10.0.0.4") + v6Addr := netip.MustParseAddr("fd7a:115c:a1e0::701:b62a") + testPM := PortMap{Protocol: "tcp", MatchPort: 4003, TargetPort: 80} + v4Rule := argsForPortMapRule("test", "tailscale0", v4Addr, testPM) + v6Rule := argsForPortMapRule("test", "tailscale0", v6Addr, testPM) + + tests := []struct { + name string + targetIP netip.Addr + svc string + pm PortMap + precreateSvcRules [][]string + }{ + { + name: "multiple_rules_ipv4_deleted", + targetIP: v4Addr, + svc: "test", + pm: testPM, + precreateSvcRules: [][]string{v4Rule, v6Rule}, + }, + { + name: "multiple_rules_ipv6_deleted", + targetIP: v6Addr, + svc: "test", + pm: testPM, + precreateSvcRules: [][]string{v4Rule, v6Rule}, + }, + { + name: "non-existent_rule_deleted", + targetIP: v4Addr, + svc: "test", + pm: testPM, + precreateSvcRules: [][]string{v6Rule}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + iptr := NewFakeIPTablesRunner() + table := iptr.getIPTByAddr(tt.targetIP) + for _, ruleset := range tt.precreateSvcRules { + mustPrecreatePortMapRule(t, ruleset, table) + } + if err := iptr.DeletePortMapRuleForSvc(tt.svc, "tailscale0", tt.targetIP, tt.pm); err != nil { + t.Errorf("iptablesRunner.DeletePortMapRuleForSvc() errored: %v ", err) + } + deletedRule := argsForPortMapRule(tt.svc, "tailscale0", tt.targetIP, tt.pm) + exists, err := table.Exists("nat", "PREROUTING", deletedRule...) 
+ if err != nil { + t.Fatalf("error verifying that rule does not exist after deletion: %v", err) + } + if exists { + t.Errorf("portmap rule exists after deletion") + } + }) + } +} + +func Test_iptablesRunner_DeleteSvc(t *testing.T) { + v4Addr := netip.MustParseAddr("10.0.0.4") + v6Addr := netip.MustParseAddr("fd7a:115c:a1e0::701:b62a") + testPM := PortMap{Protocol: "tcp", MatchPort: 4003, TargetPort: 80} + iptr := NewFakeIPTablesRunner() + + // create two rules that will constitute svc1 + s1R1 := argsForPortMapRule("svc1", "tailscale0", v4Addr, testPM) + mustPrecreatePortMapRule(t, s1R1, iptr.getIPTByAddr(v4Addr)) + s1R2 := argsForPortMapRule("svc1", "tailscale0", v6Addr, testPM) + mustPrecreatePortMapRule(t, s1R2, iptr.getIPTByAddr(v6Addr)) + + // create two rules that will constitute svc2 + s2R1 := argsForPortMapRule("svc2", "tailscale0", v4Addr, testPM) + mustPrecreatePortMapRule(t, s2R1, iptr.getIPTByAddr(v4Addr)) + s2R2 := argsForPortMapRule("svc2", "tailscale0", v6Addr, testPM) + mustPrecreatePortMapRule(t, s2R2, iptr.getIPTByAddr(v6Addr)) + + // delete svc1 + if err := iptr.DeleteSvc("svc1", "tailscale0", []netip.Addr{v4Addr, v6Addr}, []PortMap{testPM}); err != nil { + t.Fatalf("error deleting service: %v", err) + } + + // validate that svc1 no longer exists + svcMustNotExist(t, "svc1", map[string][]string{v4Addr.String(): s1R1, v6Addr.String(): s1R2}, iptr) + + // validate that svc2 still exists + svcMustExist(t, "svc2", map[string][]string{v4Addr.String(): s2R1, v6Addr.String(): s2R2}, iptr) +} + +func svcMustExist(t *testing.T, svcName string, rules map[string][]string, iptr *iptablesRunner) { + t.Helper() + for dst, ruleset := range rules { + tip := netip.MustParseAddr(dst) + exists, err := iptr.getIPTByAddr(tip).Exists("nat", "PREROUTING", ruleset...) 
+ if err != nil { + t.Fatalf("error checking whether %s exists: %v", svcName, err) + } + if !exists { + t.Fatalf("service %s should exist, but rule for %s is missing", svcName, dst) + } + } +} + +func svcMustNotExist(t *testing.T, svcName string, rules map[string][]string, iptr *iptablesRunner) { + t.Helper() + for dst, ruleset := range rules { + tip := netip.MustParseAddr(dst) + exists, err := iptr.getIPTByAddr(tip).Exists("nat", "PREROUTING", ruleset...) + if err != nil { + t.Fatalf("error checking whether %s exists: %v", svcName, err) + } + if exists { + t.Fatalf("service %s should be deleted, but found rule for %s", svcName, dst) + } + } +} + +func mustPrecreatePortMapRule(t *testing.T, rules []string, table iptablesInterface) { + t.Helper() + exists, err := table.Exists("nat", "PREROUTING", rules...) + if err != nil { + t.Fatalf("error checking whether portmap rule exists: %v", err) + } + if exists { + return + } + if err := table.Append("nat", "PREROUTING", rules...); err != nil { + t.Fatalf("error precreating portmap rule: %v", err) + } +} diff --git a/util/linuxfw/iptables_runner.go b/util/linuxfw/iptables_runner.go index ea248217c6b2d..a4fea85c8a99a 100644 --- a/util/linuxfw/iptables_runner.go +++ b/util/linuxfw/iptables_runner.go @@ -9,6 +9,7 @@ import ( "bytes" "errors" "fmt" + "log" "net/netip" "os" "os/exec" @@ -383,9 +384,42 @@ func (i *iptablesRunner) AddDNATRule(origDst, dst netip.Addr) error { return table.Insert("nat", "PREROUTING", 1, "--destination", origDst.String(), "-j", "DNAT", "--to-destination", dst.String()) } -func (i *iptablesRunner) AddSNATRuleForDst(src, dst netip.Addr) error { +// EnsureSNATForDst sets up firewall to ensure that all traffic aimed for dst, has its source ip set to src: +// - creates a SNAT rule if not already present +// - ensures that any no longer valid SNAT rules for the same dst are removed +func (i *iptablesRunner) EnsureSNATForDst(src, dst netip.Addr) error { table := i.getIPTByAddr(dst) - return 
table.Insert("nat", "POSTROUTING", 1, "--destination", dst.String(), "-j", "SNAT", "--to-source", src.String()) + rules, err := table.List("nat", "POSTROUTING") + if err != nil { + return fmt.Errorf("error listing rules: %v", err) + } + // iptables accept either address or a CIDR value for the --destination flag, but converts an address to /32 + // CIDR. Explicitly passing a /32 CIDR made it possible to test this rule. + dstPrefix, err := dst.Prefix(32) + if err != nil { + return fmt.Errorf("error calculating prefix of dst %v: %v", dst, err) + } + + // wantsArgsPrefix is the prefix of the SNAT rule for the provided destination. + // We should only have one POSTROUTING rule with this prefix. + wantsArgsPrefix := fmt.Sprintf("-d %s -j SNAT --to-source", dstPrefix.String()) + // wantsArgs is the actual SNAT rule that we want. + wantsArgs := fmt.Sprintf("%s %s", wantsArgsPrefix, src.String()) + for _, r := range rules { + args := argsFromPostRoutingRule(r) + if strings.HasPrefix(args, wantsArgsPrefix) { + if strings.HasPrefix(args, wantsArgs) { + return nil + } + // SNAT rule matching the destination, but for a different source - delete. + if err := table.Delete("nat", "POSTROUTING", strings.Split(args, " ")...); err != nil { + // If we failed to delete don't crash the node- the proxy should still be functioning. + log.Printf("[unexpected] error deleting rule %s: %v, please report it.", r, err) + } + break + } + } + return table.Insert("nat", "POSTROUTING", 1, "-d", dstPrefix.String(), "-j", "SNAT", "--to-source", src.String()) } func (i *iptablesRunner) DNATNonTailscaleTraffic(tun string, dst netip.Addr) error { @@ -694,7 +728,7 @@ func delTSHook(ipt iptablesInterface, table, chain string, logf logger.Logf) err return nil } -// delChain flushs and deletes a chain. If the chain does not exist, it's a no-op +// delChain flushes and deletes a chain. If the chain does not exist, it's a no-op // since the desired state is already achieved. otherwise, it returns an error. 
func delChain(ipt iptablesInterface, table, chain string) error { if err := ipt.ClearChain(table, chain); err != nil { @@ -743,3 +777,10 @@ func clearRules(proto iptables.Protocol, logf logger.Logf) error { return multierr.New(errs...) } + +// argsFromPostRoutingRule accepts a rule as returned by iptables.List and, if it is a rule from POSTROUTING chain, +// returns the args part, else returns the original rule. +func argsFromPostRoutingRule(r string) string { + args, _ := strings.CutPrefix(r, "-A POSTROUTING ") + return args +} diff --git a/util/linuxfw/iptables_runner_test.go b/util/linuxfw/iptables_runner_test.go index 2363e4ed3c93c..56f13c78a8010 100644 --- a/util/linuxfw/iptables_runner_test.go +++ b/util/linuxfw/iptables_runner_test.go @@ -289,3 +289,77 @@ func TestAddAndDelSNATRule(t *testing.T) { t.Fatal(err) } } + +func TestEnsureSNATForDst_ipt(t *testing.T) { + ip1, ip2, ip3 := netip.MustParseAddr("100.99.99.99"), netip.MustParseAddr("100.88.88.88"), netip.MustParseAddr("100.77.77.77") + iptr := NewFakeIPTablesRunner() + + // 1. A new rule gets added + mustCreateSNATRule_ipt(t, iptr, ip1, ip2) + checkSNATRule_ipt(t, iptr, ip1, ip2) + checkSNATRuleCount(t, iptr, ip1, 1) + + // 2. Another call to EnsureSNATForDst with the same src and dst does not result in another rule being added. + mustCreateSNATRule_ipt(t, iptr, ip1, ip2) + checkSNATRule_ipt(t, iptr, ip1, ip2) + checkSNATRuleCount(t, iptr, ip1, 1) // still just 1 rule + + // 3. Another call to EnsureSNATForDst with a different src and the same dst results in the earlier rule being + // deleted. + mustCreateSNATRule_ipt(t, iptr, ip3, ip2) + checkSNATRule_ipt(t, iptr, ip3, ip2) + checkSNATRuleCount(t, iptr, ip1, 1) // still just 1 rule + + // 4. Another call to EnsureSNATForDst with a different dst should not get the earlier rule deleted. + mustCreateSNATRule_ipt(t, iptr, ip3, ip1) + checkSNATRule_ipt(t, iptr, ip3, ip1) + checkSNATRuleCount(t, iptr, ip1, 2) // now 2 rules + + // 5. 
A call to EnsureSNATForDst with a match dst and a match port should not get deleted by EnsureSNATForDst for the same dst. + args := []string{"--destination", ip1.String(), "-j", "SNAT", "--to-source", "10.0.0.1"} + if err := iptr.getIPTByAddr(ip1).Insert("nat", "POSTROUTING", 1, args...); err != nil { + t.Fatalf("error adding SNAT rule: %v", err) + } + exists, err := iptr.getIPTByAddr(ip1).Exists("nat", "POSTROUTING", args...) + if err != nil { + t.Fatalf("error checking if rule exists: %v", err) + } + if !exists { + t.Fatalf("SNAT rule for destination and port unexpectedly deleted") + } + mustCreateSNATRule_ipt(t, iptr, ip3, ip1) + checkSNATRuleCount(t, iptr, ip1, 3) // now 3 rules +} + +func mustCreateSNATRule_ipt(t *testing.T, iptr *iptablesRunner, src, dst netip.Addr) { + t.Helper() + if err := iptr.EnsureSNATForDst(src, dst); err != nil { + t.Fatalf("error ensuring SNAT rule: %v", err) + } +} + +func checkSNATRule_ipt(t *testing.T, iptr *iptablesRunner, src, dst netip.Addr) { + t.Helper() + dstPrefix, err := dst.Prefix(32) + if err != nil { + t.Fatalf("error converting addr to prefix: %v", err) + } + exists, err := iptr.getIPTByAddr(src).Exists("nat", "POSTROUTING", "-d", dstPrefix.String(), "-j", "SNAT", "--to-source", src.String()) + if err != nil { + t.Fatalf("error checking if rule exists: %v", err) + } + if !exists { + t.Fatalf("SNAT rule for src %s dst %s should exist, but it does not", src, dst) + } +} + +func checkSNATRuleCount(t *testing.T, iptr *iptablesRunner, ip netip.Addr, wantsRules int) { + t.Helper() + rules, err := iptr.getIPTByAddr(ip).List("nat", "POSTROUTING") + if err != nil { + t.Fatalf("error listing rules: %v", err) + } + if len(rules) != wantsRules { + t.Fatalf("wants %d rules, got %d", wantsRules, len(rules)) + } +} diff --git a/util/linuxfw/nftables_for_svcs.go b/util/linuxfw/nftables_for_svcs.go new file mode 100644 index 0000000000000..130585b2229e3 --- /dev/null +++ b/util/linuxfw/nftables_for_svcs.go @@ -0,0 +1,245 @@ +// 
Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package linuxfw + +import ( + "errors" + "fmt" + "net/netip" + "reflect" + "strings" + + "github.com/google/nftables" + "github.com/google/nftables/binaryutil" + "github.com/google/nftables/expr" + "golang.org/x/sys/unix" +) + +// This file contains functionality that is currently (09/2024) used to set up +// routing for the Tailscale Kubernetes operator egress proxies. A tailnet +// service (identified by tailnet IP or FQDN) that gets exposed to cluster +// workloads gets a separate prerouting chain created for it for each IP family +// of the chain's target addresses. Each service's prerouting chain contains one +// or more portmapping rules. A portmapping rule DNATs traffic received on a +// particular port to a port of the tailnet service. Creating a chain per +// service makes it easier to delete a service when no longer needed and helps +// with readability. + +// EnsurePortMapRuleForSvc: +// - ensures that nat table exists +// - ensures that there is a prerouting chain for the given service and IP family of the target address in the nat table +// - ensures that there is a portmapping rule matching the given portmap (only creates the rule if it does not already exist) +func (n *nftablesRunner) EnsurePortMapRuleForSvc(svc, tun string, targetIP netip.Addr, pm PortMap) error { + t, ch, err := n.ensureChainForSvc(svc, targetIP) + if err != nil { + return fmt.Errorf("error ensuring chain for %s: %w", svc, err) + } + meta := svcPortMapRuleMeta(svc, targetIP, pm) + rule, err := n.findRuleByMetadata(t, ch, meta) + if err != nil { + return fmt.Errorf("error looking up rule: %w", err) + } + if rule != nil { + return nil + } + p, err := protoFromString(pm.Protocol) + if err != nil { + return fmt.Errorf("error converting protocol %s: %w", pm.Protocol, err) + } + + rule = portMapRule(t, ch, tun, targetIP, pm.MatchPort, pm.TargetPort, p, meta) + n.conn.InsertRule(rule) + 
return n.conn.Flush() +} + +// DeletePortMapRuleForSvc deletes a portmapping rule in the given service/IP family chain. +// It finds the matching rule using metadata attached to the rule. +// The caller is expected to call DeleteSvc if the whole service (the chain) +// needs to be deleted, so we don't deal with the case where this is the only +// rule in the chain here. +func (n *nftablesRunner) DeletePortMapRuleForSvc(svc, tun string, targetIP netip.Addr, pm PortMap) error { + table, err := n.getNFTByAddr(targetIP) + if err != nil { + return fmt.Errorf("error setting up nftables for IP family of %s: %w", targetIP, err) + } + t, err := getTableIfExists(n.conn, table.Proto, "nat") + if err != nil { + return fmt.Errorf("error checking if nat table exists: %w", err) + } + if t == nil { + return nil + } + ch, err := getChainFromTable(n.conn, t, svc) + if err != nil && !errors.Is(err, errorChainNotFound{t.Name, svc}) { + return fmt.Errorf("error checking if chain %s exists: %w", svc, err) + } + if errors.Is(err, errorChainNotFound{t.Name, svc}) { + return nil // service chain does not exist, so neither does the portmapping rule + } + meta := svcPortMapRuleMeta(svc, targetIP, pm) + rule, err := n.findRuleByMetadata(t, ch, meta) + if err != nil { + return fmt.Errorf("error checking if rule exists: %w", err) + } + if rule == nil { + return nil + } + if err := n.conn.DelRule(rule); err != nil { + return fmt.Errorf("error deleting rule: %w", err) + } + return n.conn.Flush() +} + +// DeleteSvc deletes the chains for the given service if any exist. 
+func (n *nftablesRunner) DeleteSvc(svc, tun string, targetIPs []netip.Addr, pm []PortMap) error { + for _, tip := range targetIPs { + table, err := n.getNFTByAddr(tip) + if err != nil { + return fmt.Errorf("error setting up nftables for IP family of %s: %w", tip, err) + } + t, err := getTableIfExists(n.conn, table.Proto, "nat") + if err != nil { + return fmt.Errorf("error checking if nat table exists: %w", err) + } + if t == nil { + return nil + } + ch, err := getChainFromTable(n.conn, t, svc) + if err != nil && !errors.Is(err, errorChainNotFound{t.Name, svc}) { + return fmt.Errorf("error checking if chain %s exists: %w", svc, err) + } + if errors.Is(err, errorChainNotFound{t.Name, svc}) { + return nil + } + n.conn.DelChain(ch) + } + return n.conn.Flush() +} + +func portMapRule(t *nftables.Table, ch *nftables.Chain, tun string, targetIP netip.Addr, matchPort, targetPort uint16, proto uint8, meta []byte) *nftables.Rule { + var fam uint32 + if targetIP.Is4() { + fam = unix.NFPROTO_IPV4 + } else { + fam = unix.NFPROTO_IPV6 + } + rule := &nftables.Rule{ + Table: t, + Chain: ch, + UserData: meta, + Exprs: []expr.Any{ + &expr.Meta{Key: expr.MetaKeyOIFNAME, Register: 1}, + &expr.Cmp{ + Op: expr.CmpOpNeq, + Register: 1, + Data: []byte(tun), + }, + &expr.Meta{Key: expr.MetaKeyL4PROTO, Register: 1}, + &expr.Cmp{ + Op: expr.CmpOpEq, + Register: 1, + Data: []byte{proto}, + }, + &expr.Payload{ + DestRegister: 1, + Base: expr.PayloadBaseTransportHeader, + Offset: 2, + Len: 2, + }, + &expr.Cmp{ + Op: expr.CmpOpEq, + Register: 1, + Data: binaryutil.BigEndian.PutUint16(matchPort), + }, + &expr.Immediate{ + Register: 1, + Data: targetIP.AsSlice(), + }, + &expr.Immediate{ + Register: 2, + Data: binaryutil.BigEndian.PutUint16(targetPort), + }, + &expr.NAT{ + Type: expr.NATTypeDestNAT, + Family: fam, + RegAddrMin: 1, + RegAddrMax: 1, + RegProtoMin: 2, + RegProtoMax: 2, + }, + }, + } + return rule +} + +// svcPortMapRuleMeta generates metadata for a rule. 
+// This metadata can then be used to find the rule. +// https://github.com/google/nftables/issues/48 +func svcPortMapRuleMeta(svcName string, targetIP netip.Addr, pm PortMap) []byte { + return []byte(fmt.Sprintf("svc:%s,targetIP:%s:matchPort:%v,targetPort:%v,proto:%v", svcName, targetIP.String(), pm.MatchPort, pm.TargetPort, pm.Protocol)) +} + +func (n *nftablesRunner) findRuleByMetadata(t *nftables.Table, ch *nftables.Chain, meta []byte) (*nftables.Rule, error) { + if n.conn == nil || t == nil || ch == nil || len(meta) == 0 { + return nil, nil + } + rules, err := n.conn.GetRules(t, ch) + if err != nil { + return nil, fmt.Errorf("error listing rules: %w", err) + } + for _, rule := range rules { + if reflect.DeepEqual(rule.UserData, meta) { + return rule, nil + } + } + return nil, nil +} + +func (n *nftablesRunner) ensureChainForSvc(svc string, targetIP netip.Addr) (*nftables.Table, *nftables.Chain, error) { + polAccept := nftables.ChainPolicyAccept + table, err := n.getNFTByAddr(targetIP) + if err != nil { + return nil, nil, fmt.Errorf("error setting up nftables for IP family of %v: %w", targetIP, err) + } + nat, err := createTableIfNotExist(n.conn, table.Proto, "nat") + if err != nil { + return nil, nil, fmt.Errorf("error ensuring nat table: %w", err) + } + svcCh, err := getOrCreateChain(n.conn, chainInfo{ + table: nat, + name: svc, + chainType: nftables.ChainTypeNAT, + chainHook: nftables.ChainHookPrerouting, + chainPriority: nftables.ChainPriorityNATDest, + chainPolicy: &polAccept, + }) + if err != nil { + return nil, nil, fmt.Errorf("error ensuring prerouting chain: %w", err) + } + return nat, svcCh, nil +} + +// PortMap is the port mapping for a service rule. +type PortMap struct { + // MatchPort is the local port to which the rule should apply. + MatchPort uint16 + // TargetPort is the port to which the traffic should be forwarded. + TargetPort uint16 + // Protocol is the protocol to match packets on. Only TCP and UDP are + // supported. 
+ Protocol string +} + +func protoFromString(s string) (uint8, error) { + switch strings.ToLower(s) { + case "tcp": + return unix.IPPROTO_TCP, nil + case "udp": + return unix.IPPROTO_UDP, nil + default: + return 0, fmt.Errorf("unrecognized protocol: %q", s) + } +} diff --git a/util/linuxfw/nftables_for_svcs_test.go b/util/linuxfw/nftables_for_svcs_test.go new file mode 100644 index 0000000000000..d2df6e4bdf2ef --- /dev/null +++ b/util/linuxfw/nftables_for_svcs_test.go @@ -0,0 +1,152 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package linuxfw + +import ( + "net/netip" + "testing" + + "github.com/google/nftables" +) + +// This test creates a temporary network namespace for the nftables rules being +// set up, so it needs to run in a privileged mode. Locally it needs to be run +// by root, else it will be silently skipped. In CI it runs in a privileged +// container. +func Test_nftablesRunner_EnsurePortMapRuleForSvc(t *testing.T) { + conn := newSysConn(t) + runner := newFakeNftablesRunnerWithConn(t, conn, true) + ipv4, ipv6 := netip.MustParseAddr("100.99.99.99"), netip.MustParseAddr("fd7a:115c:a1e0::701:b62a") + pmTCP := PortMap{MatchPort: 4003, TargetPort: 80, Protocol: "TCP"} + pmTCP1 := PortMap{MatchPort: 4004, TargetPort: 443, Protocol: "TCP"} + + // Create a rule for service 'foo' to forward TCP traffic to IPv4 endpoint + runner.EnsurePortMapRuleForSvc("foo", "tailscale0", ipv4, pmTCP) + svcChains(t, 1, conn) + chainRuleCount(t, "foo", 1, conn, nftables.TableFamilyIPv4) + checkPortMapRule(t, "foo", ipv4, pmTCP, runner, nftables.TableFamilyIPv4) + + // Create another rule for service 'foo' to forward TCP traffic to the + // same IPv4 endpoint, but to a different port. 
+ runner.EnsurePortMapRuleForSvc("foo", "tailscale0", ipv4, pmTCP1) + svcChains(t, 1, conn) + chainRuleCount(t, "foo", 2, conn, nftables.TableFamilyIPv4) + checkPortMapRule(t, "foo", ipv4, pmTCP1, runner, nftables.TableFamilyIPv4) + + // Create a rule for service 'foo' to forward TCP traffic to an IPv6 endpoint + runner.EnsurePortMapRuleForSvc("foo", "tailscale0", ipv6, pmTCP) + svcChains(t, 2, conn) + chainRuleCount(t, "foo", 1, conn, nftables.TableFamilyIPv6) + checkPortMapRule(t, "foo", ipv6, pmTCP, runner, nftables.TableFamilyIPv6) + + // Create a rule for service 'bar' to forward TCP traffic to IPv4 endpoint + runner.EnsurePortMapRuleForSvc("bar", "tailscale0", ipv4, pmTCP) + svcChains(t, 3, conn) + chainRuleCount(t, "bar", 1, conn, nftables.TableFamilyIPv4) + checkPortMapRule(t, "bar", ipv4, pmTCP, runner, nftables.TableFamilyIPv4) + + // Create a rule for service 'bar' to forward TCP traffic to an IPv6 endpoint + runner.EnsurePortMapRuleForSvc("bar", "tailscale0", ipv6, pmTCP) + svcChains(t, 4, conn) + chainRuleCount(t, "bar", 1, conn, nftables.TableFamilyIPv6) + checkPortMapRule(t, "bar", ipv6, pmTCP, runner, nftables.TableFamilyIPv6) + + // Delete service bar + runner.DeleteSvc("bar", "tailscale0", []netip.Addr{ipv4, ipv6}, []PortMap{pmTCP}) + svcChains(t, 2, conn) + + // Delete a rule from service foo + runner.DeletePortMapRuleForSvc("foo", "tailscale0", ipv4, pmTCP) + svcChains(t, 2, conn) + chainRuleCount(t, "foo", 1, conn, nftables.TableFamilyIPv4) + + // Delete service foo + runner.DeleteSvc("foo", "tailscale0", []netip.Addr{ipv4, ipv6}, []PortMap{pmTCP, pmTCP1}) + svcChains(t, 0, conn) +} + +// svcChains verifies that the expected number of chains exist (for either IP +// family) and that each of them is configured as NAT prerouting chain. 
+func svcChains(t *testing.T, wantCount int, conn *nftables.Conn) { + t.Helper() + chains, err := conn.ListChains() + if err != nil { + t.Fatalf("error listing chains: %v", err) + } + if len(chains) != wantCount { + t.Fatalf("wants %d chains, got %d", wantCount, len(chains)) + } + for _, ch := range chains { + if *ch.Policy != nftables.ChainPolicyAccept { + t.Fatalf("chain %s has unexpected policy %v", ch.Name, *ch.Policy) + } + if ch.Type != nftables.ChainTypeNAT { + t.Fatalf("chain %s has unexpected type %v", ch.Name, ch.Type) + } + if *ch.Hooknum != *nftables.ChainHookPrerouting { + t.Fatalf("chain %s is attached to unexpected hook %v", ch.Name, ch.Hooknum) + } + if *ch.Priority != *nftables.ChainPriorityNATDest { + t.Fatalf("chain %s has unexpected priority %v", ch.Name, ch.Priority) + } + } +} + +// chainRuleCount verifies that the named chain in the given table contains the provided number of rules. +func chainRuleCount(t *testing.T, name string, numOfRules int, conn *nftables.Conn, fam nftables.TableFamily) { + t.Helper() + chains, err := conn.ListChainsOfTableFamily(fam) + if err != nil { + t.Fatalf("error listing chains: %v", err) + } + + for _, ch := range chains { + if ch.Name == name { + checkChainRules(t, conn, ch, numOfRules) + return + } + } + t.Fatalf("chain %s does not exist", name) +} + +// checkPortMapRule verifies that rule for the provided target IP and PortMap exists in a chain identified by service +// name and IP family. 
+func checkPortMapRule(t *testing.T, svc string, targetIP netip.Addr, pm PortMap, runner *nftablesRunner, fam nftables.TableFamily) { + t.Helper() + chains, err := runner.conn.ListChainsOfTableFamily(fam) + if err != nil { + t.Fatalf("error listing chains: %v", err) + } + var chain *nftables.Chain + for _, ch := range chains { + if ch.Name == svc { + chain = ch + break + } + } + if chain == nil { + t.Fatalf("chain for service %s does not exist", svc) + } + meta := svcPortMapRuleMeta(svc, targetIP, pm) + p, err := protoFromString(pm.Protocol) + if err != nil { + t.Fatalf("error converting protocol: %v", err) + } + wantsRule := portMapRule(chain.Table, chain, "tailscale0", targetIP, pm.MatchPort, pm.TargetPort, p, meta) + checkRule(t, wantsRule, runner.conn) +} + +// checkRule checks that the provided rules exists. +func checkRule(t *testing.T, rule *nftables.Rule, conn *nftables.Conn) { + t.Helper() + gotRule, err := findRule(conn, rule) + if err != nil { + t.Fatalf("error looking up rule: %v", err) + } + if gotRule == nil { + t.Fatal("rule not found") + } +} diff --git a/util/linuxfw/nftables_runner.go b/util/linuxfw/nftables_runner.go index ca5f65c2a4e0e..dc7f7b7eac3c3 100644 --- a/util/linuxfw/nftables_runner.go +++ b/util/linuxfw/nftables_runner.go @@ -193,7 +193,7 @@ func (n *nftablesRunner) DNATNonTailscaleTraffic(tunname string, dst netip.Addr) return n.conn.Flush() } -func (n *nftablesRunner) AddSNATRuleForDst(src, dst netip.Addr) error { +func (n *nftablesRunner) EnsureSNATForDst(src, dst netip.Addr) error { polAccept := nftables.ChainPolicyAccept table, err := n.getNFTByAddr(dst) if err != nil { @@ -216,44 +216,26 @@ func (n *nftablesRunner) AddSNATRuleForDst(src, dst netip.Addr) error { if err != nil { return fmt.Errorf("error ensuring postrouting chain: %w", err) } - var daddrOffset, fam, daddrLen uint32 - if dst.Is4() { - daddrOffset = 16 - daddrLen = 4 - fam = unix.NFPROTO_IPV4 - } else { - daddrOffset = 24 - daddrLen = 16 - fam = unix.NFPROTO_IPV6 - } 
- snatRule := &nftables.Rule{ - Table: nat, - Chain: postRoutingCh, - Exprs: []expr.Any{ - &expr.Payload{ - DestRegister: 1, - Base: expr.PayloadBaseNetworkHeader, - Offset: daddrOffset, - Len: daddrLen, - }, - &expr.Cmp{ - Op: expr.CmpOpEq, - Register: 1, - Data: dst.AsSlice(), - }, - &expr.Immediate{ - Register: 1, - Data: src.AsSlice(), - }, - &expr.NAT{ - Type: expr.NATTypeSourceNAT, - Family: fam, - RegAddrMin: 1, - }, - }, + rules, err := n.conn.GetRules(nat, postRoutingCh) + if err != nil { + return fmt.Errorf("error listing rules: %w", err) + } + snatRulePrefixMatch := fmt.Sprintf("dst:%s,src:", dst.String()) + snatRuleFullMatch := fmt.Sprintf("%s%s", snatRulePrefixMatch, src.String()) + for _, rule := range rules { + current := string(rule.UserData) + if strings.HasPrefix(string(rule.UserData), snatRulePrefixMatch) { + if strings.EqualFold(current, snatRuleFullMatch) { + return nil // already exists, do nothing + } + if err := n.conn.DelRule(rule); err != nil { + return fmt.Errorf("error deleting SNAT rule: %w", err) + } + } } - n.conn.AddRule(snatRule) + rule := snatRule(nat, postRoutingCh, src, dst, []byte(snatRuleFullMatch)) + n.conn.AddRule(rule) return n.conn.Flush() } @@ -557,11 +539,12 @@ type NetfilterRunner interface { // in the Kubernetes ingress proxies. DNATWithLoadBalancer(origDst netip.Addr, dsts []netip.Addr) error - // AddSNATRuleForDst adds a rule to the nat/POSTROUTING chain to SNAT - // traffic destined for dst to src. + // EnsureSNATForDst sets up firewall to mask the source for traffic destined for dst to src: + // - creates a SNAT rule if it doesn't already exist + // - deletes any pre-existing rules matching the destination // This is used to forward traffic destined for the local machine over // the Tailscale interface, as used in the Kubernetes egress proxies. 
- AddSNATRuleForDst(src, dst netip.Addr) error + EnsureSNATForDst(src, dst netip.Addr) error // DNATNonTailscaleTraffic adds a rule to the nat/PREROUTING chain to DNAT // all traffic inbound from any interface except exemptInterface to dst. @@ -569,6 +552,12 @@ type NetfilterRunner interface { // the Tailscale interface, as used in the Kubernetes egress proxies. DNATNonTailscaleTraffic(exemptInterface string, dst netip.Addr) error + EnsurePortMapRuleForSvc(svc, tun string, targetIP netip.Addr, pm PortMap) error + + DeletePortMapRuleForSvc(svc, tun string, targetIP netip.Addr, pm PortMap) error + + DeleteSvc(svc, tun string, targetIPs []netip.Addr, pm []PortMap) error + // ClampMSSToPMTU adds a rule to the mangle/FORWARD chain to clamp MSS for // traffic destined for the provided tun interface. ClampMSSToPMTU(tun string, addr netip.Addr) error @@ -2034,3 +2023,45 @@ func NfTablesCleanUp(logf logger.Logf) { } } } + +func snatRule(t *nftables.Table, ch *nftables.Chain, src, dst netip.Addr, meta []byte) *nftables.Rule { + var daddrOffset, fam, daddrLen uint32 + if dst.Is4() { + daddrOffset = 16 + daddrLen = 4 + fam = unix.NFPROTO_IPV4 + } else { + daddrOffset = 24 + daddrLen = 16 + fam = unix.NFPROTO_IPV6 + } + + return &nftables.Rule{ + Table: t, + Chain: ch, + Exprs: []expr.Any{ + &expr.Payload{ + DestRegister: 1, + Base: expr.PayloadBaseNetworkHeader, + Offset: daddrOffset, + Len: daddrLen, + }, + &expr.Cmp{ + Op: expr.CmpOpEq, + Register: 1, + Data: dst.AsSlice(), + }, + &expr.Immediate{ + Register: 1, + Data: src.AsSlice(), + }, + &expr.NAT{ + Type: expr.NATTypeSourceNAT, + Family: fam, + RegAddrMin: 1, + RegAddrMax: 1, + }, + }, + UserData: meta, + } +} diff --git a/util/linuxfw/nftables_runner_test.go b/util/linuxfw/nftables_runner_test.go index ebf514c79b1f0..712a7b93955da 100644 --- a/util/linuxfw/nftables_runner_test.go +++ b/util/linuxfw/nftables_runner_test.go @@ -954,6 +954,37 @@ func TestPickFirewallModeFromInstalledRules(t *testing.T) { } } +// This test 
creates a temporary network namespace for the nftables rules being +// set up, so it needs to run in a privileged mode. Locally it needs to be run +// by root, else it will be silently skipped. In CI it runs in a privileged +// container. +func TestEnsureSNATForDst_nftables(t *testing.T) { + conn := newSysConn(t) + runner := newFakeNftablesRunnerWithConn(t, conn, true) + ip1, ip2, ip3 := netip.MustParseAddr("100.99.99.99"), netip.MustParseAddr("100.88.88.88"), netip.MustParseAddr("100.77.77.77") + + // 1. A new rule gets added + mustCreateSNATRule_nft(t, runner, ip1, ip2) + chainRuleCount(t, "POSTROUTING", 1, conn, nftables.TableFamilyIPv4) + checkSNATRule_nft(t, runner, runner.nft4.Proto, ip1, ip2) + + // 2. Another call to EnsureSNATForDst with the same src and dst does not result in another rule being added. + mustCreateSNATRule_nft(t, runner, ip1, ip2) + chainRuleCount(t, "POSTROUTING", 1, conn, nftables.TableFamilyIPv4) // still just one rule + checkSNATRule_nft(t, runner, runner.nft4.Proto, ip1, ip2) + + // 3. Another call to EnsureSNATForDst with a different src and the same dst results in the earlier rule being + // deleted. + mustCreateSNATRule_nft(t, runner, ip3, ip2) + chainRuleCount(t, "POSTROUTING", 1, conn, nftables.TableFamilyIPv4) // still just one rule + checkSNATRule_nft(t, runner, runner.nft4.Proto, ip3, ip2) + + // 4. Another call to EnsureSNATForDst with a different dst should not get the earlier rule deleted. 
+ mustCreateSNATRule_nft(t, runner, ip3, ip1) + chainRuleCount(t, "POSTROUTING", 2, conn, nftables.TableFamilyIPv4) // now two rules + checkSNATRule_nft(t, runner, runner.nft4.Proto, ip3, ip1) +} + func newFakeNftablesRunnerWithConn(t *testing.T, conn *nftables.Conn, hasIPv6 bool) *nftablesRunner { t.Helper() if !hasIPv6 { @@ -964,3 +995,32 @@ func newFakeNftablesRunnerWithConn(t *testing.T, conn *nftables.Conn, hasIPv6 bo } return newNfTablesRunnerWithConn(t.Logf, conn) } + +func mustCreateSNATRule_nft(t *testing.T, runner *nftablesRunner, src, dst netip.Addr) { + t.Helper() + if err := runner.EnsureSNATForDst(src, dst); err != nil { + t.Fatalf("error ensuring SNAT rule: %v", err) + } +} + +// checkSNATRule_nft verifies that a SNAT rule for the given destination and source exists. +func checkSNATRule_nft(t *testing.T, runner *nftablesRunner, fam nftables.TableFamily, src, dst netip.Addr) { + t.Helper() + chains, err := runner.conn.ListChainsOfTableFamily(fam) + if err != nil { + t.Fatalf("error listing chains: %v", err) + } + var chain *nftables.Chain + for _, ch := range chains { + if ch.Name == "POSTROUTING" { + chain = ch + break + } + } + if chain == nil { + t.Fatal("POSTROUTING chain does not exist") + } + meta := []byte(fmt.Sprintf("dst:%s,src:%s", dst.String(), src.String())) + wantsRule := snatRule(chain.Table, chain, src, dst, meta) + checkRule(t, wantsRule, runner.conn) +} diff --git a/util/syspolicy/internal/loggerx/logger.go b/util/syspolicy/internal/loggerx/logger.go index b28610826382b..c29a5f0845cd6 100644 --- a/util/syspolicy/internal/loggerx/logger.go +++ b/util/syspolicy/internal/loggerx/logger.go @@ -6,6 +6,7 @@ package loggerx import ( "log" + "sync/atomic" "tailscale.com/types/lazy" "tailscale.com/types/logger" @@ -13,34 +14,51 @@ import ( ) const ( - errorPrefix = "syspolicy: " + normalPrefix = "syspolicy: " verbosePrefix = "syspolicy: [v2] " ) var ( - lazyErrorf lazy.SyncValue[logger.Logf] + debugLogging atomic.Bool // whether debugging 
logging is enabled + + lazyPrintf lazy.SyncValue[logger.Logf] lazyVerbosef lazy.SyncValue[logger.Logf] ) +// SetDebugLoggingEnabled controls whether spammy debug logging is enabled. +func SetDebugLoggingEnabled(v bool) { + debugLogging.Store(v) +} + // Errorf formats and writes an error message to the log. func Errorf(format string, args ...any) { - errorf := lazyErrorf.Get(func() logger.Logf { - return logger.WithPrefix(log.Printf, errorPrefix) - }) - errorf(format, args...) + printf(format, args...) } // Verbosef formats and writes an optional, verbose message to the log. func Verbosef(format string, args ...any) { - verbosef := lazyVerbosef.Get(func() logger.Logf { + if debugLogging.Load() { + printf(format, args...) + } else { + verbosef(format, args...) + } +} + +func printf(format string, args ...any) { + lazyPrintf.Get(func() logger.Logf { + return logger.WithPrefix(log.Printf, normalPrefix) + })(format, args...) +} + +func verbosef(format string, args ...any) { + lazyVerbosef.Get(func() logger.Logf { return logger.WithPrefix(log.Printf, verbosePrefix) - }) - verbosef(format, args...) + })(format, args...) } -// SetForTest sets the specified errorf and verbosef functions for the duration +// SetForTest sets the specified printf and verbosef functions for the duration // of tb and its subtests. 
-func SetForTest(tb internal.TB, errorf, verbosef logger.Logf) { - lazyErrorf.SetForTest(tb, errorf, nil) +func SetForTest(tb internal.TB, printf, verbosef logger.Logf) { + lazyPrintf.SetForTest(tb, printf, nil) lazyVerbosef.SetForTest(tb, verbosef, nil) } diff --git a/util/syspolicy/internal/loggerx/logger_test.go b/util/syspolicy/internal/loggerx/logger_test.go new file mode 100644 index 0000000000000..9735b5d30c20b --- /dev/null +++ b/util/syspolicy/internal/loggerx/logger_test.go @@ -0,0 +1,53 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package loggerx + +import ( + "fmt" + "io" + "strings" + "testing" + + "tailscale.com/types/logger" +) + +func TestDebugLogging(t *testing.T) { + var normal, verbose strings.Builder + SetForTest(t, logfTo(&normal), logfTo(&verbose)) + + checkOutput := func(wantNormal, wantVerbose string) { + t.Helper() + if gotNormal := normal.String(); gotNormal != wantNormal { + t.Errorf("Unexpected normal output: got %q; want %q", gotNormal, wantNormal) + } + if gotVerbose := verbose.String(); gotVerbose != wantVerbose { + t.Errorf("Unexpected verbose output: got %q; want %q", gotVerbose, wantVerbose) + } + normal.Reset() + verbose.Reset() + } + + Errorf("This is an error message: %v", 42) + checkOutput("This is an error message: 42", "") + Verbosef("This is a verbose message: %v", 17) + checkOutput("", "This is a verbose message: 17") + + SetDebugLoggingEnabled(true) + Errorf("This is an error message: %v", 42) + checkOutput("This is an error message: 42", "") + Verbosef("This is a verbose message: %v", 17) + checkOutput("This is a verbose message: 17", "") + + SetDebugLoggingEnabled(false) + Errorf("This is an error message: %v", 42) + checkOutput("This is an error message: 42", "") + Verbosef("This is a verbose message: %v", 17) + checkOutput("", "This is a verbose message: 17") +} + +func logfTo(w io.Writer) logger.Logf { + return func(format string, args ...any) { + fmt.Fprintf(w, format, 
args...) + } +} diff --git a/util/syspolicy/setting/snapshot.go b/util/syspolicy/setting/snapshot.go index 306bf759ea086..512bc487c5b98 100644 --- a/util/syspolicy/setting/snapshot.go +++ b/util/syspolicy/setting/snapshot.go @@ -4,6 +4,8 @@ package setting import ( + "iter" + "maps" "slices" "strings" @@ -25,15 +27,13 @@ func NewSnapshot(items map[Key]RawItem, opts ...SummaryOption) *Snapshot { return &Snapshot{m: xmaps.Clone(items), sig: deephash.Hash(&items), summary: SummaryWith(opts...)} } -// All returns a map of all policy settings in s. -// The returned map must not be modified. -func (s *Snapshot) All() map[Key]RawItem { +// All returns an iterator over policy settings in s. The iteration order is not +// specified and is not guaranteed to be the same from one call to the next. +func (s *Snapshot) All() iter.Seq2[Key, RawItem] { if s == nil { - return nil + return func(yield func(Key, RawItem) bool) {} } - // TODO(nickkhyl): return iter.Seq2[[Key], [RawItem]] in Go 1.23, - // and remove [keyItemPair]. - return s.m + return maps.All(s.m) } // Get returns the value of the policy setting with the specified key @@ -87,12 +87,11 @@ func (s *Snapshot) EqualItems(s2 *Snapshot) bool { // Keys return an iterator over keys in s. The iteration order is not specified // and is not guaranteed to be the same from one call to the next. -func (s *Snapshot) Keys() []Key { +func (s *Snapshot) Keys() iter.Seq[Key] { if s.m == nil { - return nil + return func(yield func(Key) bool) {} } - // TODO(nickkhyl): return iter.Seq[Key] in Go 1.23. - return xmaps.Keys(s.m) + return maps.Keys(s.m) } // Len reports the number of [RawItem]s in s. 
@@ -116,8 +115,6 @@ func (s *Snapshot) String() string { if s.Len() == 0 && s.Summary().IsEmpty() { return "{Empty}" } - keys := s.Keys() - slices.Sort(keys) var sb strings.Builder if !s.summary.IsEmpty() { sb.WriteRune('{') @@ -127,7 +124,7 @@ func (s *Snapshot) String() string { sb.WriteString(s.summary.String()) sb.WriteRune('}') } - for _, k := range keys { + for _, k := range slices.Sorted(s.Keys()) { if sb.Len() != 0 { sb.WriteRune('\n') } diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go index ccfd83347ddca..abe42ed90f8c7 100644 --- a/util/syspolicy/syspolicy.go +++ b/util/syspolicy/syspolicy.go @@ -8,6 +8,7 @@ import ( "errors" "time" + "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/setting" ) @@ -135,3 +136,8 @@ func SelectControlURL(reg, disk string) string { } return def } + +// SetDebugLoggingEnabled controls whether spammy debug logging is enabled. +func SetDebugLoggingEnabled(v bool) { + loggerx.SetDebugLoggingEnabled(v) +} diff --git a/util/usermetric/usermetric.go b/util/usermetric/usermetric.go index cb3f66ea98d89..c964e08a76395 100644 --- a/util/usermetric/usermetric.go +++ b/util/usermetric/usermetric.go @@ -10,29 +10,33 @@ import ( "fmt" "io" "net/http" + "strings" "tailscale.com/metrics" "tailscale.com/tsweb/varz" ) -var vars expvar.Map +// Registry tracks user-facing metrics of various Tailscale subsystems. +type Registry struct { + vars expvar.Map +} -// NewMultiLabelMap creates and register a new +// NewMultiLabelMapWithRegistry creates and register a new // MultiLabelMap[T] variable with the given name and returns it. // The variable is registered with the userfacing metrics package. // // Note that usermetric are not protected against duplicate // metrics name. It is the caller's responsibility to ensure that // the name is unique. 
-func NewMultiLabelMap[T comparable](name string, promType, helpText string) *metrics.MultiLabelMap[T] { - m := &metrics.MultiLabelMap[T]{ +func NewMultiLabelMapWithRegistry[T comparable](m *Registry, name string, promType, helpText string) *metrics.MultiLabelMap[T] { + ml := &metrics.MultiLabelMap[T]{ Type: promType, Help: helpText, } var zero T _ = metrics.LabelString(zero) // panic early if T is invalid - vars.Set(name, m) - return m + m.vars.Set(name, ml) + return ml } // Gauge is a gauge metric with no labels. @@ -42,20 +46,26 @@ type Gauge struct { } // NewGauge creates and register a new gauge metric with the given name and help text. -func NewGauge(name, help string) *Gauge { +func (r *Registry) NewGauge(name, help string) *Gauge { g := &Gauge{&expvar.Float{}, help} - vars.Set(name, g) + r.vars.Set(name, g) return g } // Set sets the gauge to the given value. func (g *Gauge) Set(v float64) { + if g == nil { + return + } g.m.Set(v) } // String returns the string of the underlying expvar.Float. // This satisfies the expvar.Var interface. func (g *Gauge) String() string { + if g == nil { + return "" + } return g.m.String() } @@ -79,6 +89,17 @@ func (g *Gauge) WritePrometheus(w io.Writer, name string) { // Handler returns a varz.Handler that serves the userfacing expvar contained // in this package. -func Handler(w http.ResponseWriter, r *http.Request) { - varz.ExpvarDoHandler(vars.Do)(w, r) +func (r *Registry) Handler(w http.ResponseWriter, req *http.Request) { + varz.ExpvarDoHandler(r.vars.Do)(w, req) +} + +// String returns the string representation of all the metrics and their +// values in the registry. It is useful for debugging. 
+func (r *Registry) String() string { + var sb strings.Builder + r.vars.Do(func(kv expvar.KeyValue) { + fmt.Fprintf(&sb, "%s: %v\n", kv.Key, kv.Value) + }) + + return sb.String() } diff --git a/util/usermetric/usermetric_test.go b/util/usermetric/usermetric_test.go index aa0e82ea61969..e92db5bfce130 100644 --- a/util/usermetric/usermetric_test.go +++ b/util/usermetric/usermetric_test.go @@ -9,7 +9,8 @@ import ( ) func TestGauge(t *testing.T) { - g := NewGauge("test_gauge", "This is a test gauge") + var reg Registry + g := reg.NewGauge("test_gauge", "This is a test gauge") g.Set(15) var buf bytes.Buffer diff --git a/util/vizerror/vizerror.go b/util/vizerror/vizerror.go index 158786494d4f3..919d765d0ef2d 100644 --- a/util/vizerror/vizerror.go +++ b/util/vizerror/vizerror.go @@ -12,35 +12,67 @@ import ( // Error is an error that is safe to display to end users. type Error struct { - err error + publicErr error // visible to end users + wrapped error // internal } -// Error implements the error interface. +// Error implements the error interface. The returned string is safe to display +// to end users. func (e Error) Error() string { - return e.err.Error() + return e.publicErr.Error() } // New returns an error that formats as the given text. It always returns a vizerror.Error. -func New(text string) error { - return Error{errors.New(text)} +func New(publicMsg string) error { + err := errors.New(publicMsg) + return Error{ + publicErr: err, + wrapped: err, + } } -// Errorf returns an Error with the specified format and values. It always returns a vizerror.Error. -func Errorf(format string, a ...any) error { - return Error{fmt.Errorf(format, a...)} +// Errorf returns an Error with the specified publicMsgFormat and values. It always returns a vizerror.Error. +// +// Warning: avoid using an error as one of the format arguments, as this will cause the text +// of that error to be displayed to the end user (which is probably not what you want). 
+func Errorf(publicMsgFormat string, a ...any) error { + err := fmt.Errorf(publicMsgFormat, a...) + return Error{ + publicErr: err, + wrapped: err, + } } // Unwrap returns the underlying error. +// +// If the Error was constructed using [WrapWithMessage], this is the wrapped (internal) error +// and not the user-visible error message. func (e Error) Unwrap() error { - return e.err + return e.wrapped } -// Wrap wraps err with a vizerror.Error. -func Wrap(err error) error { - if err == nil { +// Wrap wraps publicErr with a vizerror.Error. +// +// Deprecated: this is almost always the wrong thing to do. Are you really sure +// you know exactly what err.Error() will stringify to and be safe to show to +// users? [WrapWithMessage] is probably what you want. +func Wrap(publicErr error) error { + if publicErr == nil { return nil } - return Error{err} + return Error{publicErr: publicErr, wrapped: publicErr} +} + +// WrapWithMessage wraps the given error with a message that's safe to display +// to end users. The text of the wrapped error will not be displayed to end +// users. +// +// WrapWithMessage should almost always be preferred to [Wrap]. +func WrapWithMessage(wrapped error, publicMsg string) error { + return Error{ + publicErr: errors.New(publicMsg), + wrapped: wrapped, + } } // As returns the first vizerror.Error in err's chain. 
diff --git a/util/vizerror/vizerror_test.go b/util/vizerror/vizerror_test.go index bbd2c07e5840c..242ca6462f37b 100644 --- a/util/vizerror/vizerror_test.go +++ b/util/vizerror/vizerror_test.go @@ -42,3 +42,25 @@ func TestAs(t *testing.T) { t.Errorf("As() returned error %v, want %v", got, verr) } } + +func TestWrap(t *testing.T) { + wrapped := errors.New("wrapped") + err := Wrap(wrapped) + if err.Error() != "wrapped" { + t.Errorf(`Wrap(wrapped).Error() = %q, want %q`, err.Error(), "wrapped") + } + if errors.Unwrap(err) != wrapped { + t.Errorf("Unwrap = %q, want %q", errors.Unwrap(err), wrapped) + } +} + +func TestWrapWithMessage(t *testing.T) { + wrapped := errors.New("wrapped") + err := WrapWithMessage(wrapped, "safe") + if err.Error() != "safe" { + t.Errorf(`WrapWithMessage(wrapped, "safe").Error() = %q, want %q`, err.Error(), "safe") + } + if errors.Unwrap(err) != wrapped { + t.Errorf("Unwrap = %q, want %q", errors.Unwrap(err), wrapped) + } +} diff --git a/version-embed.go b/version-embed.go index 40c2e7cef89b3..2d517339d571c 100644 --- a/version-embed.go +++ b/version-embed.go @@ -4,7 +4,10 @@ // Package tailscaleroot embeds VERSION.txt into the binary. package tailscaleroot -import _ "embed" +import ( + _ "embed" + "runtime/debug" +) // VersionDotTxt is the contents of VERSION.txt. 
Despite the tempting filename, // this does not necessarily contain the accurate version number of the build, which @@ -22,3 +25,16 @@ var AlpineDockerTag string // //go:embed go.toolchain.rev var GoToolchainRev string + +func tailscaleToolchainRev() (gitHash string, ok bool) { + bi, ok := debug.ReadBuildInfo() + if !ok { + return "", false + } + for _, s := range bi.Settings { + if s.Key == "tailscale.toolchain.rev" { + return s.Value, true + } + } + return "", false +} diff --git a/version/mkversion/mkversion.go b/version/mkversion/mkversion.go index 148bca92653a8..2fa84480dd144 100644 --- a/version/mkversion/mkversion.go +++ b/version/mkversion/mkversion.go @@ -61,7 +61,7 @@ type VersionInfo struct { // Winres is the version string that gets embedded into Windows exe // metadata. It is of the form "x,y,z,0". Winres string - // Synology is a map of Synology DSM major version to the + // Synology is a map of Synology DSM version to the // Tailscale numeric version that gets embedded in Synology spk // files. Synology map[int]int64 @@ -252,12 +252,13 @@ func mkOutput(v verInfo) (VersionInfo, error) { Track: track, Synology: map[int]int64{ // Synology requires that version numbers be in a specific format. - // Builds with version numbers that don't start with "60" or "70" will fail, + // Builds with version numbers that don't start with "60", "70", or "72" will fail, // and the full version number must be within int32 range. // So, we do the following mapping from our Tailscale version to Synology version, // giving major version three decimal places, minor version three, and patch two. 
- 6: 6*100_000_000 + int64(v.major-1)*1_000_000 + int64(v.minor)*1_000 + int64(v.patch), - 7: 7*100_000_000 + int64(v.major-1)*1_000_000 + int64(v.minor)*1_000 + int64(v.patch), + 60: 60*10_000_000 + int64(v.major-1)*1_000_000 + int64(v.minor)*1_000 + int64(v.patch), + 70: 70*10_000_000 + int64(v.major-1)*1_000_000 + int64(v.minor)*1_000 + int64(v.patch), + 72: 72*10_000_000 + int64(v.major-1)*1_000_000 + int64(v.minor)*1_000 + int64(v.patch), }, } diff --git a/version_tailscale_test.go b/version_tailscale_test.go new file mode 100644 index 0000000000000..0a690e312202f --- /dev/null +++ b/version_tailscale_test.go @@ -0,0 +1,27 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build tailscale_go + +package tailscaleroot + +import ( + "os" + "strings" + "testing" +) + +func TestToolchainMatches(t *testing.T) { + tsRev, ok := tailscaleToolchainRev() + if !ok { + t.Fatal("failed to read build info") + } + want := strings.TrimSpace(GoToolchainRev) + if tsRev != want { + if os.Getenv("TS_PERMIT_TOOLCHAIN_MISMATCH") == "1" { + t.Logf("tailscale.toolchain.rev = %q, want %q; but ignoring due to TS_PERMIT_TOOLCHAIN_MISMATCH=1", tsRev, want) + return + } + t.Errorf("tailscale.toolchain.rev = %q, want %q; permit with TS_PERMIT_TOOLCHAIN_MISMATCH=1", tsRev, want) + } +} diff --git a/wf/firewall.go b/wf/firewall.go index 730fa3d15533e..076944c8decad 100644 --- a/wf/firewall.go +++ b/wf/firewall.go @@ -22,6 +22,9 @@ var ( linkLocalDHCPMulticast = netip.MustParseAddr("ff02::1:2") siteLocalDHCPMulticast = netip.MustParseAddr("ff05::1:3") linkLocalRouterMulticast = netip.MustParseAddr("ff02::2") + + linkLocalMulticastIPv4Range = netip.MustParsePrefix("224.0.0.0/24") + linkLocalMulticastIPv6Range = netip.MustParsePrefix("ff02::/16") ) type direction int @@ -224,15 +227,67 @@ func (f *Firewall) UpdatePermittedRoutes(newRoutes []netip.Prefix) error { } else { p = protocolV6 } - rules, err := f.addRules("local route", weightKnownTraffic, 
conditions, wf.ActionPermit, p, directionBoth) + name := "local route - " + r.String() + rules, err := f.addRules(name, weightKnownTraffic, conditions, wf.ActionPermit, p, directionBoth) + if err != nil { + return err + } + + name = "link-local multicast - " + r.String() + conditions = matchLinkLocalMulticast(r, false) + multicastRules, err := f.addRules(name, weightKnownTraffic, conditions, wf.ActionPermit, p, directionOutbound) + if err != nil { + return err + } + rules = append(rules, multicastRules...) + + conditions = matchLinkLocalMulticast(r, true) + multicastRules, err = f.addRules(name, weightKnownTraffic, conditions, wf.ActionPermit, p, directionInbound) if err != nil { return err } + rules = append(rules, multicastRules...) + f.permittedRoutes[r] = rules } return nil } +// matchLinkLocalMulticast returns a list of conditions that match +// outbound or inbound link-local multicast traffic to or from the +// specified network. +func matchLinkLocalMulticast(pfx netip.Prefix, inbound bool) []*wf.Match { + var linkLocalMulticastRange netip.Prefix + if pfx.Addr().Is4() { + linkLocalMulticastRange = linkLocalMulticastIPv4Range + } else { + linkLocalMulticastRange = linkLocalMulticastIPv6Range + } + var localAddr, remoteAddr netip.Prefix + if inbound { + localAddr, remoteAddr = linkLocalMulticastRange, pfx + } else { + localAddr, remoteAddr = pfx, linkLocalMulticastRange + } + return []*wf.Match{ + { + Field: wf.FieldIPProtocol, + Op: wf.MatchTypeEqual, + Value: wf.IPProtoUDP, + }, + { + Field: wf.FieldIPLocalAddress, + Op: wf.MatchTypeEqual, + Value: localAddr, + }, + { + Field: wf.FieldIPRemoteAddress, + Op: wf.MatchTypeEqual, + Value: remoteAddr, + }, + } +} + func (f *Firewall) newRule(name string, w weight, layer wf.LayerID, conditions []*wf.Match, action wf.Action) (*wf.Rule, error) { id, err := windows.GenerateGUID() if err != nil { diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a056cff22676b..08aff842d77aa 100644 --- 
a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -60,6 +60,7 @@ import ( "tailscale.com/util/set" "tailscale.com/util/testenv" "tailscale.com/util/uniq" + "tailscale.com/util/usermetric" "tailscale.com/wgengine/capture" "tailscale.com/wgengine/wgint" ) @@ -386,6 +387,9 @@ type Options struct { // report errors and warnings to. HealthTracker *health.Tracker + // Metrics specifies the metrics registry to record metrics to. + Metrics *usermetric.Registry + // ControlKnobs are the set of control knobs to use. // If nil, they're ignored and not updated. ControlKnobs *controlknobs.Knobs @@ -491,15 +495,9 @@ func NewConn(opts Options) (*Conn, error) { c.connCtx, c.connCtxCancel = context.WithCancel(context.Background()) c.donec = c.connCtx.Done() c.netChecker = &netcheck.Client{ - Logf: logger.WithPrefix(c.logf, "netcheck: "), - NetMon: c.netMon, - SendPacket: func(b []byte, ap netip.AddrPort) (int, error) { - ok, err := c.sendUDP(ap, b) - if !ok { - return 0, err - } - return len(b), err - }, + Logf: logger.WithPrefix(c.logf, "netcheck: "), + NetMon: c.netMon, + SendPacket: c.sendUDPNetcheck, SkipExternalNetwork: inTest(), PortMapper: c.portMapper, UseDNSCache: true, @@ -582,7 +580,7 @@ func (c *Conn) updateEndpoints(why string) { c.muCond.Broadcast() }() c.dlogf("[v1] magicsock: starting endpoint update (%s)", why) - if c.noV4Send.Load() && runtime.GOOS != "js" { + if c.noV4Send.Load() && runtime.GOOS != "js" && !c.onlyTCP443.Load() { c.mu.Lock() closed := c.closed c.mu.Unlock() @@ -688,9 +686,6 @@ func (c *Conn) updateNetInfo(ctx context.Context) (*netcheck.Report, error) { return new(netcheck.Report), nil } - ctx, cancel := context.WithTimeout(ctx, 2*time.Second) - defer cancel() - report, err := c.netChecker.GetReport(ctx, dm, &netcheck.GetReportOpts{ // Pass information about the last time that we received a // frame from a DERP server to our netchecker to help avoid @@ -701,6 +696,7 @@ func (c *Conn) updateNetInfo(ctx context.Context) 
(*netcheck.Report, error) { // health package here, but I'd rather do that and not store // the exact same state in two different places. GetLastDERPActivity: c.health.GetDERPRegionReceivedTime, + OnlyTCP443: c.onlyTCP443.Load(), }) if err != nil { return nil, err @@ -1153,8 +1149,8 @@ func (c *Conn) sendUDP(ipp netip.AddrPort, b []byte) (sent bool, err error) { // maybeRebindOnError performs a rebind and restun if the error is defined and // any conditionals are met. func (c *Conn) maybeRebindOnError(os string, err error) bool { - switch err { - case syscall.EPERM: + switch { + case errors.Is(err, syscall.EPERM): why := "operation-not-permitted-rebind" switch os { // We currently will only rebind and restun on a syscall.EPERM if it is experienced @@ -1178,7 +1174,25 @@ func (c *Conn) maybeRebindOnError(os string, err error) bool { return false } -// sendUDP sends UDP packet b to addr. +// sendUDPNetcheck sends b via UDP to addr. It is used exclusively by netcheck. +// It returns the number of bytes sent along with any error encountered. It +// returns errors.ErrUnsupported if the client is explicitly configured to only +// send data over TCP port 443 and/or we're running on wasm. +func (c *Conn) sendUDPNetcheck(b []byte, addr netip.AddrPort) (int, error) { + if c.onlyTCP443.Load() || runtime.GOOS == "js" { + return 0, errors.ErrUnsupported + } + switch { + case addr.Addr().Is4(): + return c.pconn4.WriteToUDPAddrPort(b, addr) + case addr.Addr().Is6(): + return c.pconn6.WriteToUDPAddrPort(b, addr) + default: + panic("bogus sendUDPNetcheck addr type") + } +} + +// sendUDPStd sends UDP packet b to addr. // See sendAddr's docs on the return value meanings. 
func (c *Conn) sendUDPStd(addr netip.AddrPort, b []byte) (sent bool, err error) { if c.onlyTCP443.Load() { @@ -1285,7 +1299,7 @@ func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFu healthItem.Enter() defer healthItem.Exit() defer func() { - if retErr != nil { + if retErr != nil && !c.closing.Load() { c.logf("Receive func %s exiting with error: %T, %v", healthItem.Name(), retErr, retErr) } }() @@ -3013,18 +3027,9 @@ func getPeerMTUsProbedMetric(mtu tstun.WireMTU) *clientmetric.Metric { return mm } -// GetLastNetcheckReport returns the last netcheck report, running a new one if a recent one does not exist. +// GetLastNetcheckReport returns the last netcheck report, returning nil if a recent one does not exist. func (c *Conn) GetLastNetcheckReport(ctx context.Context) *netcheck.Report { - lastReport := c.lastNetCheckReport.Load() - if lastReport == nil { - nr, err := c.updateNetInfo(ctx) - if err != nil { - c.logf("magicsock.Conn.GetLastNetcheckReport: updateNetInfo: %v", err) - return nil - } - return nr - } - return lastReport + return c.lastNetCheckReport.Load() } // SetLastNetcheckReportForTest sets the magicsock conn's last netcheck report. 
diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index be1b43f56a151..6b2d961b9b6fd 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -64,6 +64,7 @@ import ( "tailscale.com/util/cibuild" "tailscale.com/util/racebuild" "tailscale.com/util/set" + "tailscale.com/util/usermetric" "tailscale.com/wgengine/filter" "tailscale.com/wgengine/wgcfg" "tailscale.com/wgengine/wgcfg/nmcfg" @@ -156,6 +157,7 @@ type magicStack struct { dev *device.Device // the wireguard-go Device that connects the previous things wgLogger *wglog.Logger // wireguard-go log wrapper netMon *netmon.Monitor // always non-nil + metrics *usermetric.Registry } // newMagicStack builds and initializes an idle magicsock and @@ -174,9 +176,11 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListen t.Fatalf("netmon.New: %v", err) } + var reg usermetric.Registry epCh := make(chan []tailcfg.Endpoint, 100) // arbitrary conn, err := NewConn(Options{ NetMon: netMon, + Metrics: ®, Logf: logf, DisablePortMapper: true, TestOnlyPacketListener: l, @@ -193,7 +197,7 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListen } tun := tuntest.NewChannelTUN() - tsTun := tstun.Wrap(logf, tun.TUN()) + tsTun := tstun.Wrap(logf, tun.TUN(), ®) tsTun.SetFilter(filter.NewAllowAllForTest(logf)) tsTun.Start() @@ -219,6 +223,7 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListen dev: dev, wgLogger: wgLogger, netMon: netMon, + metrics: ®, } } @@ -397,6 +402,7 @@ func TestNewConn(t *testing.T) { EndpointsFunc: epFunc, Logf: t.Logf, NetMon: netMon, + Metrics: new(usermetric.Registry), }) if err != nil { t.Fatal(err) @@ -523,6 +529,7 @@ func TestDeviceStartStop(t *testing.T) { EndpointsFunc: func(eps []tailcfg.Endpoint) {}, Logf: t.Logf, NetMon: netMon, + Metrics: new(usermetric.Registry), }) if err != nil { t.Fatal(err) @@ -1275,6 +1282,7 @@ func newTestConn(t testing.TB) 
*Conn { conn, err := NewConn(Options{ NetMon: netMon, HealthTracker: new(health.Tracker), + Metrics: new(usermetric.Registry), DisablePortMapper: true, Logf: t.Logf, Port: port, @@ -2957,26 +2965,31 @@ func TestMaybeRebindOnError(t *testing.T) { tstest.PanicOnLog() tstest.ResourceCheck(t) - conn := newTestConn(t) - defer conn.Close() + err := fmt.Errorf("outer err: %w", syscall.EPERM) t.Run("darwin-rebind", func(t *testing.T) { - rebound := conn.maybeRebindOnError("darwin", syscall.EPERM) + conn := newTestConn(t) + defer conn.Close() + rebound := conn.maybeRebindOnError("darwin", err) if !rebound { t.Errorf("darwin should rebind on syscall.EPERM") } }) t.Run("linux-not-rebind", func(t *testing.T) { - rebound := conn.maybeRebindOnError("linux", syscall.EPERM) + conn := newTestConn(t) + defer conn.Close() + rebound := conn.maybeRebindOnError("linux", err) if rebound { t.Errorf("linux should not rebind on syscall.EPERM") } }) t.Run("no-frequent-rebind", func(t *testing.T) { + conn := newTestConn(t) + defer conn.Close() conn.lastEPERMRebind.Store(time.Now().Add(-1 * time.Second)) - rebound := conn.maybeRebindOnError("darwin", syscall.EPERM) + rebound := conn.maybeRebindOnError("darwin", err) if rebound { t.Errorf("darwin should not rebind on syscall.EPERM within 5 seconds of last") } diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index d029b6c194575..3185c5d556aa9 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -5,6 +5,7 @@ package netstack import ( + "bytes" "context" "errors" "expvar" @@ -413,15 +414,14 @@ func init() { // endpoint, and name collisions will result in Prometheus scraping errors. 
clientmetric.NewCounterFunc("netstack_tcp_forward_dropped_attempts", func() int64 { var total uint64 - stacksForMetrics.Range(func(ns *Impl, _ struct{}) bool { + for ns := range stacksForMetrics.Keys() { delta := ns.ipstack.Stats().TCP.ForwardMaxInFlightDrop.Value() if total+delta > math.MaxInt64 { total = math.MaxInt64 - return false + break } total += delta - return true - }) + } return int64(total) }) } @@ -1909,3 +1909,35 @@ func (ns *Impl) ExpVar() expvar.Var { return m } + +// windowsPingOutputIsSuccess reports whether the ping.exe output b contains a +// success ping response for ip. +// +// See https://github.com/tailscale/tailscale/issues/13654 +// +// TODO(bradfitz,nickkhyl): delete this and use the proper Windows APIs. +func windowsPingOutputIsSuccess(ip netip.Addr, b []byte) bool { + // Look for a line that contains " : " and then three equal signs. + // As a special case, the 2nd equal sign may be a '<' character + // for sub-millisecond pings. + // This heuristic seems to match the ping.exe output in any language. 
+ sub := fmt.Appendf(nil, " %s: ", ip) + + eqSigns := func(bb []byte) (n int) { + for _, b := range bb { + if b == '=' || (b == '<' && n == 1) { + n++ + } + } + return + } + + for len(b) > 0 { + var line []byte + line, b, _ = bytes.Cut(b, []byte("\n")) + if _, rest, ok := bytes.Cut(line, sub); ok && eqSigns(rest) == 3 { + return true + } + } + return false +} diff --git a/wgengine/netstack/netstack_test.go b/wgengine/netstack/netstack_test.go index 6be61cd58fa35..1bfc76fef097f 100644 --- a/wgengine/netstack/netstack_test.go +++ b/wgengine/netstack/netstack_test.go @@ -50,6 +50,7 @@ func TestInjectInboundLeak(t *testing.T) { Dialer: dialer, SetSubsystem: sys.Set, HealthTracker: sys.HealthTracker(), + Metrics: sys.UserMetricsRegistry(), }) if err != nil { t.Fatal(err) @@ -107,6 +108,7 @@ func makeNetstack(tb testing.TB, config func(*Impl)) *Impl { Dialer: dialer, SetSubsystem: sys.Set, HealthTracker: sys.HealthTracker(), + Metrics: sys.UserMetricsRegistry(), }) if err != nil { tb.Fatal(err) diff --git a/wgengine/netstack/netstack_userping.go b/wgengine/netstack/netstack_userping.go index ab95b596233ec..ee635bd877dca 100644 --- a/wgengine/netstack/netstack_userping.go +++ b/wgengine/netstack/netstack_userping.go @@ -6,6 +6,7 @@ package netstack import ( + "errors" "net/netip" "os" "os/exec" @@ -26,7 +27,13 @@ func (ns *Impl) sendOutboundUserPing(dstIP netip.Addr, timeout time.Duration) er var err error switch runtime.GOOS { case "windows": - err = exec.Command("ping", "-n", "1", "-w", "3000", dstIP.String()).Run() + var out []byte + out, err = exec.Command("ping", "-n", "1", "-w", "3000", dstIP.String()).CombinedOutput() + if err == nil && !windowsPingOutputIsSuccess(dstIP, out) { + // TODO(bradfitz,nickkhyl): return the actual ICMP error we heard back to the caller? + // For now we just drop it. + err = errors.New("unsuccessful ICMP reply received") + } case "freebsd": // Note: 2000 ms is actually 1 second + 2,000 // milliseconds extra for 3 seconds total. 
diff --git a/wgengine/netstack/netstack_userping_test.go b/wgengine/netstack/netstack_userping_test.go new file mode 100644 index 0000000000000..a179f74673469 --- /dev/null +++ b/wgengine/netstack/netstack_userping_test.go @@ -0,0 +1,77 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package netstack + +import ( + "net/netip" + "testing" +) + +func TestWindowsPingOutputIsSuccess(t *testing.T) { + tests := []struct { + name string + ip string + out string + want bool + }{ + { + name: "success", + ip: "10.0.0.1", + want: true, + out: `Pinging 10.0.0.1 with 32 bytes of data: +Reply from 10.0.0.1: bytes=32 time=7ms TTL=64 + +Ping statistics for 10.0.0.1: + Packets: Sent = 1, Received = 1, Lost = 0 (0% loss), +Approximate round trip times in milli-seconds: + Minimum = 7ms, Maximum = 7ms, Average = 7ms +`, + }, + { + name: "success_sub_millisecond", + ip: "10.0.0.1", + want: true, + out: `Pinging 10.0.0.1 with 32 bytes of data: +Reply from 10.0.0.1: bytes=32 time<1ms TTL=64 + +Ping statistics for 10.0.0.1: + Packets: Sent = 1, Received = 1, Lost = 0 (0% loss), +Approximate round trip times in milli-seconds: + Minimum = 7ms, Maximum = 7ms, Average = 7ms +`, + }, + { + name: "success_german", + ip: "10.0.0.1", + want: true, + out: `Ping wird ausgeführt für 10.0.0.1 mit 32 Bytes Daten: +Antwort von from 10.0.0.1: Bytes=32 Zeit=7ms TTL=64 + +Ping-Statistik für 10.0.0.1: + Pakete: Gesendet = 4, Empfangen = 4, Verloren = 0 (0% Verlust), +Ca. 
Zeitangaben in Millisek.: + Minimum = 7ms, Maximum = 7ms, Mittelwert = 7ms +`, + }, + { + name: "unreachable", + ip: "10.0.0.6", + want: false, + out: `Pinging 10.0.0.6 with 32 bytes of data: +Reply from 10.0.108.189: Destination host unreachable + +Ping statistics for 10.0.0.6: + Packets: Sent = 1, Received = 1, Lost = 0 (0% loss), +`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := windowsPingOutputIsSuccess(netip.MustParseAddr(tt.ip), []byte(tt.out)) + if got != tt.want { + t.Errorf("got %v; want %v", got, tt.want) + } + }) + } +} diff --git a/wgengine/pendopen.go b/wgengine/pendopen.go index 59b1fccda5cd1..340c7e0f3f7be 100644 --- a/wgengine/pendopen.go +++ b/wgengine/pendopen.go @@ -5,14 +5,17 @@ package wgengine import ( "fmt" + "net/netip" "runtime" + "strings" "time" + "github.com/gaissmai/bart" "tailscale.com/net/flowtrack" "tailscale.com/net/packet" - "tailscale.com/net/tsaddr" "tailscale.com/net/tstun" "tailscale.com/types/ipproto" + "tailscale.com/types/lazy" "tailscale.com/util/mak" "tailscale.com/wgengine/filter" ) @@ -86,6 +89,57 @@ func (e *userspaceEngine) trackOpenPreFilterIn(pp *packet.Parsed, t *tstun.Wrapp return } +var ( + appleIPRange = netip.MustParsePrefix("17.0.0.0/8") + canonicalIPs = lazy.SyncFunc(func() (checkIPFunc func(netip.Addr) bool) { + // https://bgp.he.net/AS41231#_prefixes + t := &bart.Table[bool]{} + for _, s := range strings.Fields(` + 91.189.89.0/24 + 91.189.91.0/24 + 91.189.92.0/24 + 91.189.93.0/24 + 91.189.94.0/24 + 91.189.95.0/24 + 162.213.32.0/24 + 162.213.34.0/24 + 162.213.35.0/24 + 185.125.188.0/23 + 185.125.190.0/24 + 194.169.254.0/24`) { + t.Insert(netip.MustParsePrefix(s), true) + } + return func(ip netip.Addr) bool { + v, _ := t.Lookup(ip) + return v + } + }) +) + +// isOSNetworkProbe reports whether the target is likely a network +// connectivity probe target from e.g. iOS or Ubuntu network-manager. 
+// +// iOS likes to probe Apple IPs on all interfaces to check for connectivity. +// Don't start timers tracking those. They won't succeed anyway. Avoids log +// spam like: +func (e *userspaceEngine) isOSNetworkProbe(dst netip.AddrPort) bool { + // iOS had log spam like: + // open-conn-track: timeout opening (100.115.73.60:52501 => 17.125.252.5:443); no associated peer node + if runtime.GOOS == "ios" && dst.Port() == 443 && appleIPRange.Contains(dst.Addr()) { + if _, ok := e.PeerForIP(dst.Addr()); !ok { + return true + } + } + // NetworkManager; https://github.com/tailscale/tailscale/issues/13687 + // open-conn-track: timeout opening (TCP 100.96.229.119:42798 => 185.125.190.49:80); no associated peer node + if runtime.GOOS == "linux" && dst.Port() == 80 && canonicalIPs()(dst.Addr()) { + if _, ok := e.PeerForIP(dst.Addr()); !ok { + return true + } + } + return false +} + func (e *userspaceEngine) trackOpenPostFilterOut(pp *packet.Parsed, t *tstun.Wrapper) (res filter.Response) { res = filter.Accept // always @@ -95,19 +149,12 @@ func (e *userspaceEngine) trackOpenPostFilterOut(pp *packet.Parsed, t *tstun.Wra pp.TCPFlags&packet.TCPSyn == 0 { return } + if e.isOSNetworkProbe(pp.Dst) { + return + } flow := flowtrack.MakeTuple(pp.IPProto, pp.Src, pp.Dst) - // iOS likes to probe Apple IPs on all interfaces to check for connectivity. - // Don't start timers tracking those. They won't succeed anyway. 
Avoids log spam - // like: - // open-conn-track: timeout opening (100.115.73.60:52501 => 17.125.252.5:443); no associated peer node - if runtime.GOOS == "ios" && flow.DstPort() == 443 && !tsaddr.IsTailscaleIP(flow.DstAddr()) { - if _, ok := e.PeerForIP(flow.DstAddr()); !ok { - return - } - } - e.mu.Lock() defer e.mu.Unlock() if _, dup := e.pendOpen[flow]; dup { diff --git a/wgengine/router/router_linux_test.go b/wgengine/router/router_linux_test.go index f55361225a302..dce69550d909a 100644 --- a/wgengine/router/router_linux_test.go +++ b/wgengine/router/router_linux_test.go @@ -530,13 +530,24 @@ func (n *fakeIPTablesRunner) DNATWithLoadBalancer(netip.Addr, []netip.Addr) erro return errors.New("not implemented") } -func (n *fakeIPTablesRunner) AddSNATRuleForDst(src, dst netip.Addr) error { +func (n *fakeIPTablesRunner) EnsureSNATForDst(src, dst netip.Addr) error { return errors.New("not implemented") } func (n *fakeIPTablesRunner) DNATNonTailscaleTraffic(exemptInterface string, dst netip.Addr) error { return errors.New("not implemented") } +func (n *fakeIPTablesRunner) EnsurePortMapRuleForSvc(svc, tun string, targetIP netip.Addr, pm linuxfw.PortMap) error { + return errors.New("not implemented") +} + +func (n *fakeIPTablesRunner) DeletePortMapRuleForSvc(svc, tun string, targetIP netip.Addr, pm linuxfw.PortMap) error { + return errors.New("not implemented") +} + +func (n *fakeIPTablesRunner) DeleteSvc(svc, tun string, targetIPs []netip.Addr, pm []linuxfw.PortMap) error { + return errors.New("not implemented") +} func (n *fakeIPTablesRunner) ClampMSSToPMTU(tun string, addr netip.Addr) error { return errors.New("not implemented") diff --git a/wgengine/userspace.go b/wgengine/userspace.go index f6b4586cbeadb..fc204736a1da2 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -49,6 +49,7 @@ import ( "tailscale.com/util/mak" "tailscale.com/util/set" "tailscale.com/util/testenv" + "tailscale.com/util/usermetric" "tailscale.com/version" 
"tailscale.com/wgengine/capture" "tailscale.com/wgengine/filter" @@ -195,6 +196,10 @@ type Config struct { // HealthTracker, if non-nil, is the health tracker to use. HealthTracker *health.Tracker + // Metrics is the usermetrics registry to use. + // Mandatory, if not set, an error is returned. + Metrics *usermetric.Registry + // Dialer is the dialer to use for outbound connections. // If nil, a new Dialer is created. Dialer *tsdial.Dialer @@ -249,6 +254,8 @@ func NewFakeUserspaceEngine(logf logger.Logf, opts ...any) (Engine, error) { conf.ControlKnobs = v case *health.Tracker: conf.HealthTracker = v + case *usermetric.Registry: + conf.Metrics = v default: return nil, fmt.Errorf("unknown option type %T", v) } @@ -267,6 +274,10 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) panic("NewUserspaceEngine called without HealthTracker (being strict in tests)") } + if conf.Metrics == nil { + return nil, errors.New("NewUserspaceEngine: opts.Metrics is required, please pass a *usermetric.Registry") + } + if conf.Tun == nil { logf("[v1] using fake (no-op) tun device") conf.Tun = tstun.NewFake() @@ -289,9 +300,9 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) var tsTUNDev *tstun.Wrapper if conf.IsTAP { - tsTUNDev = tstun.WrapTAP(logf, conf.Tun) + tsTUNDev = tstun.WrapTAP(logf, conf.Tun, conf.Metrics) } else { - tsTUNDev = tstun.Wrap(logf, conf.Tun) + tsTUNDev = tstun.Wrap(logf, conf.Tun, conf.Metrics) } closePool.add(tsTUNDev) @@ -387,6 +398,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) NoteRecvActivity: e.noteRecvActivity, NetMon: e.netMon, HealthTracker: e.health, + Metrics: conf.Metrics, ControlKnobs: conf.ControlKnobs, OnPortUpdate: onPortUpdate, PeerByKeyFunc: e.PeerByKey, diff --git a/wgengine/userspace_ext_test.go b/wgengine/userspace_ext_test.go index 6610f1e92c35f..cc29be234d4ea 100644 --- a/wgengine/userspace_ext_test.go +++ b/wgengine/userspace_ext_test.go @@ 
-22,6 +22,7 @@ func TestIsNetstack(t *testing.T) { wgengine.Config{ SetSubsystem: sys.Set, HealthTracker: sys.HealthTracker(), + Metrics: sys.UserMetricsRegistry(), }, ) if err != nil { @@ -72,6 +73,7 @@ func TestIsNetstackRouter(t *testing.T) { conf := tt.conf conf.SetSubsystem = sys.Set conf.HealthTracker = sys.HealthTracker() + conf.Metrics = sys.UserMetricsRegistry() e, err := wgengine.NewUserspaceEngine(logger.Discard, conf) if err != nil { t.Fatal(err) diff --git a/wgengine/userspace_test.go b/wgengine/userspace_test.go index 8763a84a14ef4..0514218625a60 100644 --- a/wgengine/userspace_test.go +++ b/wgengine/userspace_test.go @@ -25,6 +25,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/netmap" "tailscale.com/types/opt" + "tailscale.com/util/usermetric" "tailscale.com/wgengine/router" "tailscale.com/wgengine/wgcfg" ) @@ -100,7 +101,8 @@ func nodeViews(v []*tailcfg.Node) []tailcfg.NodeView { func TestUserspaceEngineReconfig(t *testing.T) { ht := new(health.Tracker) - e, err := NewFakeUserspaceEngine(t.Logf, 0, ht) + reg := new(usermetric.Registry) + e, err := NewFakeUserspaceEngine(t.Logf, 0, ht, reg) if err != nil { t.Fatal(err) } @@ -167,9 +169,10 @@ func TestUserspaceEnginePortReconfig(t *testing.T) { // Keep making a wgengine until we find an unused port var ue *userspaceEngine ht := new(health.Tracker) + reg := new(usermetric.Registry) for i := range 100 { attempt := uint16(defaultPort + i) - e, err := NewFakeUserspaceEngine(t.Logf, attempt, &knobs, ht) + e, err := NewFakeUserspaceEngine(t.Logf, attempt, &knobs, ht, reg) if err != nil { t.Fatal(err) } @@ -249,7 +252,8 @@ func TestUserspaceEnginePeerMTUReconfig(t *testing.T) { var knobs controlknobs.Knobs ht := new(health.Tracker) - e, err := NewFakeUserspaceEngine(t.Logf, 0, &knobs, ht) + reg := new(usermetric.Registry) + e, err := NewFakeUserspaceEngine(t.Logf, 0, &knobs, ht, reg) if err != nil { t.Fatal(err) } diff --git a/wgengine/watchdog_test.go b/wgengine/watchdog_test.go index 
0d4fcd8c1cabc..b05cd421fe309 100644 --- a/wgengine/watchdog_test.go +++ b/wgengine/watchdog_test.go @@ -9,6 +9,7 @@ import ( "time" "tailscale.com/health" + "tailscale.com/util/usermetric" ) func TestWatchdog(t *testing.T) { @@ -24,7 +25,8 @@ func TestWatchdog(t *testing.T) { t.Run("default watchdog does not fire", func(t *testing.T) { t.Parallel() ht := new(health.Tracker) - e, err := NewFakeUserspaceEngine(t.Logf, 0, ht) + reg := new(usermetric.Registry) + e, err := NewFakeUserspaceEngine(t.Logf, 0, ht, reg) if err != nil { t.Fatal(err) }