feat: upgrade go version to 1.23

naison
2024-10-09 11:28:31 +00:00
parent d141ec869b
commit 4d075b29b3
111 changed files with 2156 additions and 740 deletions

.github/krew.yaml vendored

@@ -5,12 +5,11 @@ metadata:
spec:
version: {{ .TagName }}
homepage: https://github.com/kubenetworks/kubevpn
shortDescription: "A vpn tunnel tools which can connect to kubernetes cluster network"
shortDescription: "KubeVPN offers a Cloud Native Dev Environment that connects to kubernetes cluster network"
description: |
KubeVPN is Cloud Native Dev Environment, connect to kubernetes cluster network, you can access remote kubernetes
cluster network, remote
kubernetes cluster service can also access your local service. and more, you can run your kubernetes pod on local Docker
container with same environment、volume、and network. you can develop your application on local PC totally.
KubeVPN offers a Cloud-Native Dev Environment that seamlessly connects to your Kubernetes cluster network.
Gain access to the Kubernetes cluster network effortlessly using service names or Pod IP/Service IP. Facilitate the interception of inbound traffic from remote Kubernetes cluster services to your local PC through a service mesh and more.
For instance, you have the flexibility to run your Kubernetes pod within a local Docker container, ensuring an identical environment, volume, and network setup. With KubeVPN, empower yourself to develop applications entirely on your local PC!
platforms:
- selector:


@@ -10,12 +10,12 @@ jobs:
linux:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.23'
check-latest: true
- name: Setup Minikube
@@ -46,7 +46,7 @@ jobs:
- name: Build
run: |
export VERSION=test
export VERSION=${{github.event.pull_request.head.sha}}
make kubevpn-linux-amd64
chmod +x ./bin/kubevpn
cp ./bin/kubevpn /usr/local/bin/kubevpn


@@ -11,12 +11,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.23'
check-latest: true
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -104,16 +104,16 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.23'
check-latest: true
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Helm tool installer
uses: Azure/setup-helm@v1
uses: azure/setup-helm@v4
with:
version: "v3.6.3"
- name: Change chart version
@@ -178,12 +178,12 @@ jobs:
needs: release-helm-chart
steps:
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.23'
check-latest: true
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Configure Git
@@ -191,7 +191,7 @@ jobs:
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Install Helm
uses: azure/setup-helm@v3
uses: azure/setup-helm@v4
- name: Change chart version
run: |
VERSION=${GITHUB_REF#refs/*/}


@@ -10,29 +10,29 @@ jobs:
image:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.23'
check-latest: true
- name: Push image to docker hub
run: |
echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USER }} --password-stdin
echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
docker buildx create --use
export VERSION=test
make container
export VERSION=${{github.event.pull_request.head.sha}}
make container-test
linux:
runs-on: ubuntu-latest
needs: [ "image" ]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.23'
check-latest: true
- name: Setup Minikube
id: minikube
@@ -61,7 +61,7 @@ jobs:
- name: Build
run: |
export VERSION=test
export VERSION=${{github.event.pull_request.head.sha}}
make kubevpn-linux-amd64
chmod +x ./bin/kubevpn
cp ./bin/kubevpn /usr/local/bin/kubevpn
@@ -86,12 +86,12 @@ jobs:
runs-on: macos-13
needs: [ "image" ]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.23'
check-latest: true
# https://github.com/crazy-max/ghaction-setup-docker/issues/108
@@ -131,7 +131,7 @@ jobs:
- name: Build
run: |
export VERSION=test
export VERSION=${{github.event.pull_request.head.sha}}
make kubevpn-darwin-amd64
chmod +x ./bin/kubevpn
cp ./bin/kubevpn /usr/local/bin/kubevpn
@@ -155,12 +155,12 @@ jobs:
runs-on: windows-latest
needs: [ "image" ]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.23'
- name: Set up Docker
uses: crazy-max/ghaction-setup-docker@v3


@@ -23,12 +23,12 @@ jobs:
arch: 386
steps:
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.23'
check-latest: true
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Build kubevpn
run: |


@@ -17,7 +17,6 @@ NAMESPACE ?= naison
REPOSITORY ?= kubevpn
IMAGE ?= $(REGISTRY)/$(NAMESPACE)/$(REPOSITORY):$(VERSION)
IMAGE_LATEST ?= docker.io/naison/kubevpn:latest
IMAGE_TEST ?= docker.io/naison/kubevpn:test
IMAGE_GH ?= ghcr.io/kubenetworks/kubevpn:$(VERSION)
# Setup the -ldflags option for go build here, interpolate the variable values
@@ -96,7 +95,7 @@ container-local: kubevpn-linux-amd64
.PHONY: container-test
container-test: kubevpn-linux-amd64
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE_TEST} -f $(BUILD_DIR)/test.Dockerfile --push .
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE} -t ${IMAGE_GH} -f $(BUILD_DIR)/test.Dockerfile --push .
.PHONY: version
version:

go.mod

@@ -1,6 +1,6 @@
module github.com/wencaiwulue/kubevpn/v2
go 1.22.1
go 1.23.2
require (
github.com/cilium/ipam v0.0.0-20230509084518-fd66eae7909b
@@ -62,7 +62,7 @@ require (
sigs.k8s.io/controller-runtime v0.18.4
sigs.k8s.io/kustomize/api v0.16.0
sigs.k8s.io/yaml v1.4.0
tailscale.com v1.72.1
tailscale.com v1.74.1
)
require (
@@ -173,7 +173,7 @@ require (
github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hdevalence/ed25519consensus v0.2.0 // indirect
github.com/illarion/gonotify v1.0.1 // indirect
github.com/illarion/gonotify/v2 v2.0.3 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/infobloxopen/go-trees v0.0.0-20221216143356-66ceba885ebc // indirect
@@ -244,10 +244,9 @@ require (
github.com/stretchr/testify v1.9.0 // indirect
github.com/syncthing/notify v0.0.0-20210616190510-c6b7342338d2 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 // indirect
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 // indirect
github.com/theupdateframework/notary v0.7.0 // indirect
github.com/tinylib/msgp v1.1.9 // indirect
github.com/vishvananda/netlink v1.2.1-beta.2 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
github.com/vitrun/qart v0.0.0-20160531060029-bf64b92db6b0 // indirect
github.com/x448/float16 v0.8.4 // indirect

go.sum

@@ -388,8 +388,8 @@ github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3s
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/illarion/gonotify v1.0.1 h1:F1d+0Fgbq/sDWjj/r66ekjDG+IDeecQKUFH4wNwsoio=
github.com/illarion/gonotify v1.0.1/go.mod h1:zt5pmDofZpU1f8aqlK0+95eQhoEAn/d4G4B/FjVW4jE=
github.com/illarion/gonotify/v2 v2.0.3 h1:B6+SKPo/0Sw8cRJh1aLzNEeNVFfzE3c6N+o+vyxM+9A=
github.com/illarion/gonotify/v2 v2.0.3/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
@@ -699,8 +699,8 @@ github.com/syncthing/syncthing v1.27.7 h1:N06QpAUPQi2VaB+X0wko5h8JtH3qJP5Dd4MYq9
github.com/syncthing/syncthing v1.27.7/go.mod h1:bMhGhc70k3UFszFhmCVNxM5bOsDfhxYMHvj4lWlIYBk=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 h1:zrsUcqrG2uQSPhaUPjUQwozcRdDdSxxqhNgNZ3drZFk=
github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0=
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU=
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0=
github.com/thejerf/suture/v4 v4.0.5 h1:F1E/4FZwXWqvlWDKEUo6/ndLtxGAUzMmNqkrMknZbAA=
github.com/thejerf/suture/v4 v4.0.5/go.mod h1:gu9Y4dXNUWFrByqRt30Rm9/UZ0wzRSt9AJS6xu/ZGxU=
github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c=
@@ -709,8 +709,6 @@ github.com/tinylib/msgp v1.1.9 h1:SHf3yoO2sGA0veCJeCBYLHuttAVFHGm2RHgNodW7wQU=
github.com/tinylib/msgp v1.1.9/go.mod h1:BCXGB54lDD8qUEPmiG0cQQUANC4IUQyB2ItS2UDlO/k=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs=
github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
@@ -1055,5 +1053,5 @@ sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+s
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
tailscale.com v1.72.1 h1:hk82jek36ph2S3Tfsh57NVWKEm/pZ9nfUonvlowpfaA=
tailscale.com v1.72.1/go.mod h1:v7OHtg0KLAnhOVf81Z8WrjNefj238QbFhgkWJQoKxbs=
tailscale.com v1.74.1 h1:qhhkN+0gFZasczi+0n0eBxwfP/ZaUr+05cWdsOQ3GT0=
tailscale.com v1.74.1/go.mod h1:3iACpCONQ4lauDXvwfoGlwNCpfbVxjdc2j6G9EuFOW8=


@@ -5,12 +5,11 @@ metadata:
spec:
version: v2.2.18
homepage: https://github.com/kubenetworks/kubevpn
shortDescription: "A vpn tunnel tools which can connect to kubernetes cluster network"
shortDescription: "KubeVPN offers a Cloud Native Dev Environment that connects to kubernetes cluster network"
description: |
KubeVPN is Cloud Native Dev Environment, connect to kubernetes cluster network, you can access remote kubernetes
cluster network, remote
kubernetes cluster service can also access your local service. and more, you can run your kubernetes pod on local Docker
container with same environment、volume、and network. you can develop your application on local PC totally.
KubeVPN offers a Cloud-Native Dev Environment that seamlessly connects to your Kubernetes cluster network.
Gain access to the Kubernetes cluster network effortlessly using service names or Pod IP/Service IP. Facilitate the interception of inbound traffic from remote Kubernetes cluster services to your local PC through a service mesh and more.
For instance, you have the flexibility to run your Kubernetes pod within a local Docker container, ensuring an identical environment, volume, and network setup. With KubeVPN, empower yourself to develop applications entirely on your local PC!
platforms:
- selector:


@@ -1,4 +0,0 @@
language: go
go:
- "1.x"


@@ -1,144 +0,0 @@
// +build linux
package gonotify
import (
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"syscall"
"unsafe"
)
// Inotify is the low level wrapper around inotify_init(), inotify_add_watch() and inotify_rm_watch()
type Inotify struct {
m sync.Mutex
fd int
f *os.File
watches map[string]uint32
rwatches map[uint32]string
}
// NewInotify creates new inotify instance
func NewInotify() (*Inotify, error) {
fd, err := syscall.InotifyInit1(syscall.IN_CLOEXEC)
if err != nil {
return nil, err
}
return &Inotify{
fd: fd,
f: os.NewFile(uintptr(fd), ""),
watches: make(map[string]uint32),
rwatches: make(map[uint32]string),
}, nil
}
// AddWatch adds given path to list of watched files / folders
func (i *Inotify) AddWatch(pathName string, mask uint32) error {
w, err := syscall.InotifyAddWatch(i.fd, pathName, mask)
if err != nil {
return err
}
i.m.Lock()
i.watches[pathName] = uint32(w)
i.rwatches[uint32(w)] = pathName
i.m.Unlock()
return nil
}
// RmWd removes watch by watch descriptor
func (i *Inotify) RmWd(wd uint32) error {
i.m.Lock()
defer i.m.Unlock()
pathName, ok := i.rwatches[wd]
if !ok {
return nil
}
_, err := syscall.InotifyRmWatch(i.fd, wd)
if err != nil {
return err
}
delete(i.watches, pathName)
delete(i.rwatches, wd)
return nil
}
// RmWatch removes watch by pathName
func (i *Inotify) RmWatch(pathName string) error {
i.m.Lock()
defer i.m.Unlock()
wd, ok := i.watches[pathName]
if !ok {
return nil
}
_, err := syscall.InotifyRmWatch(i.fd, wd)
if err != nil {
return err
}
delete(i.watches, pathName)
delete(i.rwatches, wd)
return nil
}
// Read reads portion of InotifyEvents and may fail with an error
func (i *Inotify) Read() ([]InotifyEvent, error) {
events := make([]InotifyEvent, 0, 1024)
buf := make([]byte, 1024*(syscall.SizeofInotifyEvent+16))
n, err := i.f.Read(buf)
if err != nil {
return events, err
}
if n < syscall.SizeofInotifyEvent {
return events, fmt.Errorf("Short inotify read")
}
offset := 0
for offset+syscall.SizeofInotifyEvent <= n {
event := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset]))
namebuf := buf[offset+syscall.SizeofInotifyEvent : offset+syscall.SizeofInotifyEvent+int(event.Len)]
offset += syscall.SizeofInotifyEvent + int(event.Len)
name := strings.TrimRight(string(namebuf), "\x00")
name = filepath.Join(i.rwatches[uint32(event.Wd)], name)
events = append(events, InotifyEvent{
Wd: uint32(event.Wd),
Name: name,
Mask: event.Mask,
Cookie: event.Cookie,
})
}
return events, nil
}
// Close should be called when inotify is no longer needed in order to cleanup used resources.
func (i *Inotify) Close() error {
i.m.Lock()
defer i.m.Unlock()
for _, w := range i.watches {
_, err := syscall.InotifyRmWatch(i.fd, w)
if err != nil {
return err
}
}
return i.f.Close()
}


@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2018 Illarion Kovalchuk
Copyright (c) 2018-2023 Ilarion Kovalchuk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal


@@ -2,8 +2,7 @@
Simple Golang inotify wrapper.
[![Build Status](https://travis-ci.org/illarion/gonotify.svg?branch=master)](https://travis-ci.org/illarion/gonotify)
[![GoDoc](https://godoc.org/github.com/illarion/gonotify?status.svg)](https://godoc.org/github.com/illarion/gonotify)
[![GoDoc](https://godoc.org/github.com/illarion/gonotify/v2?status.svg)](https://godoc.org/github.com/illarion/gonotify/v2)
### Provides following primitives:
@@ -18,6 +17,51 @@ Simple Golang inotify wrapper.
Use `FileWatcher` and `DirWatcher` as an example and build your own utility classes.
### Usage
```go
package main
import (
"fmt"
"github.com/illarion/gonotify/v2"
"time"
"context"
)
func main() {
ctx, cancel := context.WithCancel(context.Background())
watcher, err := gonotify.NewDirWatcher(ctx, gonotify.IN_CREATE|gonotify.IN_CLOSE, "/tmp")
if err != nil {
panic(err)
}
for {
select {
case event := <-watcher.C:
fmt.Printf("Event: %s\n", event)
if event.Mask&gonotify.IN_CREATE != 0 {
fmt.Printf("File created: %s\n", event.Name)
}
if event.Mask&gonotify.IN_CLOSE != 0 {
fmt.Printf("File closed: %s\n", event.Name)
}
case <-time.After(5 * time.Second):
fmt.Println("Timeout")
cancel()
return
}
}
}
```
## License
MIT. See LICENSE file for more details.


@@ -1,6 +1,7 @@
package gonotify
import (
"context"
"os"
"path/filepath"
)
@@ -9,20 +10,21 @@ import (
// Events can be masked by providing fileMask. DirWatcher does not generate events for
// folders or subfolders.
type DirWatcher struct {
stopC chan struct{}
C chan FileEvent
}
// NewDirWatcher creates DirWatcher recursively waiting for events in the given root folder and
// emitting FileEvents in channel C, that correspond to fileMask. Folder events are ignored (having IN_ISDIR set to 1)
func NewDirWatcher(fileMask uint32, root string) (*DirWatcher, error) {
func NewDirWatcher(ctx context.Context, fileMask uint32, root string) (*DirWatcher, error) {
dw := &DirWatcher{
stopC: make(chan struct{}),
C: make(chan FileEvent),
}
i, err := NewInotify()
ctx, cancel := context.WithCancel(ctx)
i, err := NewInotify(ctx)
if err != nil {
cancel()
return nil, err
}
@@ -50,7 +52,7 @@ func NewDirWatcher(fileMask uint32, root string) (*DirWatcher, error) {
})
if err != nil {
i.Close()
cancel()
return nil, err
}
@@ -128,13 +130,14 @@ func NewDirWatcher(fileMask uint32, root string) (*DirWatcher, error) {
go func() {
for {
select {
case <-dw.stopC:
i.Close()
case <-ctx.Done():
return
case event, ok := <-events:
if !ok {
dw.C <- FileEvent{
Eof: true,
}
cancel()
return
}
@@ -151,10 +154,3 @@ func NewDirWatcher(fileMask uint32, root string) (*DirWatcher, error) {
return dw, nil
}
func (d *DirWatcher) Close() {
select {
case d.stopC <- struct{}{}:
default:
}
}


@@ -1,3 +1,4 @@
//go:build linux
// +build linux
package gonotify
@@ -90,11 +91,35 @@ type InotifyEvent struct {
}
func (i InotifyEvent) GoString() string {
return fmt.Sprintf("gonotify.InotifyEvent{Wd=%#v, Name=%s, Cookie=%#v, Mask=%#v=%s", i.Wd, i.Name, i.Cookie, i.Mask, InMaskToString(i.Mask))
return fmt.Sprintf("gonotify.InotifyEvent{Wd=%#v, Name=%s, Cookie=%#v, Mask=%#v=%s}", i.Wd, i.Name, i.Cookie, i.Mask, InMaskToString(i.Mask))
}
func (i InotifyEvent) String() string {
return fmt.Sprintf("{Wd=%d, Name=%s, Cookie=%d, Mask=%s", i.Wd, i.Name, i.Cookie, InMaskToString(i.Mask))
return fmt.Sprintf("{Wd=%d, Name=%s, Cookie=%d, Mask=%s}", i.Wd, i.Name, i.Cookie, InMaskToString(i.Mask))
}
// IsAny returns true if any of the in_mask is set in the event
func (i InotifyEvent) IsAny(in_mask ...uint32) bool {
for _, mask := range in_mask {
if i.Mask&mask == mask {
return true
}
}
return false
}
// IsAll returns true if all the in_masks is set in the event
func (i InotifyEvent) IsAll(in_mask ...uint32) bool {
for _, mask := range in_mask {
if i.Mask&mask != mask {
return false
}
}
return true
}
func (i InotifyEvent) Is(in_mask uint32) bool {
return i.Mask&in_mask == in_mask
}
// FileEvent is the wrapper around InotifyEvent with additional Eof marker. Reading from
@@ -103,3 +128,11 @@ type FileEvent struct {
InotifyEvent
Eof bool
}
func (f FileEvent) GoString() string {
return fmt.Sprintf("gonotify.FileEvent{InotifyEvent=%#v, Eof=%#v}", f.InotifyEvent, f.Eof)
}
func (f FileEvent) String() string {
return fmt.Sprintf("{InotifyEvent=%s, Eof=%v}", f.InotifyEvent, f.Eof)
}


@@ -1,24 +1,28 @@
package gonotify
import "path/filepath"
import (
"context"
"path/filepath"
)
// FileWatcher waits for events generated by filesystem for a specific list of file paths, including
// IN_CREATE for not yet existing files and IN_DELETE for removed.
type FileWatcher struct {
stopC chan struct{}
C chan FileEvent
}
// NewFileWatcher creates FileWatcher with provided inotify mask and list of files to wait events for.
func NewFileWatcher(mask uint32, files ...string) (*FileWatcher, error) {
func NewFileWatcher(ctx context.Context, mask uint32, files ...string) (*FileWatcher, error) {
f := &FileWatcher{
stopC: make(chan struct{}),
C: make(chan FileEvent),
}
inotify, err := NewInotify()
ctx, cancel := context.WithCancel(ctx)
inotify, err := NewInotify(ctx)
if err != nil {
cancel()
return nil, err
}
@@ -27,7 +31,7 @@ func NewFileWatcher(mask uint32, files ...string) (*FileWatcher, error) {
for _, file := range files {
err := inotify.AddWatch(filepath.Dir(file), mask)
if err != nil {
inotify.Close()
cancel()
return nil, err
}
expectedPaths[file] = true
@@ -36,6 +40,7 @@ func NewFileWatcher(mask uint32, files ...string) (*FileWatcher, error) {
events := make(chan FileEvent)
go func() {
defer cancel()
for {
raw, err := inotify.Read()
@@ -45,19 +50,23 @@ func NewFileWatcher(mask uint32, files ...string) (*FileWatcher, error) {
}
for _, event := range raw {
events <- FileEvent{
select {
case <-ctx.Done():
return
case events <- FileEvent{
InotifyEvent: event,
}: //noop
}
}
}
}()
go func() {
defer cancel()
for {
select {
case <-f.stopC:
inotify.Close()
case <-ctx.Done():
return
case event, ok := <-events:
if !ok {
@@ -78,7 +87,3 @@ func NewFileWatcher(mask uint32, files ...string) (*FileWatcher, error) {
return f, nil
}
func (f *FileWatcher) Close() {
f.stopC <- struct{}{}
}
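The hunk above shows the v2 FileWatcher dropping its stopC/Close plumbing in favour of context cancellation. A minimal sketch of how a caller would use the new constructor, using only the mask constants and the C channel visible in these diffs (the watched path is illustrative):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/illarion/gonotify/v2"
)

func main() {
	// Cancelling the context now stops the watcher; there is no Close() in v2.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// v2 signature: NewFileWatcher(ctx, mask, files...)
	watcher, err := gonotify.NewFileWatcher(ctx, gonotify.IN_CREATE|gonotify.IN_CLOSE, "/tmp/app.conf")
	if err != nil {
		panic(err)
	}

	for {
		select {
		case <-ctx.Done():
			return
		case event := <-watcher.C:
			if event.Eof {
				return // watcher shut down
			}
			fmt.Println("event:", event.Name)
		}
	}
}
```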

vendor/github.com/illarion/gonotify/v2/inotify.go generated vendored Normal file

@@ -0,0 +1,290 @@
//go:build linux
// +build linux
package gonotify
import (
"context"
"errors"
"fmt"
"path/filepath"
"strings"
"syscall"
"time"
"unsafe"
)
// max number of events to read at once
const maxEvents = 1024
var TimeoutError = errors.New("Inotify timeout")
type getWatchRequest struct {
pathName string
result chan uint32
}
type getPathRequest struct {
wd uint32
result chan string
}
type addWatchRequest struct {
pathName string
wd uint32
}
// Inotify is the low level wrapper around inotify_init(), inotify_add_watch() and inotify_rm_watch()
type Inotify struct {
// ctx is the context of inotify instance
ctx context.Context
// fd is the file descriptor of inotify instance
fd int
// getWatchByPathIn is the channel for getting watch descriptor by path
getWatchByPathIn chan getWatchRequest
// getPathByWatchIn is the channel for getting path by watch descriptor
getPathByWatchIn chan getPathRequest
// addWatchIn is the channel for adding watch
addWatchIn chan addWatchRequest
// rmByWdIn is the channel for removing watch by watch descriptor
rmByWdIn chan uint32
// rmByPathIn is the channel for removing watch by path
rmByPathIn chan string
}
// NewInotify creates new inotify instance
func NewInotify(ctx context.Context) (*Inotify, error) {
fd, err := syscall.InotifyInit1(syscall.IN_CLOEXEC | syscall.IN_NONBLOCK)
if err != nil {
return nil, err
}
inotify := &Inotify{
ctx: ctx,
fd: fd,
getPathByWatchIn: make(chan getPathRequest),
getWatchByPathIn: make(chan getWatchRequest),
addWatchIn: make(chan addWatchRequest),
rmByWdIn: make(chan uint32),
rmByPathIn: make(chan string),
}
go func() {
watches := make(map[string]uint32)
paths := make(map[uint32]string)
for {
select {
case <-ctx.Done():
for _, w := range watches {
_, err := syscall.InotifyRmWatch(fd, w)
if err != nil {
continue
}
}
syscall.Close(fd)
return
case req := <-inotify.addWatchIn:
watches[req.pathName] = req.wd
paths[req.wd] = req.pathName
case req := <-inotify.getWatchByPathIn:
wd, ok := watches[req.pathName]
if !ok {
close(req.result)
}
req.result <- wd
close(req.result)
case req := <-inotify.getPathByWatchIn:
pathName, ok := paths[req.wd]
if !ok {
close(req.result)
}
req.result <- pathName
close(req.result)
case wd := <-inotify.rmByWdIn:
pathName, ok := paths[wd]
if !ok {
continue
}
delete(watches, pathName)
delete(paths, wd)
case pathName := <-inotify.rmByPathIn:
wd, ok := watches[pathName]
if !ok {
continue
}
delete(watches, pathName)
delete(paths, wd)
}
}
}()
return inotify, nil
}
// AddWatch adds given path to list of watched files / folders
func (i *Inotify) AddWatch(pathName string, mask uint32) error {
w, err := syscall.InotifyAddWatch(i.fd, pathName, mask)
if err != nil {
return err
}
select {
case <-i.ctx.Done():
return i.ctx.Err()
case i.addWatchIn <- addWatchRequest{
pathName: pathName,
wd: uint32(w)}:
return nil
}
}
// RmWd removes watch by watch descriptor
func (i *Inotify) RmWd(wd uint32) error {
select {
case <-i.ctx.Done():
return i.ctx.Err()
case i.rmByWdIn <- wd:
return nil
}
}
// RmWatch removes watch by pathName
func (i *Inotify) RmWatch(pathName string) error {
select {
case <-i.ctx.Done():
return i.ctx.Err()
case i.rmByPathIn <- pathName:
return nil
}
}
// Read reads portion of InotifyEvents and may fail with an error. If no events are available, it will
// wait forever, until context is cancelled.
func (i *Inotify) Read() ([]InotifyEvent, error) {
for {
evts, err := i.ReadDeadline(time.Now().Add(time.Millisecond * 200))
if err != nil {
if err == TimeoutError {
continue
}
return evts, err
}
if len(evts) > 0 {
return evts, nil
}
}
}
// ReadDeadline waits for InotifyEvents until deadline is reached, or context is cancelled. If
// deadline is reached, TimeoutError is returned.
func (i *Inotify) ReadDeadline(deadline time.Time) ([]InotifyEvent, error) {
events := make([]InotifyEvent, 0, maxEvents)
buf := make([]byte, maxEvents*(syscall.SizeofInotifyEvent+syscall.NAME_MAX+1))
var n int
var err error
fdset := &syscall.FdSet{}
//main:
for {
if i.ctx.Err() != nil {
return events, i.ctx.Err()
}
now := time.Now()
if now.After(deadline) {
return events, TimeoutError
}
diff := deadline.Sub(now)
timeout := syscall.NsecToTimeval(diff.Nanoseconds())
fdset.Bits[0] = 1 << uint(i.fd)
_, err = syscall.Select(i.fd+1, fdset, nil, nil, &timeout)
if err != nil {
if err == syscall.EINTR {
continue
}
return events, err
}
if fdset.Bits[0]&(1<<uint(i.fd)) == 0 {
continue // No data to read, continue waiting
}
n, err = syscall.Read(i.fd, buf)
if err != nil {
if err == syscall.EAGAIN {
continue
}
return events, err
}
if n > 0 {
break
}
}
if n < syscall.SizeofInotifyEvent {
return events, fmt.Errorf("short inotify read, expected at least one SizeofInotifyEvent %d, got %d", syscall.SizeofInotifyEvent, n)
}
offset := 0
for offset+syscall.SizeofInotifyEvent <= n {
event := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset]))
var name string
{
nameStart := offset + syscall.SizeofInotifyEvent
nameEnd := offset + syscall.SizeofInotifyEvent + int(event.Len)
if nameEnd > n {
return events, fmt.Errorf("corrupted inotify event length %d", event.Len)
}
name = strings.TrimRight(string(buf[nameStart:nameEnd]), "\x00")
offset = nameEnd
}
req := getPathRequest{
wd: uint32(event.Wd),
result: make(chan string),
}
select {
case <-i.ctx.Done():
return events, i.ctx.Err()
case i.getPathByWatchIn <- req:
select {
case <-i.ctx.Done():
return events, i.ctx.Err()
case watchName := <-req.result:
name = filepath.Join(watchName, name)
}
}
events = append(events, InotifyEvent{
Wd: uint32(event.Wd),
Name: name,
Mask: event.Mask,
Cookie: event.Cookie,
})
}
return events, nil
}
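The new Inotify implementation above exposes ReadDeadline and a TimeoutError sentinel, so callers can poll with a bounded wait instead of blocking on a file read. A small sketch of that loop, using only the API visible in this file (the /tmp path and mask are illustrative):

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/illarion/gonotify/v2"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // cancelling the context releases the watches and the inotify fd

	i, err := gonotify.NewInotify(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if err := i.AddWatch("/tmp", gonotify.IN_CREATE|gonotify.IN_CLOSE); err != nil {
		log.Fatal(err)
	}

	for {
		events, err := i.ReadDeadline(time.Now().Add(200 * time.Millisecond))
		if err == gonotify.TimeoutError {
			continue // deadline reached with nothing to report
		}
		if err != nil {
			log.Fatal(err)
		}
		for _, e := range events {
			log.Printf("wd=%d name=%s mask=%s", e.Wd, e.Name, gonotify.InMaskToString(e.Mask))
		}
	}
}
```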


@@ -17,4 +17,4 @@ before_script:
- sudo modprobe sch_sfq
install:
- go get -v -t ./...
go_import_path: github.com/vishvananda/netlink
go_import_path: github.com/tailscale/netlink


@@ -19,7 +19,7 @@ $(call goroot,$(DEPS)):
.PHONY: $(call testdirs,$(DIRS))
$(call testdirs,$(DIRS)):
go test -test.exec sudo -test.parallel 4 -timeout 60s -test.v github.com/vishvananda/netlink/$@
go test -test.exec sudo -test.parallel 4 -timeout 60s -test.v github.com/tailscale/netlink/$@
$(call fmt,$(call testdirs,$(DIRS))):
! gofmt -l $(subst fmt-,,$@)/*.go | grep -q .


@@ -1,6 +1,6 @@
# netlink - netlink library for go #
[![Build Status](https://app.travis-ci.com/vishvananda/netlink.svg?branch=master)](https://app.travis-ci.com/github/vishvananda/netlink) [![GoDoc](https://godoc.org/github.com/vishvananda/netlink?status.svg)](https://godoc.org/github.com/vishvananda/netlink)
[![Build Status](https://app.travis-ci.com/vishvananda/netlink.svg?branch=master)](https://app.travis-ci.com/github/vishvananda/netlink) [![GoDoc](https://godoc.org/github.com/tailscale/netlink?status.svg)](https://godoc.org/github.com/tailscale/netlink)
The netlink package provides a simple netlink library for go. Netlink
is the interface a user-space program in linux uses to communicate with
@@ -20,7 +20,7 @@ functionality like ipsec xfrm handling.
You can use go get command:
go get github.com/vishvananda/netlink
go get github.com/tailscale/netlink
Testing dependencies:
@@ -28,7 +28,7 @@ Testing dependencies:
Testing (requires root):
sudo -E go test github.com/vishvananda/netlink
sudo -E go test github.com/tailscale/netlink
## Examples ##
@@ -39,7 +39,7 @@ package main
import (
"fmt"
"github.com/vishvananda/netlink"
"github.com/tailscale/netlink"
)
func main() {
@@ -66,7 +66,7 @@ Add a new ip address to loopback:
package main
import (
"github.com/vishvananda/netlink"
"github.com/tailscale/netlink"
)
func main() {


@@ -6,7 +6,7 @@ import (
"strings"
"syscall"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"github.com/vishvananda/netns"
"golang.org/x/sys/unix"
)


@@ -3,7 +3,7 @@ package netlink
import (
"fmt"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"golang.org/x/sys/unix"
)


@@ -8,7 +8,7 @@ import (
"fmt"
"syscall"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"golang.org/x/sys/unix"
)


@@ -8,7 +8,7 @@ import (
"net"
"time"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"golang.org/x/sys/unix"
)


@@ -6,7 +6,7 @@ import (
"strings"
"syscall"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"golang.org/x/sys/unix"
)


@@ -9,7 +9,7 @@ import (
"net"
"syscall"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"golang.org/x/sys/unix"
)
@@ -53,6 +53,7 @@ func (filter *U32) Type() string {
// Fw filter filters on firewall marks
// NOTE: this is in filter_linux because it refers to nl.TcPolice which
//
// is defined in nl/tc_linux.go
type Fw struct {
FilterAttrs


@@ -1,3 +1,4 @@
//go:build linux
// +build linux
package netlink
@@ -6,7 +7,7 @@ import (
"encoding/binary"
"errors"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"golang.org/x/sys/unix"
)


@@ -4,7 +4,7 @@ import (
"fmt"
"syscall"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"golang.org/x/sys/unix"
)


@@ -6,7 +6,7 @@ import (
"strings"
"syscall"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"golang.org/x/sys/unix"
)


@@ -4,7 +4,7 @@ import (
"fmt"
"time"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"github.com/vishvananda/netns"
"golang.org/x/sys/unix"
)


@@ -6,7 +6,7 @@ import (
"net"
"syscall"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"golang.org/x/sys/unix"
)


@@ -12,7 +12,7 @@ import (
"syscall"
"unsafe"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"github.com/vishvananda/netns"
"golang.org/x/sys/unix"
)


@@ -6,7 +6,7 @@ import (
"syscall"
"unsafe"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"github.com/vishvananda/netns"
"golang.org/x/sys/unix"
)


@@ -1,6 +1,6 @@
package netlink
import "github.com/vishvananda/netlink/nl"
import "github.com/tailscale/netlink/nl"
// Family type definitions
const (


@@ -15,7 +15,7 @@ package netlink
import (
"fmt"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"golang.org/x/sys/unix"
)


@@ -28,7 +28,6 @@ const (
RECEIVE_BUFFER_SIZE = 65536
// Kernel netlink pid
PidKernel uint32 = 0
SizeofCnMsgOp = 0x18
)
// SupportedNlFamilies contains the list of netlink families this netlink package supports
@@ -39,9 +38,6 @@ var nextSeqNr uint32
// Default netlink socket timeout, 60s
var SocketTimeoutTv = unix.Timeval{Sec: 60, Usec: 0}
// ErrorMessageReporting is the default error message reporting configuration for the new netlink sockets
var EnableErrorMessageReporting bool = false
// GetIPFamily returns the family type of a net.IP.
func GetIPFamily(ip net.IP) int {
if len(ip) <= net.IPv4len {
@@ -84,69 +80,11 @@ func Swap32(i uint32) uint32 {
return (i&0xff000000)>>24 | (i&0xff0000)>>8 | (i&0xff00)<<8 | (i&0xff)<<24
}
const (
NLMSGERR_ATTR_UNUSED = 0
NLMSGERR_ATTR_MSG = 1
NLMSGERR_ATTR_OFFS = 2
NLMSGERR_ATTR_COOKIE = 3
NLMSGERR_ATTR_POLICY = 4
)
type NetlinkRequestData interface {
Len() int
Serialize() []byte
}
const (
PROC_CN_MCAST_LISTEN = 1
PROC_CN_MCAST_IGNORE
)
type CbID struct {
Idx uint32
Val uint32
}
type CnMsg struct {
ID CbID
Seq uint32
Ack uint32
Length uint16
Flags uint16
}
type CnMsgOp struct {
CnMsg
// here we differ from the C header
Op uint32
}
func NewCnMsg(idx, val, op uint32) *CnMsgOp {
var cm CnMsgOp
cm.ID.Idx = idx
cm.ID.Val = val
cm.Ack = 0
cm.Seq = 1
cm.Length = uint16(binary.Size(op))
cm.Op = op
return &cm
}
func (msg *CnMsgOp) Serialize() []byte {
return (*(*[SizeofCnMsgOp]byte)(unsafe.Pointer(msg)))[:]
}
func DeserializeCnMsgOp(b []byte) *CnMsgOp {
return (*CnMsgOp)(unsafe.Pointer(&b[0:SizeofCnMsgOp][0]))
}
func (msg *CnMsgOp) Len() int {
return SizeofCnMsgOp
}
// IfInfomsg is related to links, but it is used for list requests as well
type IfInfomsg struct {
unix.IfInfomsg
@@ -314,12 +252,6 @@ func (msg *IfInfomsg) EncapType() string {
return fmt.Sprintf("unknown%d", msg.Type)
}
// Round the length of a netlink message up to align it properly.
// Taken from syscall/netlink_linux.go by The Go Authors under BSD-style license.
func nlmAlignOf(msglen int) int {
return (msglen + syscall.NLMSG_ALIGNTO - 1) & ^(syscall.NLMSG_ALIGNTO - 1)
}
func rtaAlignOf(attrlen int) int {
return (attrlen + unix.RTA_ALIGNTO - 1) & ^(unix.RTA_ALIGNTO - 1)
}
@@ -504,11 +436,6 @@ func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, erro
if err := s.SetReceiveTimeout(&SocketTimeoutTv); err != nil {
return nil, err
}
if EnableErrorMessageReporting {
if err := s.SetExtAck(true); err != nil {
return nil, err
}
}
defer s.Close()
} else {
@@ -548,37 +475,11 @@ done:
}
if m.Header.Type == unix.NLMSG_DONE || m.Header.Type == unix.NLMSG_ERROR {
native := NativeEndian()
errno := int32(native.Uint32(m.Data[0:4]))
if errno == 0 {
error := int32(native.Uint32(m.Data[0:4]))
if error == 0 {
break done
}
var err error
err = syscall.Errno(-errno)
unreadData := m.Data[4:]
if m.Header.Flags|unix.NLM_F_ACK_TLVS != 0 && len(unreadData) > syscall.SizeofNlMsghdr {
// Skip the echoed request message.
echoReqH := (*syscall.NlMsghdr)(unsafe.Pointer(&unreadData[0]))
unreadData = unreadData[nlmAlignOf(int(echoReqH.Len)):]
// Annotate `err` using nlmsgerr attributes.
for len(unreadData) >= syscall.SizeofRtAttr {
attr := (*syscall.RtAttr)(unsafe.Pointer(&unreadData[0]))
attrData := unreadData[syscall.SizeofRtAttr:attr.Len]
switch attr.Type {
case NLMSGERR_ATTR_MSG:
err = fmt.Errorf("%w: %s", err, string(attrData))
default:
// TODO: handle other NLMSGERR_ATTR types
}
unreadData = unreadData[rtaAlignOf(int(attr.Len)):]
}
}
return nil, err
return nil, syscall.Errno(-error)
}
if resType != 0 && m.Header.Type != resType {
continue
@@ -793,16 +694,6 @@ func (s *NetlinkSocket) SetReceiveTimeout(timeout *unix.Timeval) error {
return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_RCVTIMEO, timeout)
}
// SetExtAck requests error messages to be reported on the socket
func (s *NetlinkSocket) SetExtAck(enable bool) error {
var enableN int
if enable {
enableN = 1
}
return unix.SetsockoptInt(int(s.fd), unix.SOL_NETLINK, unix.NETLINK_EXT_ACK, enableN)
}
func (s *NetlinkSocket) GetPid() (uint32, error) {
fd := int(atomic.LoadInt32(&s.fd))
lsa, err := unix.Getsockname(fd)


@@ -90,7 +90,6 @@ const (
SizeofTcU32Sel = 0x10 // without keys
SizeofTcGen = 0x14
SizeofTcConnmark = SizeofTcGen + 0x04
SizeofTcCsum = SizeofTcGen + 0x04
SizeofTcMirred = SizeofTcGen + 0x08
SizeofTcTunnelKey = SizeofTcGen + 0x04
SizeofTcSkbEdit = SizeofTcGen
@@ -695,36 +694,6 @@ func (x *TcConnmark) Serialize() []byte {
return (*(*[SizeofTcConnmark]byte)(unsafe.Pointer(x)))[:]
}
const (
TCA_CSUM_UNSPEC = iota
TCA_CSUM_PARMS
TCA_CSUM_TM
TCA_CSUM_PAD
TCA_CSUM_MAX = TCA_CSUM_PAD
)
// struct tc_csum {
// tc_gen;
// __u32 update_flags;
// }
type TcCsum struct {
TcGen
UpdateFlags uint32
}
func (msg *TcCsum) Len() int {
return SizeofTcCsum
}
func DeserializeTcCsum(b []byte) *TcCsum {
return (*TcCsum)(unsafe.Pointer(&b[0:SizeofTcCsum][0]))
}
func (x *TcCsum) Serialize() []byte {
return (*(*[SizeofTcCsum]byte)(unsafe.Pointer(x)))[:]
}
const (
TCA_ACT_MIRRED = 8
)


@@ -3,7 +3,7 @@ package netlink
import (
"encoding/binary"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
)
var (


@@ -4,7 +4,7 @@ import (
"fmt"
"syscall"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"golang.org/x/sys/unix"
)


@@ -7,7 +7,7 @@ import (
"strings"
"syscall"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"golang.org/x/sys/unix"
)


@@ -6,7 +6,7 @@ import (
"fmt"
"net"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"golang.org/x/sys/unix"
)
@@ -307,6 +307,7 @@ func (h *Handle) RdmaLinkDel(name string) error {
// RdmaLinkAdd adds an rdma link for the specified type to the network device.
// Similar to: rdma link add NAME type TYPE netdev NETDEV
//
// NAME - specifies the new name of the rdma link to add
// TYPE - specifies which rdma type to use. Link types:
// rxe - Soft RoCE driver


@@ -9,7 +9,7 @@ import (
"strings"
"syscall"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"github.com/vishvananda/netns"
"golang.org/x/sys/unix"
)


@@ -5,7 +5,7 @@ import (
"fmt"
"net"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"golang.org/x/sys/unix"
)


@@ -6,7 +6,7 @@ import (
"net"
"syscall"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"golang.org/x/sys/unix"
)


@@ -3,7 +3,7 @@ package netlink
import (
"fmt"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"github.com/vishvananda/netns"
"golang.org/x/sys/unix"
)


@@ -1,7 +1,7 @@
package netlink
import (
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"golang.org/x/sys/unix"
)


@@ -4,7 +4,7 @@ import (
"fmt"
"unsafe"
"github.com/vishvananda/netlink/nl"
"github.com/tailscale/netlink/nl"
"golang.org/x/sys/unix"
)


@@ -1,192 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014 Vishvananda Ishaya.
Copyright 2014 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/modules.txt vendored

@@ -780,9 +780,9 @@ github.com/hpcloud/tail/ratelimiter
github.com/hpcloud/tail/util
github.com/hpcloud/tail/watch
github.com/hpcloud/tail/winfile
# github.com/illarion/gonotify v1.0.1
# github.com/illarion/gonotify/v2 v2.0.3
## explicit; go 1.12
github.com/illarion/gonotify
github.com/illarion/gonotify/v2
# github.com/imdario/mergo v0.3.16
## explicit; go 1.13
github.com/imdario/mergo
@@ -1209,9 +1209,10 @@ github.com/syndtr/goleveldb/leveldb/opt
github.com/syndtr/goleveldb/leveldb/storage
github.com/syndtr/goleveldb/leveldb/table
github.com/syndtr/goleveldb/leveldb/util
# github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85
# github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7
## explicit; go 1.12
github.com/tailscale/netlink
github.com/tailscale/netlink/nl
# github.com/thejerf/suture/v4 v4.0.5
## explicit; go 1.9
github.com/thejerf/suture/v4
@@ -1234,9 +1235,6 @@ github.com/theupdateframework/notary/tuf/validation
# github.com/tinylib/msgp v1.1.9
## explicit; go 1.18
github.com/tinylib/msgp/msgp
# github.com/vishvananda/netlink v1.2.1-beta.2
## explicit; go 1.12
github.com/vishvananda/netlink/nl
# github.com/vishvananda/netns v0.0.4
## explicit; go 1.17
github.com/vishvananda/netns
@@ -2392,15 +2390,17 @@ sigs.k8s.io/structured-merge-diff/v4/value
sigs.k8s.io/yaml
sigs.k8s.io/yaml/goyaml.v2
sigs.k8s.io/yaml/goyaml.v3
# tailscale.com v1.72.1
## explicit; go 1.22.0
# tailscale.com v1.74.1
## explicit; go 1.23
tailscale.com
tailscale.com/atomicfile
tailscale.com/control/controlknobs
tailscale.com/envknob
tailscale.com/health
tailscale.com/hostinfo
tailscale.com/kube/kubetypes
tailscale.com/logtail/backoff
tailscale.com/metrics
tailscale.com/net/dns
tailscale.com/net/dns/publicdns
tailscale.com/net/dns/resolvconffile
@@ -2422,6 +2422,7 @@ tailscale.com/tsconst
tailscale.com/tstime
tailscale.com/tstime/mono
tailscale.com/tstime/rate
tailscale.com/tsweb/varz
tailscale.com/types/dnstype
tailscale.com/types/ipproto
tailscale.com/types/key
@@ -2439,6 +2440,7 @@ tailscale.com/util/clientmetric
tailscale.com/util/cloudenv
tailscale.com/util/cmpver
tailscale.com/util/ctxkey
tailscale.com/util/dirwalk
tailscale.com/util/dnsname
tailscale.com/util/lineread
tailscale.com/util/linuxfw
@@ -2450,6 +2452,7 @@ tailscale.com/util/set
tailscale.com/util/singleflight
tailscale.com/util/slicesx
tailscale.com/util/testenv
tailscale.com/util/usermetric
tailscale.com/util/vizerror
tailscale.com/util/winutil
tailscale.com/util/winutil/gp

vendor/tailscale.com/.gitignore generated vendored

@@ -43,3 +43,9 @@ client/web/build/assets
/gocross
/dist
# Ignore xcode userstate and workspace data
*.xcuserstate
*.xcworkspacedata
/tstest/tailmac/bin
/tstest/tailmac/build

vendor/tailscale.com/Dockerfile generated vendored

@@ -27,7 +27,7 @@
# $ docker exec tailscaled tailscale status
FROM golang:1.22-alpine AS build-env
FROM golang:1.23-alpine AS build-env
WORKDIR /go/src/tailscale

vendor/tailscale.com/Makefile generated vendored

@@ -117,7 +117,8 @@ sshintegrationtest: ## Run the SSH integration tests in various Docker container
echo "Testing on ubuntu:focal" && docker build --build-arg="BASE=ubuntu:focal" -t ssh-ubuntu-focal ssh/tailssh/testcontainers && \
echo "Testing on ubuntu:jammy" && docker build --build-arg="BASE=ubuntu:jammy" -t ssh-ubuntu-jammy ssh/tailssh/testcontainers && \
echo "Testing on ubuntu:mantic" && docker build --build-arg="BASE=ubuntu:mantic" -t ssh-ubuntu-mantic ssh/tailssh/testcontainers && \
echo "Testing on ubuntu:noble" && docker build --build-arg="BASE=ubuntu:noble" -t ssh-ubuntu-noble ssh/tailssh/testcontainers
echo "Testing on ubuntu:noble" && docker build --build-arg="BASE=ubuntu:noble" -t ssh-ubuntu-noble ssh/tailssh/testcontainers && \
echo "Testing on alpine:latest" && docker build --build-arg="BASE=alpine:latest" -t ssh-alpine-latest ssh/tailssh/testcontainers
help: ## Show this help
@echo "\nSpecify a command. The choices are:\n"

vendor/tailscale.com/README.md generated vendored

@@ -37,7 +37,7 @@ not open source.
## Building
We always require the latest Go release, currently Go 1.22. (While we build
We always require the latest Go release, currently Go 1.23. (While we build
releases with our [Go fork](https://github.com/tailscale/go/), its use is not
required.)

vendor/tailscale.com/VERSION.txt generated vendored

@@ -1 +1 @@
1.72.1
1.74.1


@@ -20,16 +20,18 @@ import (
"fmt"
"io"
"log"
"maps"
"os"
"path/filepath"
"runtime"
"sort"
"slices"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"tailscale.com/kube/kubetypes"
"tailscale.com/types/opt"
"tailscale.com/version"
"tailscale.com/version/distro"
@@ -76,12 +78,7 @@ func LogCurrent(logf logf) {
mu.Lock()
defer mu.Unlock()
list := make([]string, 0, len(set))
for k := range set {
list = append(list, k)
}
sort.Strings(list)
for _, k := range list {
for _, k := range slices.Sorted(maps.Keys(set)) {
logf("envknob: %s=%q", k, set[k])
}
}
@@ -406,6 +403,19 @@ func SSHIgnoreTailnetPolicy() bool { return Bool("TS_DEBUG_SSH_IGNORE_TAILNET_PO
// TKASkipSignatureCheck reports whether to skip node-key signature checking for development.
func TKASkipSignatureCheck() bool { return Bool("TS_UNSAFE_SKIP_NKS_VERIFICATION") }
// App returns the tailscale app type of this instance, if set via
// TS_INTERNAL_APP env var. TS_INTERNAL_APP can be used to set app type for
// components that wrap tailscaled, such as containerboot. App type is intended
// to only be used to set known predefined app types, such as Tailscale
// Kubernetes Operator components.
func App() string {
a := os.Getenv("TS_INTERNAL_APP")
if a == kubetypes.AppConnector || a == kubetypes.AppEgressProxy || a == kubetypes.AppIngressProxy || a == kubetypes.AppIngressResource {
return a
}
return ""
}
// CrashOnUnexpected reports whether the Tailscale client should panic
// on unexpected conditions. If TS_DEBUG_CRASH_ON_UNEXPECTED is set, that's
// used. Otherwise the default value is true for unstable builds.
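Of the vendored tailscale.com changes, the envknob hunk above is the clearest illustration of why go.mod now declares go 1.23: maps.Keys returns an iterator and slices.Sorted consumes it directly, replacing the make/append/sort.Strings dance. A standalone sketch of the same pattern (map contents made up for illustration):

```go
package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	set := map[string]string{
		"TS_DEBUG_FOO": "1",
		"TS_ALPHA":     "on",
	}

	// Go 1.23: maps.Keys yields an iter.Seq[string]; slices.Sorted collects
	// and sorts it in one call, so no intermediate slice or sort.Strings is needed.
	for _, k := range slices.Sorted(maps.Keys(set)) {
		fmt.Printf("envknob: %s=%q\n", k, set[k])
	}
}
```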

vendor/tailscale.com/flake.lock generated vendored

@@ -21,11 +21,11 @@
"systems": "systems"
},
"locked": {
"lastModified": 1705309234,
"narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=",
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
@@ -36,11 +36,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1707619277,
"narHash": "sha256-vKnYD5GMQbNQyyQm4wRlqi+5n0/F1hnvqSQgaBy4BqY=",
"lastModified": 1724748588,
"narHash": "sha256-NlpGA4+AIf1dKNq76ps90rxowlFXUsV9x7vK/mN37JM=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "f3a93440fbfff8a74350f4791332a19282cc6dc8",
"rev": "a6292e34000dc93d43bccf78338770c1c5ec8a99",
"type": "github"
},
"original": {

vendor/tailscale.com/flake.nix generated vendored

@@ -40,7 +40,12 @@
};
};
outputs = { self, nixpkgs, flake-utils, flake-compat }: let
outputs = {
self,
nixpkgs,
flake-utils,
flake-compat,
}: let
# tailscaleRev is the git commit at which this flake was imported,
# or the empty string when building from a local checkout of the
# tailscale repo.
@@ -62,15 +67,16 @@
# So really, this flake is for tailscale devs to dogfood with, if
# you're an end user you should be prepared for this flake to not
# build periodically.
tailscale = pkgs: pkgs.buildGo122Module rec {
tailscale = pkgs:
pkgs.buildGo123Module rec {
name = "tailscale";
src = ./.;
vendorHash = pkgs.lib.fileContents ./go.mod.sri;
nativeBuildInputs = pkgs.lib.optionals pkgs.stdenv.isLinux [ pkgs.makeWrapper ];
nativeBuildInputs = pkgs.lib.optionals pkgs.stdenv.isLinux [pkgs.makeWrapper];
ldflags = ["-X tailscale.com/version.gitCommitStamp=${tailscaleRev}"];
CGO_ENABLED = 0;
subPackages = [ "cmd/tailscale" "cmd/tailscaled" ];
subPackages = ["cmd/tailscale" "cmd/tailscaled"];
doCheck = false;
# NOTE: We strip the ${PORT} and $FLAGS because they are unset in the
@@ -79,8 +85,8 @@
# things, but for now, we hardcode the default of port 41641 (taken from
# ./cmd/tailscaled/tailscaled.defaults).
postInstall = pkgs.lib.optionalString pkgs.stdenv.isLinux ''
wrapProgram $out/bin/tailscaled --prefix PATH : ${pkgs.lib.makeBinPath [ pkgs.iproute2 pkgs.iptables pkgs.getent pkgs.shadow ]}
wrapProgram $out/bin/tailscale --suffix PATH : ${pkgs.lib.makeBinPath [ pkgs.procps ]}
wrapProgram $out/bin/tailscaled --prefix PATH : ${pkgs.lib.makeBinPath [pkgs.iproute2 pkgs.iptables pkgs.getent pkgs.shadow]}
wrapProgram $out/bin/tailscale --suffix PATH : ${pkgs.lib.makeBinPath [pkgs.procps]}
sed -i \
-e "s#/usr/sbin#$out/bin#" \
@@ -112,7 +118,7 @@
gotools
graphviz
perl
go_1_22
go_1_23
yarn
];
};
@@ -120,4 +126,4 @@
in
flake-utils.lib.eachDefaultSystem (system: flakeForSystem nixpkgs system);
}
# nix-direnv cache busting line: sha256-M5e5dE1gGW3ly94r3SxCsBmVwbBmhVtaVDW691vxG/8=
# nix-direnv cache busting line: sha256-xO1DuLWi6/lpA9ubA2ZYVJM+CkVNA5IaVGZxX9my0j0=

2
vendor/tailscale.com/go.mod.sri generated vendored
View File

@@ -1 +1 @@
sha256-M5e5dE1gGW3ly94r3SxCsBmVwbBmhVtaVDW691vxG/8=
sha256-xO1DuLWi6/lpA9ubA2ZYVJM+CkVNA5IaVGZxX9my0j0=

View File

@@ -1 +1 @@
tailscale.go1.22
tailscale.go1.23

View File

@@ -1 +1 @@
22ef9eb38e9a2d21b4a45f7adc75addb05f3efb8
ed9dc37b2b000f376a3e819cbb159e2c17a2dac6

View File

@@ -8,6 +8,7 @@ package health
import (
"context"
"errors"
"expvar"
"fmt"
"maps"
"net/http"
@@ -25,6 +26,7 @@ import (
"tailscale.com/util/mak"
"tailscale.com/util/multierr"
"tailscale.com/util/set"
"tailscale.com/util/usermetric"
"tailscale.com/version"
)
@@ -1062,7 +1064,7 @@ func (t *Tracker) updateBuiltinWarnablesLocked() {
_ = t.lastStreamedMapResponse
_ = t.lastMapRequestHeard
shouldClearMagicsockWarnings := false
shouldClearMagicsockWarnings := true
for i := range t.MagicSockReceiveFuncs {
f := &t.MagicSockReceiveFuncs[i]
if f.missing {
@@ -1070,6 +1072,7 @@ func (t *Tracker) updateBuiltinWarnablesLocked() {
ArgMagicsockFunctionName: f.name,
})
shouldClearMagicsockWarnings = false
break
}
}
if shouldClearMagicsockWarnings {
@@ -1202,6 +1205,18 @@ func (t *Tracker) ReceiveFuncStats(which ReceiveFunc) *ReceiveFuncStats {
}
func (t *Tracker) doOnceInit() {
metricHealthMessage.Set(metricHealthMessageLabel{
Type: "warning",
}, expvar.Func(func() any {
if t.nil() {
return 0
}
t.mu.Lock()
defer t.mu.Unlock()
t.updateBuiltinWarnablesLocked()
return int64(len(t.stringsLocked()))
}))
for i := range t.MagicSockReceiveFuncs {
f := &t.MagicSockReceiveFuncs[i]
f.name = (ReceiveFunc(i)).String()
@@ -1232,3 +1247,14 @@ func (t *Tracker) checkReceiveFuncsLocked() {
f.missing = true
}
}
type metricHealthMessageLabel struct {
// TODO: break down by warnable.severity as well?
Type string
}
var metricHealthMessage = usermetric.NewMultiLabelMap[metricHealthMessageLabel](
"tailscaled_health_messages",
"gauge",
"Number of health messages broken down by type.",
)
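The new tailscaled_health_messages gauge is computed lazily: the expvar.Func callback takes the tracker lock and recounts warnings on every scrape. The same pattern using only the standard library (the tracker type here is a made-up stand-in, not health.Tracker):

package main

import (
    "expvar"
    "fmt"
    "sync"
)

// tracker is an illustrative stand-in: mutex-guarded state exported as a
// gauge that is recomputed each time the metric is read.
type tracker struct {
    mu    sync.Mutex
    warns []string
}

func main() {
    t := &tracker{warns: []string{"example warning"}}
    expvar.Publish("gauge_health_messages", expvar.Func(func() any {
        t.mu.Lock()
        defer t.mu.Unlock()
        return int64(len(t.warns))
    }))
    fmt.Println(expvar.Get("gauge_health_messages").String()) // prints 1
}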

55
vendor/tailscale.com/kube/kubetypes/grants.go generated vendored Normal file
View File

@@ -0,0 +1,55 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package kubetypes contains types and constants related to the Tailscale
// Kubernetes Operator.
// These are split into a separate package for consumption by
// non-Kubernetes shared libraries and binaries. Be mindful of not increasing
// dependency size for those consumers when adding anything new here.
package kubetypes
import "net/netip"
// KubernetesCapRule is a rule provided via PeerCapabilityKubernetes capability.
type KubernetesCapRule struct {
// Impersonate is a list of rules that specify how to impersonate the caller
// when proxying to the Kubernetes API.
Impersonate *ImpersonateRule `json:"impersonate,omitempty"`
// Recorders defines a tag for the tsrecorder instance(s) to which a
// recording of a 'kubectl exec' session, from a client matching `src` of this
// grant to an API server proxy matching `dst` of this grant, should be sent.
// This list must not contain more than one tag. The field
// name matches the `Recorder` field with equal semantics for the Tailscale
// SSH session recorder. This field is set by users in ACL grants and is
// then parsed by control, which resolves the tags and populates `RecorderAddrs`.
// https://tailscale.com/kb/1246/tailscale-ssh-session-recording#turn-on-session-recording-in-acls
Recorders []string `json:"recorder,omitempty"`
// RecorderAddrs is a list of addresses that should be addresses of one
// or more tsrecorder instance(s). If set, any `kubectl exec` session
// from a client matching `src` of this grant to an API server proxy
// matching `dst` of this grant will be recorded and the recording will
// be sent to the tsrecorder. This field does not exist in the user
// provided ACL grants - it is populated by control, which obtains the
// addresses by resolving the tags provided via `Recorders` field.
RecorderAddrs []netip.AddrPort `json:"recorderAddrs,omitempty"`
// EnforceRecorder defines whether a kubectl exec session from a client
// matching `src` to an API server proxy matching `dst` should fail
// closed if it cannot be recorded (i.e. if no recorder can be reached).
// Default is to fail open.
// The field name matches `EnforceRecorder` field with equal semantics for Tailscale SSH
// session recorder.
// https://tailscale.com/kb/1246/tailscale-ssh-session-recording#turn-on-session-recording-in-acls
EnforceRecorder bool `json:"enforceRecorder,omitempty"`
}
// ImpersonateRule defines how a request from the tailnet identity matching
// 'src' of this grant should be impersonated.
type ImpersonateRule struct {
// Groups can be used to set a list of groups that a request to the
// Kubernetes API server should be impersonated as coming from. Groups in
// Kubernetes only exist as subjects that RBAC rules refer to. Caller
// can choose to use an existing group, such as system:masters, or
// create RBAC for a new group.
// https://kubernetes.io/docs/reference/access-authn-authz/rbac/#referring-to-subjects
Groups []string `json:"groups,omitempty"`
}
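For orientation, a grant payload matching the JSON tags above can be decoded straight into these types; the tag and group values below are made up:

package main

import (
    "encoding/json"
    "fmt"
    "log"

    "tailscale.com/kube/kubetypes"
)

func main() {
    // Illustrative ACL grant fragment; control would later resolve the
    // recorder tag into RecorderAddrs.
    const raw = `{
        "impersonate": {"groups": ["system:masters"]},
        "recorder": ["tag:my-recorder"],
        "enforceRecorder": true
    }`
    var rule kubetypes.KubernetesCapRule
    if err := json.Unmarshal([]byte(raw), &rule); err != nil {
        log.Fatal(err)
    }
    fmt.Println(rule.Impersonate.Groups, rule.Recorders, rule.EnforceRecorder)
}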

24
vendor/tailscale.com/kube/kubetypes/metrics.go generated vendored Normal file
View File

@@ -0,0 +1,24 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package kubetypes
const (
// Hostinfo App values for the Tailscale Kubernetes Operator components.
AppOperator = "k8s-operator"
AppAPIServerProxy = "k8s-operator-proxy"
AppIngressProxy = "k8s-operator-ingress-proxy"
AppIngressResource = "k8s-operator-ingress-resource"
AppEgressProxy = "k8s-operator-egress-proxy"
AppConnector = "k8s-operator-connector-resource"
// Clientmetrics for Tailscale Kubernetes Operator components
MetricIngressProxyCount = "k8s_ingress_proxies" // L3
MetricIngressResourceCount = "k8s_ingress_resources" // L7
MetricEgressProxyCount = "k8s_egress_proxies"
MetricConnectorResourceCount = "k8s_connector_resources"
MetricConnectorWithSubnetRouterCount = "k8s_connector_subnetrouter_resources"
MetricConnectorWithExitNodeCount = "k8s_connector_exitnode_resources"
MetricNameserverCount = "k8s_nameserver_resources"
MetricRecorderCount = "k8s_recorder_resources"
)

41
vendor/tailscale.com/metrics/fds_linux.go generated vendored Normal file
View File

@@ -0,0 +1,41 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package metrics
import (
"io/fs"
"sync"
"go4.org/mem"
"tailscale.com/util/dirwalk"
)
// counter is a reusable counter for counting file descriptors.
type counter struct {
n int
// cb is the (*counter).count method value. Creating it allocates,
// so we have to save it away and use a sync.Pool to keep currentFDs
// amortized alloc-free.
cb func(name mem.RO, de fs.DirEntry) error
}
var counterPool = &sync.Pool{New: func() any {
c := new(counter)
c.cb = c.count
return c
}}
func (c *counter) count(name mem.RO, de fs.DirEntry) error {
c.n++
return nil
}
func currentFDs() int {
c := counterPool.Get().(*counter)
defer counterPool.Put(c)
c.n = 0
dirwalk.WalkShallow(mem.S("/proc/self/fd"), c.cb)
return c.n
}
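The cb field above caches the (*counter).count method value so that passing the callback to dirwalk does not allocate on every call; combined with the sync.Pool, the walk stays amortized alloc-free. A generic, self-contained sketch of the same trick (all names here are illustrative):

package main

import (
    "fmt"
    "sync"
)

// visitor counts items; cb caches the method value so callers don't
// allocate a fresh closure per walk.
type visitor struct {
    n  int
    cb func(item string)
}

var visitorPool = &sync.Pool{New: func() any {
    v := new(visitor)
    v.cb = v.visit // allocate the method value once, when the visitor is created
    return v
}}

func (v *visitor) visit(item string) { v.n++ }

// walk stands in for dirwalk.WalkShallow: it invokes fn for each item.
func walk(items []string, fn func(string)) {
    for _, it := range items {
        fn(it)
    }
}

func countItems(items []string) int {
    v := visitorPool.Get().(*visitor)
    defer visitorPool.Put(v)
    v.n = 0
    walk(items, v.cb) // passing v.visit directly would allocate each call
    return v.n
}

func main() { fmt.Println(countItems([]string{"a", "b", "c"})) } // prints 3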

8
vendor/tailscale.com/metrics/fds_notlinux.go generated vendored Normal file
View File

@@ -0,0 +1,8 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !linux
package metrics
func currentFDs() int { return 0 }

163
vendor/tailscale.com/metrics/metrics.go generated vendored Normal file
View File

@@ -0,0 +1,163 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package metrics contains expvar & Prometheus types and code used by
// Tailscale for monitoring.
package metrics
import (
"expvar"
"fmt"
"io"
"slices"
"strings"
)
// Set is a string-to-Var map variable that satisfies the expvar.Var
// interface.
//
// Semantically, this is mapped by tsweb's Prometheus exporter as a
// collection of unrelated variables exported with a common prefix.
//
// This lets us have tsweb recognize *expvar.Map for different
// purposes in the future. (Or perhaps all uses of expvar.Map will
// require explicit types like this one, declaring how we want tsweb
// to export it to Prometheus.)
type Set struct {
expvar.Map
}
// LabelMap is a string-to-Var map variable that satisfies the
// expvar.Var interface.
//
// Semantically, this is mapped by tsweb's Prometheus exporter as a
// collection of variables with the same name, with a varying label
// value. Use this to export things that are intuitively breakdowns
// into different buckets.
type LabelMap struct {
Label string
expvar.Map
}
// SetInt64 sets the *Int value stored under the given map key.
func (m *LabelMap) SetInt64(key string, v int64) {
m.Get(key).Set(v)
}
// Get returns a direct pointer to the expvar.Int for key, creating it
// if necessary.
func (m *LabelMap) Get(key string) *expvar.Int {
m.Add(key, 0)
return m.Map.Get(key).(*expvar.Int)
}
// GetIncrFunc returns a function that increments the expvar.Int named by key.
//
// Most callers should not need this; it exists to satisfy an
// interface elsewhere.
func (m *LabelMap) GetIncrFunc(key string) func(delta int64) {
return m.Get(key).Add
}
// GetFloat returns a direct pointer to the expvar.Float for key, creating it
// if necessary.
func (m *LabelMap) GetFloat(key string) *expvar.Float {
m.AddFloat(key, 0.0)
return m.Map.Get(key).(*expvar.Float)
}
// CurrentFDs reports how many file descriptors are currently open.
//
// It only works on Linux. It returns zero otherwise.
func CurrentFDs() int {
return currentFDs()
}
// Histogram is a histogram of values.
// It should be created with NewHistogram.
type Histogram struct {
// buckets is a list of bucket boundaries, in increasing order.
buckets []float64
// bucketStrings is a list of the same buckets, but as strings.
// These are allocated once at creation time by NewHistogram.
bucketStrings []string
bucketVars []expvar.Int
sum expvar.Float
count expvar.Int
}
// NewHistogram returns a new histogram using the given bucket boundaries.
//
// The buckets are the boundaries of the histogram buckets, in
// increasing order. The last bucket is +Inf.
func NewHistogram(buckets []float64) *Histogram {
if !slices.IsSorted(buckets) {
panic("buckets must be sorted")
}
labels := make([]string, len(buckets))
for i, b := range buckets {
labels[i] = fmt.Sprintf("%v", b)
}
h := &Histogram{
buckets: buckets,
bucketStrings: labels,
bucketVars: make([]expvar.Int, len(buckets)),
}
return h
}
// Observe records a new observation in the histogram.
func (h *Histogram) Observe(v float64) {
h.sum.Add(v)
h.count.Add(1)
for i, b := range h.buckets {
if v <= b {
h.bucketVars[i].Add(1)
}
}
}
// String returns a JSON representation of the histogram.
// This is used to satisfy the expvar.Var interface.
func (h *Histogram) String() string {
var b strings.Builder
fmt.Fprintf(&b, "{")
first := true
h.Do(func(kv expvar.KeyValue) {
if !first {
fmt.Fprintf(&b, ",")
}
fmt.Fprintf(&b, "%q: ", kv.Key)
if kv.Value != nil {
fmt.Fprintf(&b, "%v", kv.Value)
} else {
fmt.Fprint(&b, "null")
}
first = false
})
fmt.Fprintf(&b, ",\"sum\": %v", &h.sum)
fmt.Fprintf(&b, ",\"count\": %v", &h.count)
fmt.Fprintf(&b, "}")
return b.String()
}
// Do calls f for each bucket in the histogram.
func (h *Histogram) Do(f func(expvar.KeyValue)) {
for i := range h.bucketVars {
f(expvar.KeyValue{Key: h.bucketStrings[i], Value: &h.bucketVars[i]})
}
f(expvar.KeyValue{Key: "+Inf", Value: &h.count})
}
// PromExport writes the histogram to w in Prometheus exposition format.
func (h *Histogram) PromExport(w io.Writer, name string) {
fmt.Fprintf(w, "# TYPE %s histogram\n", name)
h.Do(func(kv expvar.KeyValue) {
fmt.Fprintf(w, "%s_bucket{le=%q} %v\n", name, kv.Key, kv.Value)
})
fmt.Fprintf(w, "%s_sum %v\n", name, &h.sum)
fmt.Fprintf(w, "%s_count %v\n", name, &h.count)
}
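A short usage sketch for the Histogram added above; the bucket boundaries and metric name are illustrative:

package main

import (
    "os"

    "tailscale.com/metrics"
)

func main() {
    // Buckets must be sorted ascending; NewHistogram panics otherwise.
    h := metrics.NewHistogram([]float64{0.01, 0.1, 1})
    h.Observe(0.05) // counted in the 0.1 and 1 buckets
    h.Observe(2.5)  // larger than every bucket, so only the total count grows

    // Emit the metric in Prometheus exposition format, including the
    // implicit +Inf bucket, _sum and _count series.
    h.PromExport(os.Stdout, "request_duration_seconds")
}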

283
vendor/tailscale.com/metrics/multilabelmap.go generated vendored Normal file
View File

@@ -0,0 +1,283 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package metrics
import (
"expvar"
"fmt"
"io"
"reflect"
"sort"
"strings"
"sync"
)
// MultiLabelMap is a struct-value-to-Var map variable that satisfies the
// [expvar.Var] interface but also allows for multiple Prometheus labels to be
// associated with each value.
//
// T must be a struct type with scalar fields. The struct field names
// (lowercased) are used as the labels, unless a "prom" struct tag is present.
// The struct fields must all be strings, and the string values must be valid
// Prometheus label values without requiring quoting.
type MultiLabelMap[T comparable] struct {
Type string // optional Prometheus type ("counter", "gauge")
Help string // optional Prometheus help string
m sync.Map // map[T]expvar.Var
mu sync.RWMutex
sorted []labelsAndValue[T] // by labels string, to match expvar.Map + for aesthetics in output
}
// NewMultiLabelMap creates and publishes (via expvar.Publish) a new
// MultiLabelMap[T] variable with the given name and returns it.
func NewMultiLabelMap[T comparable](name string, promType, helpText string) *MultiLabelMap[T] {
m := &MultiLabelMap[T]{
Type: promType,
Help: helpText,
}
var zero T
_ = LabelString(zero) // panic early if T is invalid
expvar.Publish(name, m)
return m
}
type labelsAndValue[T comparable] struct {
key T
labels string // Prometheus-formatted {label="value",label="value"} string
val expvar.Var
}
// LabelString returns a Prometheus-formatted label string for the given key.
// k must be a struct type with scalar fields, as required by MultiLabelMap;
// if k is not a struct, it panics.
func LabelString(k any) string {
rv := reflect.ValueOf(k)
t := rv.Type()
if t.Kind() != reflect.Struct {
panic(fmt.Sprintf("MultiLabelMap must use keys of type struct; got %v", t))
}
var sb strings.Builder
sb.WriteString("{")
for i := range t.NumField() {
if i > 0 {
sb.WriteString(",")
}
ft := t.Field(i)
label := ft.Tag.Get("prom")
if label == "" {
label = strings.ToLower(ft.Name)
}
fv := rv.Field(i)
switch fv.Kind() {
case reflect.String:
fmt.Fprintf(&sb, "%s=%q", label, fv.String())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
fmt.Fprintf(&sb, "%s=\"%d\"", label, fv.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
fmt.Fprintf(&sb, "%s=\"%d\"", label, fv.Uint())
case reflect.Bool:
fmt.Fprintf(&sb, "%s=\"%v\"", label, fv.Bool())
default:
panic(fmt.Sprintf("MultiLabelMap key field %q has unsupported type %v", ft.Name, fv.Type()))
}
}
sb.WriteString("}")
return sb.String()
}
// KeyValue represents a single entry in a [MultiLabelMap].
type KeyValue[T comparable] struct {
Key T
Value expvar.Var
}
func (v *MultiLabelMap[T]) String() string {
return `"MultiLabelMap"`
}
// WritePrometheus writes v to w in Prometheus exposition format.
// The name argument is the metric name.
func (v *MultiLabelMap[T]) WritePrometheus(w io.Writer, name string) {
if v.Type != "" {
io.WriteString(w, "# TYPE ")
io.WriteString(w, name)
io.WriteString(w, " ")
io.WriteString(w, v.Type)
io.WriteString(w, "\n")
}
if v.Help != "" {
io.WriteString(w, "# HELP ")
io.WriteString(w, name)
io.WriteString(w, " ")
io.WriteString(w, v.Help)
io.WriteString(w, "\n")
}
v.mu.RLock()
defer v.mu.RUnlock()
for _, kv := range v.sorted {
io.WriteString(w, name)
io.WriteString(w, kv.labels)
switch v := kv.val.(type) {
case *expvar.Int:
fmt.Fprintf(w, " %d\n", v.Value())
case *expvar.Float:
fmt.Fprintf(w, " %v\n", v.Value())
default:
fmt.Fprintf(w, " %s\n", kv.val)
}
}
}
// Init removes all keys from the map.
//
// Think of it as "Reset", but it's named Init to match expvar.Map.Init.
func (v *MultiLabelMap[T]) Init() *MultiLabelMap[T] {
v.mu.Lock()
defer v.mu.Unlock()
v.sorted = nil
v.m.Range(func(k, _ any) bool {
v.m.Delete(k)
return true
})
return v
}
// addKeyLocked updates the sorted list of keys in v.sorted.
//
// v.mu must be held.
func (v *MultiLabelMap[T]) addKeyLocked(key T, val expvar.Var) {
ls := LabelString(key)
ent := labelsAndValue[T]{key, ls, val}
// Using insertion sort to place key into the already-sorted v.sorted.
i := sort.Search(len(v.sorted), func(i int) bool {
return v.sorted[i].labels >= ls
})
if i >= len(v.sorted) {
v.sorted = append(v.sorted, ent)
} else if v.sorted[i].key == key {
v.sorted[i].val = val
} else {
var zero labelsAndValue[T]
v.sorted = append(v.sorted, zero)
copy(v.sorted[i+1:], v.sorted[i:])
v.sorted[i] = ent
}
}
// Get returns the expvar for the given key, or nil if it doesn't exist.
func (v *MultiLabelMap[T]) Get(key T) expvar.Var {
i, _ := v.m.Load(key)
av, _ := i.(expvar.Var)
return av
}
func newInt() expvar.Var { return new(expvar.Int) }
func newFloat() expvar.Var { return new(expvar.Float) }
// getOrFill returns the expvar.Var for the given key, atomically creating it
// once (for all callers) with fill if it doesn't exist.
func (v *MultiLabelMap[T]) getOrFill(key T, fill func() expvar.Var) expvar.Var {
if v := v.Get(key); v != nil {
return v
}
v.mu.Lock()
defer v.mu.Unlock()
if v := v.Get(key); v != nil {
return v
}
nv := fill()
v.addKeyLocked(key, nv)
v.m.Store(key, nv)
return nv
}
// Set sets key to val.
//
// This is not optimized for highly concurrent usage; it's presumed to only be
// used rarely, at startup.
func (v *MultiLabelMap[T]) Set(key T, val expvar.Var) {
v.mu.Lock()
defer v.mu.Unlock()
v.addKeyLocked(key, val)
v.m.Store(key, val)
}
// SetInt sets the *[expvar.Int] value stored under the given map key to val,
// creating it if it doesn't exist yet.
// It does nothing if key exists but is of the wrong type.
func (v *MultiLabelMap[T]) SetInt(key T, val int64) {
// Set to Int; ignore otherwise.
if iv, ok := v.getOrFill(key, newInt).(*expvar.Int); ok {
iv.Set(val)
}
}
// SetFloat sets the *[expvar.Float] value stored under the given map key to val,
// creating it if it doesn't exist yet.
// It does nothing if key exists but is of the wrong type.
func (v *MultiLabelMap[T]) SetFloat(key T, val float64) {
// Set to Float; ignore otherwise.
if iv, ok := v.getOrFill(key, newFloat).(*expvar.Float); ok {
iv.Set(val)
}
}
// Add adds delta to the *[expvar.Int] value stored under the given map key,
// creating it if it doesn't exist yet.
// It does nothing if key exists but is of the wrong type.
func (v *MultiLabelMap[T]) Add(key T, delta int64) {
// Add to Int; ignore otherwise.
if iv, ok := v.getOrFill(key, newInt).(*expvar.Int); ok {
iv.Add(delta)
}
}
// AddFloat adds delta to the *[expvar.Float] value stored under the given map key,
// creating it if it doesn't exist yet.
// It does nothing if key exists but is of the wrong type.
func (v *MultiLabelMap[T]) AddFloat(key T, delta float64) {
// Add to Float; ignore otherwise.
if iv, ok := v.getOrFill(key, newFloat).(*expvar.Float); ok {
iv.Add(delta)
}
}
// Delete deletes the given key from the map.
//
// This is not optimized for highly concurrent usage; it's presumed to only be
// used rarely, at startup.
func (v *MultiLabelMap[T]) Delete(key T) {
ls := LabelString(key)
v.mu.Lock()
defer v.mu.Unlock()
// Find key's position in the already-sorted v.sorted.
i := sort.Search(len(v.sorted), func(i int) bool {
return v.sorted[i].labels >= ls
})
if i < len(v.sorted) && v.sorted[i].key == key {
v.sorted = append(v.sorted[:i], v.sorted[i+1:]...)
v.m.Delete(key)
}
}
// Do calls f for each entry in the map.
// The map is locked during the iteration,
// but existing entries may be concurrently updated.
func (v *MultiLabelMap[T]) Do(f func(KeyValue[T])) {
v.mu.RLock()
defer v.mu.RUnlock()
for _, e := range v.sorted {
f(KeyValue[T]{e.key, e.val})
}
}
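A usage sketch for MultiLabelMap; the label struct and values are illustrative:

package main

import (
    "os"

    "tailscale.com/metrics"
)

// requestLabels becomes the Prometheus label set: field names are
// lowercased unless a "prom" struct tag overrides them.
type requestLabels struct {
    Method string
    Code   int `prom:"status_code"`
}

func main() {
    m := metrics.NewMultiLabelMap[requestLabels](
        "http_requests_total", "counter", "HTTP requests by method and status.")
    m.Add(requestLabels{Method: "GET", Code: 200}, 3)
    m.Add(requestLabels{Method: "GET", Code: 500}, 1)

    // Writes the # TYPE and # HELP headers plus one sample per label combination.
    m.WritePrometheus(os.Stdout, "http_requests_total")
}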

View File

@@ -7,21 +7,20 @@ import (
"bytes"
"context"
"github.com/illarion/gonotify"
"github.com/illarion/gonotify/v2"
"tailscale.com/health"
)
func (m *directManager) runFileWatcher() {
in, err := gonotify.NewInotify()
ctx, cancel := context.WithCancel(m.ctx)
defer cancel()
in, err := gonotify.NewInotify(ctx)
if err != nil {
// Oh well, we tried. This is all best effort for now, to
// surface warnings to users.
m.logf("dns: inotify new: %v", err)
return
}
ctx, cancel := context.WithCancel(m.ctx)
defer cancel()
go m.closeInotifyOnDone(ctx, in)
const events = gonotify.IN_ATTRIB |
gonotify.IN_CLOSE_WRITE |
@@ -107,8 +106,3 @@ func (m *directManager) checkForFileTrample() {
m.logf("trample: resolv.conf changed from what we expected. did some other program interfere? current contents: %q", show)
m.health.SetUnhealthy(resolvTrampleWarnable, nil)
}
func (m *directManager) closeInotifyOnDone(ctx context.Context, in *gonotify.Inotify) {
<-ctx.Done()
in.Close()
}

View File

@@ -122,6 +122,11 @@ func (m *Manager) Set(cfg Config) error {
return m.setLocked(cfg)
}
// GetBaseConfig returns the current base OS DNS configuration as provided by the OSConfigurator.
func (m *Manager) GetBaseConfig() (OSConfig, error) {
return m.os.GetBaseConfig()
}
// setLocked sets the DNS configuration.
//
// m.mu must be held.

View File

@@ -10,6 +10,8 @@ import (
"go4.org/mem"
"tailscale.com/control/controlknobs"
"tailscale.com/health"
"tailscale.com/net/dns/resolvconffile"
"tailscale.com/net/tsaddr"
"tailscale.com/types/logger"
"tailscale.com/util/mak"
)
@@ -83,8 +85,36 @@ func (c *darwinConfigurator) SetDNS(cfg OSConfig) error {
return c.removeResolverFiles(func(domain string) bool { return !keep[domain] })
}
// GetBaseConfig returns the current OS DNS configuration, extracting it from /etc/resolv.conf.
// We should really be using the SystemConfiguration framework to get this information, as this
// is not a stable public API, and is provided mostly as a compatibility effort with Unix
// tools. Apple might break this in the future. But honestly, parsing the output of `scutil --dns`
// is *even more* likely to break in the future.
func (c *darwinConfigurator) GetBaseConfig() (OSConfig, error) {
return OSConfig{}, ErrGetBaseConfigNotSupported
cfg := OSConfig{}
resolvConf, err := resolvconffile.ParseFile("/etc/resolv.conf")
if err != nil {
c.logf("failed to parse /etc/resolv.conf: %v", err)
return cfg, ErrGetBaseConfigNotSupported
}
for _, ns := range resolvConf.Nameservers {
if ns == tsaddr.TailscaleServiceIP() || ns == tsaddr.TailscaleServiceIPv6() {
// If we find Quad100 in /etc/resolv.conf, we should ignore it.
c.logf("ignoring 100.100.100.100 resolver IP found in /etc/resolv.conf")
continue
}
cfg.Nameservers = append(cfg.Nameservers, ns)
}
cfg.SearchDomains = resolvConf.SearchDomains
if len(cfg.Nameservers) == 0 {
// Log a warning in case we couldn't find any nameservers in /etc/resolv.conf.
c.logf("no nameservers found in /etc/resolv.conf, DNS resolution might fail")
}
return cfg, nil
}
const macResolverFileHeader = "# Added by tailscaled\n"
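GetBaseConfig now derives the macOS fallback configuration by parsing /etc/resolv.conf through the resolvconffile helper added to the imports. A small sketch of that parsing path, using a temporary file with made-up contents:

package main

import (
    "fmt"
    "log"
    "os"
    "path/filepath"

    "tailscale.com/net/dns/resolvconffile"
)

func main() {
    // Write an illustrative resolv.conf, then parse it the same way
    // GetBaseConfig does.
    dir, err := os.MkdirTemp("", "resolvdemo")
    if err != nil {
        log.Fatal(err)
    }
    defer os.RemoveAll(dir)

    path := filepath.Join(dir, "resolv.conf")
    contents := []byte("nameserver 8.8.8.8\nsearch example.internal\n")
    if err := os.WriteFile(path, contents, 0o644); err != nil {
        log.Fatal(err)
    }

    conf, err := resolvconffile.ParseFile(path)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(conf.Nameservers, conf.SearchDomains)
}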

View File

@@ -469,6 +469,9 @@ func (m *windowsManager) disableDynamicUpdates() error {
}
defer k.Close()
if err := k.SetDWordValue("RegistrationEnabled", 0); err != nil {
return err
}
if err := k.SetDWordValue("DisableDynamicUpdate", 1); err != nil {
return err
}

View File

@@ -8,6 +8,7 @@ import (
"errors"
"fmt"
"net/netip"
"slices"
"strings"
"tailscale.com/types/logger"
@@ -103,10 +104,16 @@ func (o *OSConfig) WriteToBufioWriter(w *bufio.Writer) {
}
func (o OSConfig) IsZero() bool {
return len(o.Nameservers) == 0 && len(o.SearchDomains) == 0 && len(o.MatchDomains) == 0
return len(o.Hosts) == 0 &&
len(o.Nameservers) == 0 &&
len(o.SearchDomains) == 0 &&
len(o.MatchDomains) == 0
}
func (a OSConfig) Equal(b OSConfig) bool {
if len(a.Hosts) != len(b.Hosts) {
return false
}
if len(a.Nameservers) != len(b.Nameservers) {
return false
}
@@ -117,6 +124,15 @@ func (a OSConfig) Equal(b OSConfig) bool {
return false
}
for i := range a.Hosts {
ha, hb := a.Hosts[i], b.Hosts[i]
if ha.Addr != hb.Addr {
return false
}
if !slices.Equal(ha.Hosts, hb.Hosts) {
return false
}
}
for i := range a.Nameservers {
if a.Nameservers[i] != b.Nameservers[i] {
return false

View File

@@ -181,7 +181,7 @@ var dnsForwarderFailing = health.Register(&health.Warnable{
DependsOn: []*health.Warnable{health.NetworkStatusWarnable},
Text: health.StaticMessage("Tailscale can't reach the configured DNS servers. Internet connectivity may be affected."),
ImpactsConnectivity: true,
TimeToVisible: 5 * time.Second,
TimeToVisible: 15 * time.Second,
})
type route struct {

View File

@@ -15,8 +15,6 @@ import (
"net"
"net/netip"
"os"
"os/exec"
"runtime"
"strings"
"sync/atomic"
@@ -50,9 +48,6 @@ ens18 0000000A 00000000 0001 0 0 0 0000FFFF
func likelyHomeRouterIPLinux() (ret netip.Addr, myIP netip.Addr, ok bool) {
if procNetRouteErr.Load() {
// If we failed to read /proc/net/route previously, don't keep trying.
if runtime.GOOS == "android" {
return likelyHomeRouterIPAndroid()
}
return ret, myIP, false
}
lineNum := 0
@@ -94,9 +89,6 @@ func likelyHomeRouterIPLinux() (ret netip.Addr, myIP netip.Addr, ok bool) {
}
if err != nil {
procNetRouteErr.Store(true)
if runtime.GOOS == "android" {
return likelyHomeRouterIPAndroid()
}
log.Printf("interfaces: failed to read /proc/net/route: %v", err)
}
if ret.IsValid() {
@@ -137,41 +129,6 @@ func likelyHomeRouterIPLinux() (ret netip.Addr, myIP netip.Addr, ok bool) {
return netip.Addr{}, netip.Addr{}, false
}
// Android apps don't have permission to read /proc/net/route, at
// least on Google devices and the Android emulator.
func likelyHomeRouterIPAndroid() (ret netip.Addr, _ netip.Addr, ok bool) {
cmd := exec.Command("/system/bin/ip", "route", "show", "table", "0")
out, err := cmd.StdoutPipe()
if err != nil {
return
}
if err := cmd.Start(); err != nil {
log.Printf("interfaces: running /system/bin/ip: %v", err)
return
}
// Search for line like "default via 10.0.2.2 dev radio0 table 1016 proto static mtu 1500 "
lineread.Reader(out, func(line []byte) error {
const pfx = "default via "
if !mem.HasPrefix(mem.B(line), mem.S(pfx)) {
return nil
}
line = line[len(pfx):]
sp := bytes.IndexByte(line, ' ')
if sp == -1 {
return nil
}
ipb := line[:sp]
if ip, err := netip.ParseAddr(string(ipb)); err == nil && ip.Is4() {
ret = ip
log.Printf("interfaces: found Android default route %v", ip)
}
return nil
})
cmd.Process.Kill()
cmd.Wait()
return ret, netip.Addr{}, ret.IsValid()
}
func defaultRoute() (d DefaultRouteDetails, err error) {
v, err := defaultRouteInterfaceProcNet()
if err == nil {

2
vendor/tailscale.com/shell.nix generated vendored
View File

@@ -16,4 +16,4 @@
) {
src = ./.;
}).shellNix
# nix-direnv cache busting line: sha256-M5e5dE1gGW3ly94r3SxCsBmVwbBmhVtaVDW691vxG/8=
# nix-direnv cache busting line: sha256-xO1DuLWi6/lpA9ubA2ZYVJM+CkVNA5IaVGZxX9my0j0=

View File

@@ -147,7 +147,9 @@ type CapabilityVersion int
// - 102: 2024-07-12: NodeAttrDisableMagicSockCryptoRouting support
// - 103: 2024-07-24: Client supports NodeAttrDisableCaptivePortalDetection
// - 104: 2024-08-03: SelfNodeV6MasqAddrForThisPeer now works
const CurrentCapabilityVersion CapabilityVersion = 104
// - 105: 2024-08-05: Fixed SSH behavior on systems that use busybox (issue #12849)
// - 106: 2024-09-03: fix panic regression from cryptokey routing change (65fe0ba7b5)
const CurrentCapabilityVersion CapabilityVersion = 106
type StableID string
@@ -2306,6 +2308,13 @@ const (
// Added 2024-05-29 in Tailscale version 1.68.
NodeAttrSSHBehaviorV1 NodeCapability = "ssh-behavior-v1"
// NodeAttrSSHBehaviorV2 forces SSH to use the V2 behavior (use su, run SFTP in child process).
// This overrides NodeAttrSSHBehaviorV1 if set.
// See forceV1Behavior in ssh/tailssh/incubator.go for distinction between
// V1 and V2 behavior.
// Added 2024-08-06 in Tailscale version 1.72.
NodeAttrSSHBehaviorV2 NodeCapability = "ssh-behavior-v2"
// NodeAttrDisableSplitDNSWhenNoCustomResolvers indicates that the node's
// DNS manager should not adopt a split DNS configuration even though the
// Config of the resolver only contains routes that do not specify custom

54
vendor/tailscale.com/tka/sig.go generated vendored
View File

@@ -19,6 +19,8 @@ import (
"tailscale.com/types/tkatype"
)
//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=NodeKeySignature
// SigKind describes valid NodeKeySignature types.
type SigKind uint8
@@ -370,10 +372,15 @@ func ResignNKS(priv key.NLPrivate, nodeKey key.NodePublic, oldNKS tkatype.Marsha
return oldNKS, nil
}
nested, err := maybeTrimRotationSignatureChain(oldSig, priv)
if err != nil {
return nil, fmt.Errorf("trimming rotation signature chain: %w", err)
}
newSig := NodeKeySignature{
SigKind: SigRotation,
Pubkey: nk,
Nested: &oldSig,
Nested: &nested,
}
if newSig.Signature, err = priv.SignNKS(newSig.SigHash()); err != nil {
return nil, fmt.Errorf("signing NKS: %w", err)
@@ -382,6 +389,51 @@ func ResignNKS(priv key.NLPrivate, nodeKey key.NodePublic, oldNKS tkatype.Marsha
return newSig.Serialize(), nil
}
// maybeTrimRotationSignatureChain truncates the rotation signature chain to ensure
// it contains no more than 15 node keys.
func maybeTrimRotationSignatureChain(sig NodeKeySignature, priv key.NLPrivate) (NodeKeySignature, error) {
if sig.SigKind != SigRotation {
return sig, nil
}
// Collect all the previous node keys, ordered from newest to oldest.
prevPubkeys := [][]byte{sig.Pubkey}
nested := sig.Nested
for nested != nil {
if len(nested.Pubkey) > 0 {
prevPubkeys = append(prevPubkeys, nested.Pubkey)
}
if nested.SigKind != SigRotation {
break
}
nested = nested.Nested
}
// An existing rotation signature with 15 keys is the maximum we can wrap in a
// new signature without hitting the CBOR nesting limit of 16 (see
// MaxNestedLevels in tka.go).
const maxPrevKeys = 15
if len(prevPubkeys) <= maxPrevKeys {
return sig, nil
}
// Create a new rotation signature chain, starting with the original
// direct signature.
var err error
result := nested // original direct signature
for i := maxPrevKeys - 2; i >= 0; i-- {
result = &NodeKeySignature{
SigKind: SigRotation,
Pubkey: prevPubkeys[i],
Nested: result,
}
if result.Signature, err = priv.SignNKS(result.SigHash()); err != nil {
return sig, fmt.Errorf("signing NKS: %w", err)
}
}
return *result, nil
}
// SignByCredential signs a node public key by a private key which has its
// signing authority delegated by a SigCredential signature. This is used by
// wrapped auth keys.
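To make the 15-key bound above concrete: when a chain grows past 15 keys, the rebuilt signature keeps the 14 newest rotation keys and wraps them around the original direct signature. A simplified model of which keys survive, ignoring the actual re-signing and CBOR encoding (this helper is illustrative, not part of the tka package):

package main

import "fmt"

// trimmedKeys models the effect of maybeTrimRotationSignatureChain on the
// key chain, ordered newest to oldest with the original direct signature's
// key last: chains over 15 keys keep the 14 newest plus the original.
func trimmedKeys(keys [][]byte) [][]byte {
    const maxPrevKeys = 15
    if len(keys) <= maxPrevKeys {
        return keys
    }
    out := make([][]byte, 0, maxPrevKeys)
    out = append(out, keys[:maxPrevKeys-1]...) // 14 newest rotation keys
    out = append(out, keys[len(keys)-1])       // original direct signature's key
    return out
}

func main() {
    chain := make([][]byte, 20)          // a 20-key chain, newest first
    fmt.Println(len(trimmedKeys(chain))) // prints 15
}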

32
vendor/tailscale.com/tka/tka_clone.go generated vendored Normal file
View File

@@ -0,0 +1,32 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Code generated by tailscale.com/cmd/cloner; DO NOT EDIT.
package tka
// Clone makes a deep copy of NodeKeySignature.
// The result aliases no memory with the original.
func (src *NodeKeySignature) Clone() *NodeKeySignature {
if src == nil {
return nil
}
dst := new(NodeKeySignature)
*dst = *src
dst.Pubkey = append(src.Pubkey[:0:0], src.Pubkey...)
dst.KeyID = append(src.KeyID[:0:0], src.KeyID...)
dst.Signature = append(src.Signature[:0:0], src.Signature...)
dst.Nested = src.Nested.Clone()
dst.WrappingPubkey = append(src.WrappingPubkey[:0:0], src.WrappingPubkey...)
return dst
}
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _NodeKeySignatureCloneNeedsRegeneration = NodeKeySignature(struct {
SigKind SigKind
Pubkey []byte
KeyID []byte
Signature []byte
Nested *NodeKeySignature
WrappingPubkey []byte
}{})

414
vendor/tailscale.com/tsweb/varz/varz.go generated vendored Normal file
View File

@@ -0,0 +1,414 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Package varz contains code to export metrics in Prometheus format.
package varz
import (
"cmp"
"expvar"
"fmt"
"io"
"net/http"
"reflect"
"runtime"
"sort"
"strings"
"sync"
"time"
"unicode"
"unicode/utf8"
"tailscale.com/metrics"
"tailscale.com/version"
)
func init() {
expvar.Publish("process_start_unix_time", expvar.Func(func() any { return timeStart.Unix() }))
expvar.Publish("version", expvar.Func(func() any { return version.Long() }))
expvar.Publish("go_version", expvar.Func(func() any { return runtime.Version() }))
expvar.Publish("counter_uptime_sec", expvar.Func(func() any { return int64(Uptime().Seconds()) }))
expvar.Publish("gauge_goroutines", expvar.Func(func() any { return runtime.NumGoroutine() }))
}
const (
gaugePrefix = "gauge_"
counterPrefix = "counter_"
labelMapPrefix = "labelmap_"
histogramPrefix = "histogram_"
)
// prefixesToTrim contains key prefixes to remove when exporting and sorting metrics.
var prefixesToTrim = []string{gaugePrefix, counterPrefix, labelMapPrefix, histogramPrefix}
var timeStart = time.Now()
func Uptime() time.Duration { return time.Since(timeStart).Round(time.Second) }
// WritePrometheusExpvar writes kv to w in Prometheus metrics format.
//
// See VarzHandler for conventions. This is exported primarily for
// people to test their varz.
func WritePrometheusExpvar(w io.Writer, kv expvar.KeyValue) {
writePromExpVar(w, "", kv)
}
type prometheusMetricDetails struct {
Name string
Type string
Label string
}
var prometheusMetricCache sync.Map // string => *prometheusMetricDetails
func prometheusMetric(prefix string, key string) (string, string, string) {
cachekey := prefix + key
if v, ok := prometheusMetricCache.Load(cachekey); ok {
d := v.(*prometheusMetricDetails)
return d.Name, d.Type, d.Label
}
var typ string
var label string
switch {
case strings.HasPrefix(key, gaugePrefix):
typ = "gauge"
key = strings.TrimPrefix(key, gaugePrefix)
case strings.HasPrefix(key, counterPrefix):
typ = "counter"
key = strings.TrimPrefix(key, counterPrefix)
case strings.HasPrefix(key, histogramPrefix):
typ = "histogram"
key = strings.TrimPrefix(key, histogramPrefix)
}
if strings.HasPrefix(key, labelMapPrefix) {
key = strings.TrimPrefix(key, labelMapPrefix)
if a, b, ok := strings.Cut(key, "_"); ok {
label, key = a, b
}
}
// Convert the metric to a valid Prometheus metric name.
// "Metric names may contain ASCII letters, digits, underscores, and colons.
// It must match the regex [a-zA-Z_:][a-zA-Z0-9_:]*"
mapInvalidMetricRunes := func(r rune) rune {
if r >= 'a' && r <= 'z' ||
r >= 'A' && r <= 'Z' ||
r >= '0' && r <= '9' ||
r == '_' || r == ':' {
return r
}
if r < utf8.RuneSelf && unicode.IsPrint(r) {
return '_'
}
return -1
}
metricName := strings.Map(mapInvalidMetricRunes, prefix+key)
if metricName == "" || unicode.IsDigit(rune(metricName[0])) {
metricName = "_" + metricName
}
d := &prometheusMetricDetails{
Name: metricName,
Type: typ,
Label: label,
}
prometheusMetricCache.Store(cachekey, d)
return d.Name, d.Type, d.Label
}
func writePromExpVar(w io.Writer, prefix string, kv expvar.KeyValue) {
key := kv.Key
name, typ, label := prometheusMetric(prefix, key)
switch v := kv.Value.(type) {
case *expvar.Int:
fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", name, cmp.Or(typ, "counter"), name, v.Value())
return
case *expvar.Float:
fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", name, cmp.Or(typ, "gauge"), name, v.Value())
return
case *metrics.Set:
v.Do(func(kv expvar.KeyValue) {
writePromExpVar(w, name+"_", kv)
})
return
case PrometheusWriter:
v.WritePrometheus(w, name)
return
case PrometheusMetricsReflectRooter:
root := v.PrometheusMetricsReflectRoot()
rv := reflect.ValueOf(root)
if rv.Type().Kind() == reflect.Ptr {
if rv.IsNil() {
return
}
rv = rv.Elem()
}
if rv.Type().Kind() != reflect.Struct {
fmt.Fprintf(w, "# skipping expvar %q; unknown root type\n", name)
return
}
foreachExportedStructField(rv, func(fieldOrJSONName, metricType string, rv reflect.Value) {
mname := name + "_" + fieldOrJSONName
switch rv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", mname, metricType, mname, rv.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", mname, metricType, mname, rv.Uint())
case reflect.Float32, reflect.Float64:
fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", mname, metricType, mname, rv.Float())
case reflect.Struct:
if rv.CanAddr() {
// Slight optimization, not copying big structs if they're addressable:
writePromExpVar(w, name+"_", expvar.KeyValue{Key: fieldOrJSONName, Value: expVarPromStructRoot{rv.Addr().Interface()}})
} else {
writePromExpVar(w, name+"_", expvar.KeyValue{Key: fieldOrJSONName, Value: expVarPromStructRoot{rv.Interface()}})
}
}
return
})
return
}
if typ == "" {
var funcRet string
if f, ok := kv.Value.(expvar.Func); ok {
v := f()
if ms, ok := v.(runtime.MemStats); ok && name == "memstats" {
writeMemstats(w, &ms)
return
}
if vs, ok := v.(string); ok && strings.HasSuffix(name, "version") {
fmt.Fprintf(w, "%s{version=%q} 1\n", name, vs)
return
}
switch v := v.(type) {
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr, float32, float64:
fmt.Fprintf(w, "%s %v\n", name, v)
return
}
funcRet = fmt.Sprintf(" returning %T", v)
}
switch kv.Value.(type) {
default:
fmt.Fprintf(w, "# skipping expvar %q (Go type %T%s) with undeclared Prometheus type\n", name, kv.Value, funcRet)
return
case *metrics.LabelMap, *expvar.Map:
// Permit typeless LabelMap and expvar.Map for
// compatibility with old expvar-registered
// metrics.LabelMap.
}
}
switch v := kv.Value.(type) {
case expvar.Func:
val := v()
switch val.(type) {
case float64, int64, int:
fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", name, typ, name, val)
default:
fmt.Fprintf(w, "# skipping expvar func %q returning unknown type %T\n", name, val)
}
case *metrics.LabelMap:
if typ != "" {
fmt.Fprintf(w, "# TYPE %s %s\n", name, typ)
}
// LabelMap uses expvar.Map on the inside, which presorts
// keys. The output ordering is deterministic.
v.Do(func(kv expvar.KeyValue) {
fmt.Fprintf(w, "%s{%s=%q} %v\n", name, cmp.Or(v.Label, "label"), kv.Key, kv.Value)
})
case *metrics.Histogram:
v.PromExport(w, name)
case *expvar.Map:
if label != "" && typ != "" {
fmt.Fprintf(w, "# TYPE %s %s\n", name, typ)
v.Do(func(kv expvar.KeyValue) {
fmt.Fprintf(w, "%s{%s=%q} %v\n", name, label, kv.Key, kv.Value)
})
} else {
v.Do(func(kv expvar.KeyValue) {
fmt.Fprintf(w, "%s_%s %v\n", name, kv.Key, kv.Value)
})
}
}
}
// PrometheusWriter is the interface implemented by metrics that can write
// themselves into Prometheus exposition format.
//
// As of 2024-03-25, this is only *metrics.MultiLabelMap.
type PrometheusWriter interface {
WritePrometheus(w io.Writer, name string)
}
var sortedKVsPool = &sync.Pool{New: func() any { return new(sortedKVs) }}
// sortedKV is a KeyValue with a sort key.
type sortedKV struct {
expvar.KeyValue
sortKey string // KeyValue.Key with type prefix removed
}
type sortedKVs struct {
kvs []sortedKV
}
// Handler is an HTTP handler to write expvar values into the
// prometheus export format:
//
// https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md
//
// It makes the following assumptions:
//
// - *expvar.Int are counters (unless marked as a gauge_; see below)
// - a *tailscale/metrics.Set is descended into, joining keys with
// underscores. So use underscores as your metric names.
// - an expvar named starting with "gauge_" or "counter_" is of that
// Prometheus type, and has that prefix stripped.
// - anything else is untyped and thus not exported.
// - expvar.Func can return an int or int64 (for now) and anything else
// is not exported.
//
// This will evolve over time, or perhaps be replaced.
func Handler(w http.ResponseWriter, r *http.Request) {
ExpvarDoHandler(expvarDo)(w, r)
}
// ExpvarDoHandler handler returns a Handler like above, but takes an optional
// expvar.Do func allow the usage of alternative containers of metrics, other
// than the global expvar.Map.
func ExpvarDoHandler(expvarDoFunc func(f func(expvar.KeyValue))) func(http.ResponseWriter, *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain;version=0.0.4;charset=utf-8")
s := sortedKVsPool.Get().(*sortedKVs)
defer sortedKVsPool.Put(s)
s.kvs = s.kvs[:0]
expvarDoFunc(func(kv expvar.KeyValue) {
s.kvs = append(s.kvs, sortedKV{kv, removeTypePrefixes(kv.Key)})
})
sort.Slice(s.kvs, func(i, j int) bool {
return s.kvs[i].sortKey < s.kvs[j].sortKey
})
for _, e := range s.kvs {
writePromExpVar(w, "", e.KeyValue)
}
}
}
// PrometheusMetricsReflectRooter is an optional interface that expvar.Var implementations
// can implement to indicate that they should be walked recursively with reflect to find
// sets of fields to export.
type PrometheusMetricsReflectRooter interface {
expvar.Var
// PrometheusMetricsReflectRoot returns the struct or struct pointer to walk.
PrometheusMetricsReflectRoot() any
}
var expvarDo = expvar.Do // pulled out for tests
func writeMemstats(w io.Writer, ms *runtime.MemStats) {
out := func(name, typ string, v uint64, help string) {
if help != "" {
fmt.Fprintf(w, "# HELP memstats_%s %s\n", name, help)
}
fmt.Fprintf(w, "# TYPE memstats_%s %s\nmemstats_%s %v\n", name, typ, name, v)
}
g := func(name string, v uint64, help string) { out(name, "gauge", v, help) }
c := func(name string, v uint64, help string) { out(name, "counter", v, help) }
g("heap_alloc", ms.HeapAlloc, "current bytes of allocated heap objects (up/down smoothly)")
c("total_alloc", ms.TotalAlloc, "cumulative bytes allocated for heap objects")
g("sys", ms.Sys, "total bytes of memory obtained from the OS")
c("mallocs", ms.Mallocs, "cumulative count of heap objects allocated")
c("frees", ms.Frees, "cumulative count of heap objects freed")
c("num_gc", uint64(ms.NumGC), "number of completed GC cycles")
}
// sortedStructField is metadata about a struct field used both for sorting once
// (by structTypeSortedFields) and at serving time (by
// foreachExportedStructField).
type sortedStructField struct {
Index int // index of struct field in struct
Name string // struct field name, or "json" name
SortName string // Name with "foo_" type prefixes removed
MetricType string // the "metrictype" struct tag
StructFieldType *reflect.StructField
}
var structSortedFieldsCache sync.Map // reflect.Type => []sortedStructField
// structTypeSortedFields returns the sorted fields of t, caching as needed.
func structTypeSortedFields(t reflect.Type) []sortedStructField {
if v, ok := structSortedFieldsCache.Load(t); ok {
return v.([]sortedStructField)
}
fields := make([]sortedStructField, 0, t.NumField())
for i, n := 0, t.NumField(); i < n; i++ {
sf := t.Field(i)
name := sf.Name
if v := sf.Tag.Get("json"); v != "" {
v, _, _ = strings.Cut(v, ",")
if v == "-" {
// Skip it, regardless of its metrictype.
continue
}
if v != "" {
name = v
}
}
fields = append(fields, sortedStructField{
Index: i,
Name: name,
SortName: removeTypePrefixes(name),
MetricType: sf.Tag.Get("metrictype"),
StructFieldType: &sf,
})
}
sort.Slice(fields, func(i, j int) bool {
return fields[i].SortName < fields[j].SortName
})
structSortedFieldsCache.Store(t, fields)
return fields
}
// removeTypePrefixes returns s with the first "foo_" prefix in prefixesToTrim
// removed.
func removeTypePrefixes(s string) string {
for _, prefix := range prefixesToTrim {
if trimmed, ok := strings.CutPrefix(s, prefix); ok {
return trimmed
}
}
return s
}
// foreachExportedStructField iterates over the fields in sorted order of
// their name, after removing metric prefixes. This is not necessarily the
// order they were declared in the struct.
func foreachExportedStructField(rv reflect.Value, f func(fieldOrJSONName, metricType string, rv reflect.Value)) {
t := rv.Type()
for _, ssf := range structTypeSortedFields(t) {
sf := ssf.StructFieldType
if ssf.MetricType != "" || sf.Type.Kind() == reflect.Struct {
f(ssf.Name, ssf.MetricType, rv.Field(ssf.Index))
} else if sf.Type.Kind() == reflect.Ptr && sf.Type.Elem().Kind() == reflect.Struct {
fv := rv.Field(ssf.Index)
if !fv.IsNil() {
f(ssf.Name, ssf.MetricType, fv.Elem())
}
}
}
}
type expVarPromStructRoot struct{ v any }
func (r expVarPromStructRoot) PrometheusMetricsReflectRoot() any { return r.v }
func (r expVarPromStructRoot) String() string { panic("unused") }
var (
_ PrometheusMetricsReflectRooter = expVarPromStructRoot{}
_ expvar.Var = expVarPromStructRoot{}
)
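A sketch of how the naming conventions above drive the exporter; the metric names and listen address are illustrative:

package main

import (
    "expvar"
    "log"
    "net/http"

    "tailscale.com/tsweb/varz"
)

var (
    // The prefixes pick the Prometheus type and are stripped on export:
    // these are served as "requests_total" (counter) and "queue_depth" (gauge).
    requests   = expvar.NewInt("counter_requests_total")
    queueDepth = expvar.NewInt("gauge_queue_depth")
)

func main() {
    requests.Add(1)
    queueDepth.Set(3)
    http.HandleFunc("/debug/varz", varz.Handler)
    log.Fatal(http.ListenAndServe("localhost:8080", nil))
}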

Some files were not shown because too many files have changed in this diff.