diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 86b1edc1..e0c0d05d 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -15,7 +15,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v2 with: - go-version: '1.20' + go-version: '1.22' check-latest: true - name: Set up Docker uses: crazy-max/ghaction-setup-docker@v3 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 19b860d1..83919bed 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -13,7 +13,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v2 with: - go-version: '1.20' + go-version: '1.22' check-latest: true - name: Checkout code uses: actions/checkout@v2 @@ -105,7 +105,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v2 with: - go-version: '1.20' + go-version: '1.22' check-latest: true - name: Checkout code uses: actions/checkout@v2 @@ -179,7 +179,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v2 with: - go-version: '1.20' + go-version: '1.22' check-latest: true - name: Checkout code uses: actions/checkout@v2 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 32095c98..9c7c523c 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -14,7 +14,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v2 with: - go-version: '1.20' + go-version: '1.22' check-latest: true - name: Push image to docker hub run: | @@ -31,7 +31,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v2 with: - go-version: '1.20' + go-version: '1.22' check-latest: true - name: Setup Minikube id: minikube @@ -90,7 +90,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v2 with: - go-version: '1.20' + go-version: '1.22' check-latest: true - name: Set up Docker uses: crazy-max/ghaction-setup-docker@v3 @@ -154,7 +154,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v2 with: - go-version: '1.20' + go-version: '1.22' - name: Set up Docker uses: 
crazy-max/ghaction-setup-docker@v3 diff --git a/.github/workflows/upload_release.yml b/.github/workflows/upload_release.yml index c8300213..b1c1720d 100644 --- a/.github/workflows/upload_release.yml +++ b/.github/workflows/upload_release.yml @@ -25,7 +25,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v2 with: - go-version: '1.20' + go-version: '1.22' check-latest: true - name: Checkout code uses: actions/checkout@v2 diff --git a/build/Dockerfile b/build/Dockerfile index 58300483..12df9034 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -1,5 +1,5 @@ FROM envoyproxy/envoy:v1.25.0 AS envoy -FROM golang:1.20 AS builder +FROM golang:1.22 AS builder ARG BASE=github.com/wencaiwulue/kubevpn COPY . /go/src/$BASE diff --git a/build/dlv.Dockerfile b/build/dlv.Dockerfile index 2cfe5296..c2b9cba9 100644 --- a/build/dlv.Dockerfile +++ b/build/dlv.Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20 as delve +FROM golang:1.22 as delve RUN curl --location --output delve-1.20.1.tar.gz https://github.com/go-delve/delve/archive/v1.20.1.tar.gz \ && tar xzf delve-1.20.1.tar.gz RUN cd delve-1.20.1 && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o /go/dlv -ldflags '-extldflags "-static"' ./cmd/dlv/ diff --git a/build/local.Dockerfile b/build/local.Dockerfile index cd3b2de6..12ba5edd 100644 --- a/build/local.Dockerfile +++ b/build/local.Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20 AS builder +FROM golang:1.22 AS builder RUN go env -w GO111MODULE=on && go env -w GOPROXY=https://goproxy.cn,direct RUN go install github.com/go-delve/delve/cmd/dlv@latest diff --git a/cmd/kubevpn/cmds/connect.go b/cmd/kubevpn/cmds/connect.go index 3dfd2a6e..dc5546b8 100644 --- a/cmd/kubevpn/cmds/connect.go +++ b/cmd/kubevpn/cmds/connect.go @@ -49,7 +49,15 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command { `)), PreRunE: func(cmd *cobra.Command, args []string) error { // startup daemon process and sudo process - return daemon.StartupDaemon(cmd.Context()) + err := daemon.StartupDaemon(cmd.Context()) + 
if err != nil { + return err + } + // not support temporally + if connect.Engine == config.EngineGvisor { + return fmt.Errorf(`not support type engine: %s, support ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw) + } + return nil }, RunE: func(cmd *cobra.Command, args []string) error { bytes, ns, err := util.ConvertToKubeconfigBytes(f) diff --git a/go.mod b/go.mod index e0bbe07a..533fde2b 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/wencaiwulue/kubevpn/v2 -go 1.20 +go 1.22 require ( github.com/cilium/ipam v0.0.0-20220824141044-46ef3d556735 @@ -33,21 +33,21 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.5.1 - golang.org/x/crypto v0.18.0 + golang.org/x/crypto v0.21.0 golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3 - golang.org/x/net v0.20.0 + golang.org/x/net v0.21.0 golang.org/x/oauth2 v0.16.0 golang.org/x/sync v0.6.0 - golang.org/x/sys v0.16.0 + golang.org/x/sys v0.18.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224 golang.zx2c4.com/wireguard v0.0.0-20220920152132-bb719d3a6e2c golang.zx2c4.com/wireguard/windows v0.5.3 google.golang.org/grpc v1.61.0 - google.golang.org/protobuf v1.32.0 + google.golang.org/protobuf v1.33.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 - gvisor.dev/gvisor v0.0.0-20230603040744-5c9219dedd33 + gvisor.dev/gvisor v0.0.0-20240331093445-9d995324d058 k8s.io/api v0.29.0 k8s.io/apimachinery v0.29.0 k8s.io/cli-runtime v0.29.0 @@ -220,7 +220,7 @@ require ( go4.org/intern v0.0.0-20230525184215-6c62f75575cb // indirect go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 // indirect golang.org/x/mod v0.15.0 // indirect - golang.org/x/term v0.16.0 // indirect + golang.org/x/term v0.18.0 // indirect golang.org/x/tools v0.17.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect diff --git a/go.sum 
b/go.sum index e4bc569d..a6bb9b48 100644 --- a/go.sum +++ b/go.sum @@ -80,6 +80,7 @@ github.com/DataDog/go-sqllexer v0.0.10/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65s github.com/DataDog/go-tuf v1.0.2-0.5.2 h1:EeZr937eKAWPxJ26IykAdWA4A0jQXJgkhUjqEI/w7+I= github.com/DataDog/go-tuf v1.0.2-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/gostackparse v0.7.0 h1:i7dLkXHvYzHV308hnkvVGDL3BR4FWl7IsXNPz/IGQh4= +github.com/DataDog/gostackparse v0.7.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM= github.com/DataDog/sketches-go v1.4.4 h1:dF52vzXRFSPOj2IjXSWLvXq3jubL4CI69kwYjJ1w5Z8= github.com/DataDog/sketches-go v1.4.4/go.mod h1:XR0ns2RtEEF09mDKXiKZiQg+nfZStrq1ZuL1eezeZe0= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= @@ -126,6 +127,7 @@ github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4t github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.49.13 h1:f4mGztsgnx2dR9r8FQYa9YW/RsKb+N7bgef4UGrOW1Y= @@ -301,6 +303,7 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= 
+github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= @@ -431,6 +434,7 @@ github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ4 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= +github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= @@ -446,6 +450,7 @@ github.com/go-openapi/swag v0.22.7 h1:JWrc1uc/P9cSomxfnsFSVWoE1FW6bNbrVPmpQYpCcR github.com/go-openapi/swag v0.22.7/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0= github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= @@ -611,6 +616,7 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ 
github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -642,8 +648,10 @@ github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZ github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo= github.com/jinzhu/gorm v1.9.16 h1:+IyIjPEABKRpsu/F8OvDPy9fyQlgsg2luMV2ZIH5i5o= +github.com/jinzhu/gorm v1.9.16/go.mod h1:G3LB3wezTOWM2ITLzPxEXgSkOXAntiLHS7UdBefADcs= github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -684,6 +692,7 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod 
h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -807,6 +816,7 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -851,6 +861,7 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/pelletier/go-toml v1.9.1/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= @@ -863,8 
+874,10 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/profile v1.5.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus-community/pro-bing v0.1.0 h1:zjzLGhfNPP0bP1OlzGB+SJcguOViw7df12LPg2vUJh8= github.com/prometheus-community/pro-bing v0.1.0/go.mod h1:BpWlHurD9flHtzq8wrh8QGWYz9ka9z9ZJAyOel8ej58= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -910,6 +923,7 @@ github.com/quic-go/qtls-go1-20 v0.4.1/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58 github.com/quic-go/quic-go v0.40.1 h1:X3AGzUNFs0jVuO3esAGnTfvdgvL4fq655WaOi1snv1Q= github.com/quic-go/quic-go v0.40.1/go.mod h1:PeN7kuVJ4xZbxSv/4OX6S1USOX8MJvydwpTx31vx60c= github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 h1:Qp27Idfgi6ACvFQat5+VJvlYToylpM/hcyLBI3WaKPA= +github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052/go.mod h1:uvX/8buq8uVeiZiFht+0lqSLBHF+uGV8BrTv8W/SIwk= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= @@ -918,6 +932,7 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So 
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -930,6 +945,7 @@ github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvW github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002/go.mod h1:/yeG0My1xr/u+HZrFQ1tOQQQQrOawfyMUH13ai5brBc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= @@ -947,9 +963,11 @@ github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:s github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod 
h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.3.3 h1:p5gZEKLYoL7wh8VrJesMaYeNxdEd1v3cb4irOk9zB54= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= @@ -980,6 +998,7 @@ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoH github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.1 h1:4VhoImhV/Bm0ToFkXFi8hXNXwpDRZ/ynw3amt82mzq0= +github.com/stretchr/objx v0.5.1/go.mod h1:/iHQpkQwBD6DLUmQ4pE+s1TXdob1mORJ4/UFdrifcy0= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -992,6 +1011,7 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod 
h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1084,6 +1104,7 @@ go.uber.org/automaxprocs v1.5.1 h1:e1YG66Lrk73dn4qhg8WFSvhF0JuFQF0ERIp4rpuV8Qk= go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -1119,8 +1140,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1200,8 +1221,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net 
v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1300,16 +1321,16 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1472,8 +1493,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/DataDog/dd-trace-go.v1 v1.58.1 h1:zhVNyN5V9G7LVuDh44q3wkcbQwtjIsmmUCieayojNYo= gopkg.in/DataDog/dd-trace-go.v1 v1.58.1/go.mod h1:SmnEjjV9ZQr4MWRSUYEpoPyNtmtRK5J6UuJdAma+Yxw= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= @@ -1486,6 +1507,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= @@ -1522,9 +1544,11 @@ gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81 gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= -gvisor.dev/gvisor v0.0.0-20230603040744-5c9219dedd33 h1:64QentohifmKGeTgJCHilDgfmQVuYE45fsaS9psJ3zY= -gvisor.dev/gvisor v0.0.0-20230603040744-5c9219dedd33/go.mod h1:sQuqOkxbfJq/GS2uSnqHphtXclHyk/ZrAGhZBxxsq6g= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= +gvisor.dev/gvisor v0.0.0-20240331093445-9d995324d058 h1:sEwjOht+wejYFSYjiG20DEhe0pHyOjMq3j/F9fqlZVY= +gvisor.dev/gvisor v0.0.0-20240331093445-9d995324d058/go.mod h1:NQHVAzMwvZ+Qe3ElSiHmq9RUm1MdNHpUZ52fiEqvn+0= honnef.co/go/gotraceui v0.2.0 h1:dmNsfQ9Vl3GwbiVD7Z8d/osC6WtGGrasyrC2suc4ZIQ= +honnef.co/go/gotraceui v0.2.0/go.mod h1:qHo4/W75cA3bX0QQoSvDjbJa4R8mAyyFjbWAj63XElc= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pkg/core/gvisorudpforwarder.go b/pkg/core/gvisorudpforwarder.go index c2b1c6c5..b1bfe6b6 100644 --- 
a/pkg/core/gvisorudpforwarder.go +++ b/pkg/core/gvisorudpforwarder.go @@ -55,7 +55,7 @@ func UDPForwarder(s *stack.Stack) func(id stack.TransportEndpointID, pkt *stack. log.Debugf("[TUN-UDP] Error: can not connect: %v", err) return } - conn := gonet.NewUDPConn(s, w, endpoint) + conn := gonet.NewUDPConn(w, endpoint) go func() { defer conn.Close() defer remote.Close() diff --git a/pkg/core/tunendpoint.go b/pkg/core/tunendpoint.go index 36b48b58..7c404a08 100755 --- a/pkg/core/tunendpoint.go +++ b/pkg/core/tunendpoint.go @@ -3,7 +3,6 @@ package core import ( "context" "net" - "sync" "github.com/google/gopacket/layers" log "github.com/sirupsen/logrus" @@ -19,203 +18,105 @@ import ( "github.com/wencaiwulue/kubevpn/v2/pkg/config" ) -var _ stack.LinkEndpoint = (*tunEndpoint)(nil) - -// tunEndpoint /Users/naison/go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20220422052705-39790bd3a15a/pkg/tcpip/link/tun/device.go:122 -type tunEndpoint struct { - ctx context.Context - tun net.Conn - once sync.Once - endpoint *channel.Endpoint - engine config.Engine - - in chan<- *DataElem - out chan *DataElem -} - -// WritePackets writes packets. Must not be called with an empty list of -// packet buffers. -// -// WritePackets may modify the packet buffers, and takes ownership of the PacketBufferList. -// it is not safe to use the PacketBufferList after a call to WritePackets. -func (e *tunEndpoint) WritePackets(p stack.PacketBufferList) (int, tcpip.Error) { - return e.endpoint.WritePackets(p) -} - -// MTU is the maximum transmission unit for this endpoint. This is -// usually dictated by the backing physical network; when such a -// physical network doesn't exist, the limit is generally 64k, which -// includes the maximum size of an IP packet. -func (e *tunEndpoint) MTU() uint32 { - return uint32(config.DefaultMTU) -} - -// MaxHeaderLength returns the maximum size the data link (and -// lower level layers combined) headers can have. 
Higher levels use this -// information to reserve space in the front of the packets they're -// building. -func (e *tunEndpoint) MaxHeaderLength() uint16 { - return 0 -} - -// LinkAddress returns the link address (typically a MAC) of the -// endpoint. -func (e *tunEndpoint) LinkAddress() tcpip.LinkAddress { - return e.endpoint.LinkAddress() -} - -// Capabilities returns the set of capabilities supported by the -// endpoint. -func (e *tunEndpoint) Capabilities() stack.LinkEndpointCapabilities { - return e.endpoint.LinkEPCapabilities -} - -// Attach attaches the data link layer endpoint to the network-layer -// dispatcher of the stack. -// -// Attach is called with a nil dispatcher when the endpoint's NIC is being -// removed. -func (e *tunEndpoint) Attach(dispatcher stack.NetworkDispatcher) { - e.endpoint.Attach(dispatcher) - // queue --> tun - e.once.Do(func() { - go func() { - for { - select { - case <-e.ctx.Done(): - return - default: - } - read := e.endpoint.ReadContext(e.ctx) - if !read.IsNil() { - bb := read.ToView().AsSlice() - i := config.LPool.Get().([]byte)[:] - n := copy(i, bb) - bb = nil - e.out <- NewDataElem(i[:], n, nil, nil) - } - } - }() - // tun --> dispatcher - go func() { - // full(all use gvisor), mix(cluster network use gvisor), raw(not use gvisor) - for { - bytes := config.LPool.Get().([]byte)[:] - read, err := e.tun.Read(bytes[:]) - if err != nil { - // if context is still going - if e.ctx.Err() == nil { - log.Fatalf("[TUN]: read from tun failed: %v", err) - } else { - log.Info("tun device closed") - } - return - } - if read == 0 { - log.Warnf("[TUN]: read from tun length is %d", read) - continue - } - // Try to determine network protocol number, default zero. 
- var protocol tcpip.NetworkProtocolNumber - var ipProtocol int - var src, dst net.IP - // TUN interface with IFF_NO_PI enabled, thus - // we need to determine protocol from version field - version := bytes[0] >> 4 - if version == 4 { - protocol = header.IPv4ProtocolNumber - ipHeader, err := ipv4.ParseHeader(bytes[:read]) - if err != nil { - log.Errorf("parse ipv4 header failed: %s", err.Error()) - continue - } - ipProtocol = ipHeader.Protocol - src = ipHeader.Src - dst = ipHeader.Dst - } else if version == 6 { - protocol = header.IPv6ProtocolNumber - ipHeader, err := ipv6.ParseHeader(bytes[:read]) - if err != nil { - log.Errorf("parse ipv6 header failed: %s", err.Error()) - continue - } - ipProtocol = ipHeader.NextHeader - src = ipHeader.Src - dst = ipHeader.Dst - } else { - log.Debugf("[TUN-gvisor] unknown packet version %d", version) - continue - } - // only tcp and udp needs to distinguish transport engine - // gvisor: all network use gvisor - // mix: cluster network use gvisor, diy network use raw - // raw: all network use raw - if (ipProtocol == int(layers.IPProtocolUDP) || ipProtocol == int(layers.IPProtocolUDPLite) || ipProtocol == int(layers.IPProtocolTCP)) && - (e.engine == config.EngineGvisor || (e.engine == config.EngineMix && (!config.CIDR.Contains(dst) && !config.CIDR6.Contains(dst)))) { - pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{ - ReserveHeaderBytes: 0, - Payload: buffer.MakeWithData(bytes[:read]), - }) - //defer pkt.DecRef() - config.LPool.Put(bytes[:]) - e.endpoint.InjectInbound(protocol, pkt) - log.Debugf("[TUN-%s] IP-Protocol: %s, SRC: %s, DST: %s, Length: %d", layers.IPProtocol(ipProtocol).String(), layers.IPProtocol(ipProtocol).String(), src.String(), dst, read) - } else { - log.Debugf("[TUN-RAW] IP-Protocol: %s, SRC: %s, DST: %s, Length: %d", layers.IPProtocol(ipProtocol).String(), src.String(), dst, read) - e.in <- NewDataElem(bytes[:], read, src, dst) - } - } - }() - go func() { - for elem := range e.out { - _, err := 
e.tun.Write(elem.Data()[:elem.Length()]) - config.LPool.Put(elem.Data()[:]) - if err != nil { - log.Fatalf("[TUN] Fatal: failed to write data to tun device: %v", err) - } - } - }() - }) -} - -// IsAttached returns whether a NetworkDispatcher is attached to the -// endpoint. -func (e *tunEndpoint) IsAttached() bool { - return e.endpoint.IsAttached() -} - -// Wait waits for any worker goroutines owned by the endpoint to stop. -// -// For now, requesting that an endpoint's worker goroutine(s) stop is -// implementation specific. -// -// Wait will not block if the endpoint hasn't started any goroutines -// yet, even if it might later. -func (e *tunEndpoint) Wait() { - return -} - -// ARPHardwareType returns the ARPHRD_TYPE of the link endpoint. -// -// See: -// https://github.com/torvalds/linux/blob/aa0c9086b40c17a7ad94425b3b70dd1fdd7497bf/include/uapi/linux/if_arp.h#L30 -func (e *tunEndpoint) ARPHardwareType() header.ARPHardwareType { - return header.ARPHardwareNone -} - -// AddHeader adds a link layer header to the packet if required. 
-func (e *tunEndpoint) AddHeader(ptr stack.PacketBufferPtr) { - return -} - func NewTunEndpoint(ctx context.Context, tun net.Conn, mtu uint32, engine config.Engine, in chan<- *DataElem, out chan *DataElem) stack.LinkEndpoint { addr, _ := tcpip.ParseMACAddress("02:03:03:04:05:06") - return &tunEndpoint{ - ctx: ctx, - tun: tun, - endpoint: channel.New(tcp.DefaultReceiveBufferSize, mtu, addr), - engine: engine, - in: in, - out: out, - } + endpoint := channel.New(tcp.DefaultReceiveBufferSize, mtu, addr) + + go func() { + for { + select { + case <-ctx.Done(): + return + default: + } + read := endpoint.ReadContext(ctx) + if read != nil { + bb := read.ToView().AsSlice() + i := config.LPool.Get().([]byte)[:] + n := copy(i, bb) + bb = nil + out <- NewDataElem(i[:], n, nil, nil) + } + } + }() + // tun --> dispatcher + go func() { + // full(all use gvisor), mix(cluster network use gvisor), raw(not use gvisor) + for { + bytes := config.LPool.Get().([]byte)[:] + read, err := tun.Read(bytes[:]) + if err != nil { + // if context is still going + if ctx.Err() == nil { + log.Fatalf("[TUN]: read from tun failed: %v", err) + } else { + log.Info("tun device closed") + } + return + } + if read == 0 { + log.Warnf("[TUN]: read from tun length is %d", read) + continue + } + // Try to determine network protocol number, default zero. 
+ var protocol tcpip.NetworkProtocolNumber + var ipProtocol int + var src, dst net.IP + // TUN interface with IFF_NO_PI enabled, thus + // we need to determine protocol from version field + version := bytes[0] >> 4 + if version == 4 { + protocol = header.IPv4ProtocolNumber + ipHeader, err := ipv4.ParseHeader(bytes[:read]) + if err != nil { + log.Errorf("parse ipv4 header failed: %s", err.Error()) + continue + } + ipProtocol = ipHeader.Protocol + src = ipHeader.Src + dst = ipHeader.Dst + } else if version == 6 { + protocol = header.IPv6ProtocolNumber + ipHeader, err := ipv6.ParseHeader(bytes[:read]) + if err != nil { + log.Errorf("parse ipv6 header failed: %s", err.Error()) + continue + } + ipProtocol = ipHeader.NextHeader + src = ipHeader.Src + dst = ipHeader.Dst + } else { + log.Debugf("[TUN-gvisor] unknown packet version %d", version) + continue + } + // only tcp and udp needs to distinguish transport engine + // gvisor: all network use gvisor + // mix: cluster network use gvisor, diy network use raw + // raw: all network use raw + if (ipProtocol == int(layers.IPProtocolUDP) || ipProtocol == int(layers.IPProtocolUDPLite) || ipProtocol == int(layers.IPProtocolTCP)) && + (engine == config.EngineGvisor || (engine == config.EngineMix && (!config.CIDR.Contains(dst) && !config.CIDR6.Contains(dst)))) { + pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{ + ReserveHeaderBytes: 0, + Payload: buffer.MakeWithData(bytes[:read]), + }) + //defer pkt.DecRef() + config.LPool.Put(bytes[:]) + endpoint.InjectInbound(protocol, pkt) + log.Debugf("[TUN-%s] IP-Protocol: %s, SRC: %s, DST: %s, Length: %d", layers.IPProtocol(ipProtocol).String(), layers.IPProtocol(ipProtocol).String(), src.String(), dst, read) + } else { + log.Debugf("[TUN-RAW] IP-Protocol: %s, SRC: %s, DST: %s, Length: %d", layers.IPProtocol(ipProtocol).String(), src.String(), dst, read) + in <- NewDataElem(bytes[:], read, src, dst) + } + } + }() + go func() { + for elem := range out { + _, err := 
tun.Write(elem.Data()[:elem.Length()]) + config.LPool.Put(elem.Data()[:]) + if err != nil { + log.Fatalf("[TUN] Fatal: failed to write data to tun device: %v", err) + } + } + }() + return endpoint } diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/README.md b/vendor/github.com/DataDog/go-sqllexer/testdata/README.md deleted file mode 100644 index 1237e812..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# Test Suite - -The test suite is a collection of test SQL statements that are organized per DBMS. The test suite is used to test the SQL obfuscator and normalizer for correctness and completeness. It is also intended to cover DBMS specific edge cases, that are not covered by the generic unit tests. - -## Test Suite Structure - -The test suite is organized in the following way: - -```text -testdata -├── README.md -├── dbms1 -│   ├── query_type1 -│   │   ├── test1.json -│   └── query_type2 -│   ├── test1.json -dbms_test.go -``` - -The test suite is organized per DBMS. Each DBMS has a number of query types. Each query type has a number of test cases. Each test case consists of a SQL statement and the expected output of the obfuscator/normalizer. - -## Test File Format - -The test files are simple json files where each test case comes with one input SQL statements and an array of expected outputs. -Each expected output can optionally come with a configuration for the obfuscator and normalizer. The configuration is optional, because the default configuration is used if no configuration is provided. 
- -testcase.json: - -```json -{ - "input": "SELECT * FROM table1", - "outputs": [ - { - // Test case 1 - "expected": "SELECT * FROM table1", - "obfuscator_config": {...}, // optional - "normalizer_config": {...} // optional - }, - { - // Test case 2 - "expected": "SELECT * FROM table1", - "obfuscator_config": {...}, // optional - "normalizer_config": {...} // optional - } - ] -} -``` - -## How to write a new test case - -1. Create a new directory for the DBMS, if it does not exist yet. (this step is often not necessary) -2. Create a new directory for the query type, if it does not exist yet. -3. Create a new test case `.json` file with the SQL statement and expected output. Refer to the [test file format](#test-file-format) or `testcase struct` in [dbms_test.go](../dbms_test.go) for more details. -4. Run the test suite to verify that the test case is working as expected. diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/extremely-complex-poorly-written-sql.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/extremely-complex-poorly-written-sql.json deleted file mode 100644 index a8442bdb..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/extremely-complex-poorly-written-sql.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "WITH ComplexCTE AS (SELECT t1.id, t2.amount, ROW_NUMBER() OVER(PARTITION BY t1.customer_id ORDER BY t2.amount DESC) AS rn FROM (SELECT id, customer_id, status FROM orders WHERE YEAR(order_date) = YEAR(GETDATE()) AND status NOT IN ('Cancelled', 'Returned')) t1 INNER JOIN (SELECT order_id, SUM(amount) AS amount FROM order_details GROUP BY order_id) t2 ON t1.id = t2.order_id WHERE t2.amount > 500), SecondCTE AS (SELECT c1.*, c2.name, c2.region FROM ComplexCTE c1 INNER JOIN customers c2 ON c1.customer_id = c2.id WHERE c2.region IN ('East', 'West') AND c1.rn < 5) SELECT s.id, s.name, s.amount, p.product_name, CASE WHEN s.amount > 1000 THEN 'High' ELSE 'Low' END AS ValueCategory FROM 
SecondCTE s LEFT JOIN (SELECT DISTINCT p1.order_id, p2.product_name FROM order_products p1 INNER JOIN products p2 ON p1.product_id = p2.id) p ON s.id = p.order_id WHERE s.region = 'East' AND s.status LIKE '%Active%' ORDER BY s.amount DESC, s.name;", - "outputs": [ - { - "expected": "WITH ComplexCTE AS ( SELECT t?.id, t?.amount, ROW_NUMBER ( ) OVER ( PARTITION BY t?.customer_id ORDER BY t?.amount DESC ) FROM ( SELECT id, customer_id, status FROM orders WHERE YEAR ( order_date ) = YEAR ( GETDATE ( ) ) AND status NOT IN ( ? ) ) t? INNER JOIN ( SELECT order_id, SUM ( amount ) FROM order_details GROUP BY order_id ) t? ON t?.id = t?.order_id WHERE t?.amount > ? ), SecondCTE AS ( SELECT c?. *, c?.name, c?.region FROM ComplexCTE c? INNER JOIN customers c? ON c?.customer_id = c?.id WHERE c?.region IN ( ? ) AND c?.rn < ? ) SELECT s.id, s.name, s.amount, p.product_name, CASE WHEN s.amount > ? THEN ? ELSE ? END FROM SecondCTE s LEFT JOIN ( SELECT DISTINCT p?.order_id, p?.product_name FROM order_products p? INNER JOIN products p? ON p?.product_id = p?.id ) p ON s.id = p.order_id WHERE s.region = ? AND s.status LIKE ? 
ORDER BY s.amount DESC, s.name", - "statement_metadata": { - "size": 79, - "tables": ["orders", "order_details", "ComplexCTE", "customers", "SecondCTE", "order_products", "products"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/indexed-views.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/indexed-views.json deleted file mode 100644 index 5d78886d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/indexed-views.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "input": "CREATE VIEW dbo.OrderSummary WITH SCHEMABINDING AS SELECT customer_id, COUNT_BIG(*) AS TotalOrders, SUM(amount) AS TotalAmount FROM dbo.orders GROUP BY customer_id; CREATE UNIQUE CLUSTERED INDEX IDX_V1 ON dbo.OrderSummary(customer_id);", - "outputs": [ - { - "expected": "CREATE VIEW dbo.OrderSummary WITH SCHEMABINDING AS SELECT customer_id, COUNT_BIG ( * ), SUM ( amount ) FROM dbo.orders GROUP BY customer_id; CREATE UNIQUE CLUSTERED INDEX IDX_V? 
ON dbo.OrderSummary ( customer_id )", - "statement_metadata": { - "size": 22, - "tables": ["dbo.orders"], - "commands": ["CREATE", "SELECT"], - "comments": [], - "procedures": [], - "views": ["dbo.OrderSummary"] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/partitioned-tables-indexes.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/partitioned-tables-indexes.json deleted file mode 100644 index c06612ec..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/partitioned-tables-indexes.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE PARTITION FUNCTION myRangePF1 (INT) AS RANGE LEFT FOR VALUES (1, 100, 1000); CREATE PARTITION SCHEME myScheme AS PARTITION myRangePF1 TO ([PRIMARY], [SECONDARY], [TERTIARY]); CREATE TABLE partitionedTable (id INT) ON myScheme(id);", - "outputs": [ - { - "expected": "CREATE PARTITION FUNCTION myRangePF? ( INT ) LEFT FOR VALUES ( ? ); CREATE PARTITION SCHEME myScheme myRangePF? 
TO ( PRIMARY, SECONDARY, TERTIARY ); CREATE TABLE partitionedTable ( id INT ) ON myScheme ( id )", - "statement_metadata": { - "size": 22, - "tables": ["partitionedTable"], - "commands": ["CREATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/super-complex-poorly-written-sql.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/super-complex-poorly-written-sql.json deleted file mode 100644 index 41bd0e0e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/super-complex-poorly-written-sql.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT a.*, b.totalAmount, CASE WHEN c.id IS NOT NULL THEN d.description ELSE 'N/A' END AS description FROM (SELECT id, name, status, customer_id FROM orders WHERE order_date > DATEADD(month, -6, GETDATE()) AND status IN ('Pending', 'Completed') AND customer_id IN (SELECT customer_id FROM customers WHERE region IN ('East', 'West') AND last_order_date > DATEADD(year, -1, GETDATE())) ORDER BY name DESC) a INNER JOIN (SELECT order_id, SUM(amount) AS totalAmount FROM order_details GROUP BY order_id) b ON a.id = b.order_id LEFT JOIN audit_log c ON a.id = c.order_id LEFT JOIN (SELECT DISTINCT status, description FROM status_descriptions) d ON a.status = d.status WHERE a.name LIKE '%test%' AND (b.totalAmount > 1000 OR b.totalAmount IS NULL) ORDER BY a.order_date DESC, a.name;", - "outputs": [ - { - "expected": "SELECT a. *, b.totalAmount, CASE WHEN c.id IS NOT ? THEN d.description ELSE ? END FROM ( SELECT id, name, status, customer_id FROM orders WHERE order_date > DATEADD ( month, ?, GETDATE ( ) ) AND status IN ( ? ) AND customer_id IN ( SELECT customer_id FROM customers WHERE region IN ( ? 
) AND last_order_date > DATEADD ( year, ?, GETDATE ( ) ) ) ORDER BY name DESC ) a INNER JOIN ( SELECT order_id, SUM ( amount ) FROM order_details GROUP BY order_id ) b ON a.id = b.order_id LEFT JOIN audit_log c ON a.id = c.order_id LEFT JOIN ( SELECT DISTINCT status, description FROM status_descriptions ) d ON a.status = d.status WHERE a.name LIKE ? AND ( b.totalAmount > ? OR b.totalAmount IS ? ) ORDER BY a.order_date DESC, a.name", - "statement_metadata": { - "size": 66, - "tables": ["orders", "customers", "order_details", "audit_log", "status_descriptions"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/conditional-delete-case.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/conditional-delete-case.json deleted file mode 100644 index 66a0da8c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/conditional-delete-case.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE status = CASE WHEN order_date < GETDATE() - 90 THEN 'Expired' ELSE 'Active' END;", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE status = CASE WHEN order_date < GETDATE ( ) - ? THEN ? ELSE ? 
END", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-basic.json deleted file mode 100644 index eb9025d8..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-basic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE status = 'Cancelled';", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-cascade.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-cascade.json deleted file mode 100644 index 2e4a3b54..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-cascade.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM customers WHERE region = 'North'; -- Assuming CASCADE DELETE is set up on the foreign key in the orders table", - "outputs": [ - { - "expected": "DELETE FROM customers WHERE region = ?", - "statement_metadata": { - "size": 90, - "tables": ["customers"], - "commands": ["DELETE"], - "comments": ["-- Assuming CASCADE DELETE is set up on the foreign key in the orders table"], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-rowlock-hint.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-rowlock-hint.json deleted file mode 100644 index b5649652..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-rowlock-hint.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - 
"input": "DELETE FROM orders WITH (ROWLOCK) WHERE status = 'Pending';", - "outputs": [ - { - "expected": "DELETE FROM orders WITH ( ROWLOCK ) WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-using-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-using-subquery.json deleted file mode 100644 index 2d739ca4..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-using-subquery.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE customer_id IN (SELECT id FROM customers WHERE region = 'West');", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE customer_id IN ( SELECT id FROM customers WHERE region = ? )", - "statement_metadata": { - "size": 27, - "tables": ["orders", "customers"], - "commands": ["DELETE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-using-table-variable.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-using-table-variable.json deleted file mode 100644 index 91809fe3..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-using-table-variable.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DECLARE @ExpiredOrders TABLE (id INT); INSERT INTO @ExpiredOrders (id) SELECT id FROM orders WHERE order_date < GETDATE() - 365; DELETE FROM orders WHERE id IN (SELECT id FROM @ExpiredOrders);", - "outputs": [ - { - "expected": "DECLARE @ExpiredOrders TABLE ( id INT ); INSERT INTO @ExpiredOrders ( id ) SELECT id FROM orders WHERE order_date < GETDATE ( ) - ?; DELETE FROM orders WHERE id IN ( SELECT id FROM @ExpiredOrders )", - "statement_metadata": { - "size": 24, - "tables": 
["orders"], - "commands": ["INSERT", "SELECT", "DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-cte.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-cte.json deleted file mode 100644 index 69121952..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-cte.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "WITH OldOrders AS (SELECT id FROM orders WHERE order_date < '2022-01-01') DELETE FROM orders WHERE id IN (SELECT id FROM OldOrders);", - "outputs": [ - { - "expected": "WITH OldOrders AS ( SELECT id FROM orders WHERE order_date < ? ) DELETE FROM orders WHERE id IN ( SELECT id FROM OldOrders )", - "statement_metadata": { - "size": 27, - "tables": ["orders", "OldOrders"], - "commands": ["SELECT", "DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-join.json deleted file mode 100644 index 7fa63f36..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-join.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE o FROM orders o INNER JOIN customers c ON o.customer_id = c.id WHERE c.region = 'East' AND o.status = 'Pending';", - "outputs": [ - { - "expected": "DELETE o FROM orders o INNER JOIN customers c ON o.customer_id = c.id WHERE c.region = ? 
AND o.status = ?", - "statement_metadata": { - "size": 25, - "tables": ["orders", "customers"], - "commands": ["DELETE", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-output.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-output.json deleted file mode 100644 index b3373cc5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-output.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders OUTPUT DELETED.* WHERE status = 'Shipped';", - "outputs": [ - { - "expected": "DELETE FROM orders OUTPUT DELETED. * WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-top.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-top.json deleted file mode 100644 index 12116c3b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-top.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE TOP (10) FROM orders WHERE status = 'Pending';", - "outputs": [ - { - "expected": "DELETE TOP ( ? 
) FROM orders WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-basic.json deleted file mode 100644 index fe1d97fd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-basic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, order_date, status) VALUES (1, GETDATE(), 'Pending');", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, order_date, status ) VALUES ( ?, GETDATE ( ), ? )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-default-values.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-default-values.json deleted file mode 100644 index 9c8a08ad..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-default-values.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders DEFAULT VALUES;", - "outputs": [ - { - "expected": "INSERT INTO orders DEFAULT VALUES", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-identity-insert.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-identity-insert.json deleted file mode 100644 index 5fdc3e03..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-identity-insert.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SET IDENTITY_INSERT 
orders ON; INSERT INTO orders (id, customer_id, order_date, status) VALUES (100, 3, GETDATE(), 'Pending'); SET IDENTITY_INSERT orders OFF;", - "outputs": [ - { - "expected": "SET IDENTITY_INSERT orders ON; INSERT INTO orders ( id, customer_id, order_date, status ) VALUES ( ?, GETDATE ( ), ? ); SET IDENTITY_INSERT orders OFF", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-merge.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-merge.json deleted file mode 100644 index 81d75e6e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-merge.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "MERGE INTO orders AS target USING (SELECT customer_id, order_date, status FROM incoming_orders) AS source ON target.id = source.id WHEN NOT MATCHED THEN INSERT (customer_id, order_date, status) VALUES (source.customer_id, source.order_date, source.status);", - "outputs": [ - { - "expected": "MERGE INTO orders USING ( SELECT customer_id, order_date, status FROM incoming_orders ) ON target.id = source.id WHEN NOT MATCHED THEN INSERT ( customer_id, order_date, status ) VALUES ( source.customer_id, source.order_date, source.status )", - "statement_metadata": { - "size": 38, - "tables": ["orders", "incoming_orders"], - "commands": ["MERGE", "SELECT", "INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-output.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-output.json deleted file mode 100644 index d16031fd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-output.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, order_date, 
status) OUTPUT INSERTED.id VALUES (3, GETDATE(), 'Processing');", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, order_date, status ) OUTPUT INSERTED.id VALUES ( ?, GETDATE ( ), ? )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-select-into.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-select-into.json deleted file mode 100644 index 74700113..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-select-into.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT * INTO new_orders FROM orders WHERE status = 'Pending';", - "outputs": [ - { - "expected": "SELECT * INTO new_orders FROM orders WHERE status = ?", - "statement_metadata": { - "size": 22, - "tables": ["new_orders", "orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-subquery-values.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-subquery-values.json deleted file mode 100644 index af7a2a0f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-subquery-values.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO order_totals (order_id, total_amount) VALUES ((SELECT MAX(id) FROM orders), 500);", - "outputs": [ - { - "expected": "INSERT INTO order_totals ( order_id, total_amount ) VALUES ( ( SELECT MAX ( id ) FROM orders ), ? 
)", - "statement_metadata": { - "size": 30, - "tables": ["order_totals", "orders"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-top-orderby.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-top-orderby.json deleted file mode 100644 index 2469fd38..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-top-orderby.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO top_orders (id, amount) SELECT TOP 5 id, amount FROM orders ORDER BY amount DESC;", - "outputs": [ - { - "expected": "INSERT INTO top_orders ( id, amount ) SELECT TOP ? id, amount FROM orders ORDER BY amount DESC", - "statement_metadata": { - "size": 28, - "tables": ["top_orders", "orders"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-values-multiple-rows.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-values-multiple-rows.json deleted file mode 100644 index ec79720b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-values-multiple-rows.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO customers (name, region) VALUES ('John Doe', 'North'), ('Jane Smith', 'South');", - "outputs": [ - { - "expected": "INSERT INTO customers ( name, region ) VALUES ( ? ), ( ? 
)", - "statement_metadata": { - "size": 15, - "tables": ["customers"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-with-select.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-with-select.json deleted file mode 100644 index 30392800..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-with-select.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders_archive (id, customer_id, order_date, status) SELECT id, customer_id, order_date, status FROM orders WHERE status = 'Completed';", - "outputs": [ - { - "expected": "INSERT INTO orders_archive ( id, customer_id, order_date, status ) SELECT id, customer_id, order_date, status FROM orders WHERE status = ?", - "statement_metadata": { - "size": 32, - "tables": ["orders_archive", "orders"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/using-throw-error-handling.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/using-throw-error-handling.json deleted file mode 100644 index 8458f773..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/using-throw-error-handling.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "BEGIN TRY INSERT INTO orders (customer_id, amount) VALUES (1, -100); END TRY BEGIN CATCH THROW; END CATCH;", - "outputs": [ - { - "expected": "BEGIN TRY INSERT INTO orders ( customer_id, amount ) VALUES ( ? 
); END TRY BEGIN CATCH THROW; END CATCH", - "statement_metadata": { - "size": 17, - "tables": ["orders"], - "commands": ["BEGIN", "INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/complex-stored-procedure-multiple-operations.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/complex-stored-procedure-multiple-operations.json deleted file mode 100644 index 485e68de..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/complex-stored-procedure-multiple-operations.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE ManageCustomerOrders @customerId INT AS BEGIN SET NOCOUNT ON; IF NOT EXISTS (SELECT 1 FROM customers WHERE id = @customerId) BEGIN THROW 50001, 'Customer not found.', 1; END; UPDATE orders SET status = 'Reviewed' WHERE customer_id = @customerId AND status = 'Pending'; INSERT INTO audit_log (description) VALUES ('Orders reviewed for customer ' + CAST(@customerId AS NVARCHAR(10))); END;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE ManageCustomerOrders @customerId INT AS BEGIN SET NOCOUNT ON; IF NOT EXISTS (SELECT ? FROM customers WHERE id = @customerId) BEGIN THROW ?, ?, ?; END; UPDATE orders SET status = ? WHERE customer_id = @customerId AND status = ?; INSERT INTO audit_log (description) VALUES (? 
+ CAST(@customerId AS NVARCHAR(?))); END;", - "statement_metadata": { - "size": 78, - "tables": ["customers", "orders", "audit_log"], - "commands": ["CREATE", "ALTER", "BEGIN", "SELECT", "UPDATE", "INSERT"], - "comments": [], - "procedures": ["ManageCustomerOrders"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/complex-stored-procedure-multiple-statements.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/complex-stored-procedure-multiple-statements.json deleted file mode 100644 index 886e455a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/complex-stored-procedure-multiple-statements.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE FullOrderManagement AS\nBEGIN\n-- Comprehensive procedure to manage order lifecycle\n-- It checks, processes, and logs orders.\nSET NOCOUNT ON;\n-- Check for new orders\nUPDATE orders SET status = 'Processing' WHERE status = 'New';\n-- Log the update\nINSERT INTO audit_log (description) VALUES ('Processed new orders.');\n-- Finalize processed orders\nUPDATE orders SET status = 'Finalized' WHERE status = 'Processing';\nEND;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE FullOrderManagement AS BEGIN SET NOCOUNT ON; UPDATE orders SET status = ? WHERE status = ?; INSERT INTO audit_log (description) VALUES (?); UPDATE orders SET status = ? 
WHERE status = ?; END;", - "statement_metadata": { - "size": 223, - "tables": ["orders", "audit_log"], - "commands": ["CREATE", "ALTER", "BEGIN", "UPDATE", "INSERT"], - "comments": ["-- Comprehensive procedure to manage order lifecycle", "-- It checks, processes, and logs orders.", "-- Check for new orders", "-- Log the update", "-- Finalize processed orders"], - "procedures": ["FullOrderManagement"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-comprehensive-logic-explanation.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-comprehensive-logic-explanation.json deleted file mode 100644 index 3036098a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-comprehensive-logic-explanation.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE ValidateOrderDetails AS\nBEGIN\n/*\n Procedure Name: ValidateOrderDetails\n Purpose: To validate the details of orders before processing.\n Detailed Description:\n This procedure runs through each order in the 'orders' table\n and checks if all required details are present.\n It updates the 'order_status' table with 'Valid' or 'Invalid'.\n It's a critical part of the order processing pipeline to ensure data integrity.\n*/\n-- Validation logic\nUPDATE orders SET status = CASE WHEN customer_id IS NOT NULL AND total_amount IS NOT NULL THEN 'Valid' ELSE 'Invalid' END;\nEND;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE ValidateOrderDetails AS BEGIN UPDATE orders SET status = CASE WHEN customer_id IS NOT NULL AND total_amount IS NOT NULL THEN ? ELSE ? 
END; END;", - "statement_metadata": { - "size": 466, - "tables": ["orders"], - "commands": ["CREATE", "ALTER", "BEGIN", "UPDATE"], - "comments": ["/*\n Procedure Name: ValidateOrderDetails\n Purpose: To validate the details of orders before processing.\n Detailed Description:\n This procedure runs through each order in the 'orders' table\n and checks if all required details are present.\n It updates the 'order_status' table with 'Valid' or 'Invalid'.\n It's a critical part of the order processing pipeline to ensure data integrity.\n*/", "-- Validation logic"], - "procedures": ["ValidateOrderDetails"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-conditional-logic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-conditional-logic.json deleted file mode 100644 index 81008418..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-conditional-logic.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE CheckOrderStatus @orderId INT AS\nBEGIN\n-- Checks the status of an order and logs if it's delayed.\n-- This is part of our order monitoring system.\nSET NOCOUNT ON;\nDECLARE @status NVARCHAR(50);\nSELECT @status = status FROM orders WHERE id = @orderId;\nIF @status = 'Delayed'\nBEGIN\n INSERT INTO audit_log (description) VALUES ('Order ' + CAST(@orderId AS NVARCHAR(10)) + ' is delayed.');\nEND\nEND;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE CheckOrderStatus @orderId INT AS BEGIN SET NOCOUNT ON; DECLARE @status NVARCHAR(?); SELECT @status = status FROM orders WHERE id = @orderId; IF @status = 
? BEGIN INSERT INTO audit_log (description) VALUES (? + CAST(@orderId AS NVARCHAR(?)) + ?); END END;", - "statement_metadata": { - "size": 164, - "tables": ["orders", "audit_log"], - "commands": ["CREATE", "ALTER", "BEGIN", "SELECT", "INSERT"], - "comments": ["-- Checks the status of an order and logs if it's delayed.", "-- This is part of our order monitoring system."], - "procedures": ["CheckOrderStatus"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-cursor-temp-table.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-cursor-temp-table.json deleted file mode 100644 index 7f6ad14e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-cursor-temp-table.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE ArchiveOldOrders AS BEGIN SET NOCOUNT ON; DECLARE @orderId INT; DECLARE orderCursor CURSOR FOR SELECT id FROM orders WHERE order_date < GETDATE() - 365; OPEN orderCursor; FETCH NEXT FROM orderCursor INTO @orderId; WHILE @@FETCH_STATUS = 0 BEGIN INSERT INTO orders_archive (id, status) SELECT id, status FROM orders WHERE id = @orderId; FETCH NEXT FROM orderCursor INTO @orderId; END; CLOSE orderCursor; DEALLOCATE orderCursor; END;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE ArchiveOldOrders AS BEGIN SET NOCOUNT ON; DECLARE @orderId INT; DECLARE orderCursor CURSOR FOR SELECT id FROM orders WHERE order_date < GETDATE() - ?; OPEN orderCursor; FETCH NEXT FROM orderCursor INTO @orderId; WHILE @@FETCH_STATUS = ? 
BEGIN INSERT INTO orders_archive (id, status) SELECT id, status FROM orders WHERE id = @orderId; FETCH NEXT FROM orderCursor INTO @orderId; END; CLOSE orderCursor; DEALLOCATE orderCursor; END;", - "statement_metadata": { - "size": 75, - "tables": ["orders", "orderCursor", "orders_archive"], - "commands": ["CREATE", "ALTER", "BEGIN", "SELECT", "INSERT"], - "comments": [], - "procedures": ["ArchiveOldOrders"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-detailed-documentation.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-detailed-documentation.json deleted file mode 100644 index 68166fd5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-detailed-documentation.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE AuditOrderProcessing AS\nBEGIN\n/*\n Procedure: AuditOrderProcessing\n Author: Jane Doe\n Created: 2023-04-15\n Description: This procedure is designed to audit order processing steps.\n It checks each step of the order processing workflow and logs it into the audit_log table.\n Modifications:\n - 2023-04-20: Added additional logging for failed orders.\n - 2023-05-01: Updated logic to include new order status.\n*/\nSET NOCOUNT ON;\n-- Insert audit records\nINSERT INTO audit_log (description) SELECT 'Order processed: ' + CAST(id AS NVARCHAR(10)) FROM orders WHERE status = 'Processed';\nEND;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE AuditOrderProcessing AS BEGIN SET NOCOUNT ON; INSERT INTO audit_log (description) SELECT ? 
+ CAST(id AS NVARCHAR(?)) FROM orders WHERE status = ?; END;", - "statement_metadata": { - "size": 478, - "tables": ["audit_log", "orders"], - "commands": ["CREATE", "ALTER", "BEGIN", "INSERT", "SELECT"], - "comments": ["/*\n Procedure: AuditOrderProcessing\n Author: Jane Doe\n Created: 2023-04-15\n Description: This procedure is designed to audit order processing steps.\n It checks each step of the order processing workflow and logs it into the audit_log table.\n Modifications:\n - 2023-04-20: Added additional logging for failed orders.\n - 2023-05-01: Updated logic to include new order status.\n*/", "-- Insert audit records"], - "procedures": ["AuditOrderProcessing"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-dynamic-sql-error-handling.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-dynamic-sql-error-handling.json deleted file mode 100644 index 8600e30f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-dynamic-sql-error-handling.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE UpdateOrderStatus @orderId INT, @newStatus NVARCHAR(50) AS BEGIN SET NOCOUNT ON; BEGIN TRY BEGIN TRANSACTION; DECLARE @sql NVARCHAR(MAX) = N'UPDATE orders SET status = ''' + @newStatus + ''' WHERE id = ' + CAST(@orderId AS NVARCHAR(10)) + ';'; EXEC sp_executesql @sql; COMMIT TRANSACTION; END TRY BEGIN CATCH ROLLBACK TRANSACTION; THROW; END CATCH; END;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE UpdateOrderStatus @orderId INT, @newStatus NVARCHAR(?) 
AS BEGIN SET NOCOUNT ON; BEGIN TRY BEGIN TRANSACTION; DECLARE @sql NVARCHAR(MAX) = N ? ? + @newStatus + ? ? + CAST(@orderId AS NVARCHAR(?)) + ?; EXEC sp_executesql @sql; COMMIT TRANSACTION; END TRY BEGIN CATCH ROLLBACK TRANSACTION; THROW; END CATCH; END;", - "statement_metadata": { - "size": 43, - "tables": [], - "commands": ["CREATE", "ALTER", "BEGIN", "EXEC", "COMMIT"], - "comments": [], - "procedures": ["UpdateOrderStatus"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-dynamic-sql-execution.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-dynamic-sql-execution.json deleted file mode 100644 index 9d3e043d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-dynamic-sql-execution.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE DynamicCustomerQuery @query NVARCHAR(MAX) AS\nBEGIN\n-- Executes a dynamic SQL query based on the input.\n-- Used for flexible customer data retrieval.\nSET NOCOUNT ON;\nEXEC sp_executesql @query;\nEND;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE DynamicCustomerQuery @query NVARCHAR(MAX) AS BEGIN SET NOCOUNT ON; EXEC sp_executesql @query; END;", - "statement_metadata": { - "size": 136, - "tables": [], - "commands": ["CREATE", "ALTER", "BEGIN", "EXEC"], - "comments": ["-- Executes a dynamic SQL query based on the input.", "-- Used for flexible customer data retrieval."], - "procedures": ["DynamicCustomerQuery"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - 
"collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-executing-another.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-executing-another.json deleted file mode 100644 index abf61515..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-executing-another.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE FullOrderProcessing AS BEGIN SET NOCOUNT ON; EXEC ProcessOrders; EXEC UpdateOrderStatus 1, 'Dispatched'; END;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE FullOrderProcessing AS BEGIN SET NOCOUNT ON; EXEC ProcessOrders; EXEC UpdateOrderStatus ?, ?; END;", - "statement_metadata": { - "size": 39, - "tables": [], - "commands": ["CREATE", "ALTER", "BEGIN", "EXEC"], - "comments": [], - "procedures": ["FullOrderProcessing"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-temp-tables-transaction.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-temp-tables-transaction.json deleted file mode 100644 index d83db6ba..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-temp-tables-transaction.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE ProcessOrders AS BEGIN SET NOCOUNT ON; BEGIN TRANSACTION; CREATE TABLE #TempOrders (id INT, status NVARCHAR(50)); INSERT INTO #TempOrders 
(id, status) SELECT id, status FROM orders WHERE status = 'Pending'; UPDATE orders SET status = 'Processing' WHERE status = 'Pending'; COMMIT TRANSACTION; SELECT * FROM #TempOrders; DROP TABLE #TempOrders; END;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE ProcessOrders AS BEGIN SET NOCOUNT ON; BEGIN TRANSACTION; CREATE TABLE #TempOrders (id INT, status NVARCHAR(?)); INSERT INTO #TempOrders (id, status) SELECT id, status FROM orders WHERE status = ?; UPDATE orders SET status = ? WHERE status = ?; COMMIT TRANSACTION; SELECT * FROM #TempOrders; DROP TABLE #TempOrders; END;", - "statement_metadata": { - "size": 74, - "tables": ["#TempOrders", "orders"], - "commands": ["CREATE", "ALTER", "BEGIN", "INSERT", "SELECT", "UPDATE", "COMMIT", "DROP"], - "comments": [], - "procedures": ["ProcessOrders"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-try-catch-error.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-try-catch-error.json deleted file mode 100644 index 909c2889..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-try-catch-error.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE ProcessPayment @orderId INT, @amount MONEY AS\nBEGIN\n-- This procedure processes payments for orders.\n-- It includes error handling using TRY-CATCH.\nSET NOCOUNT ON;\nBEGIN TRY\n -- Attempt to process the payment\n UPDATE orders SET payment_received = 1, payment_amount = @amount WHERE id = @orderId;\nEND TRY\nBEGIN CATCH\n -- Handle the error\n INSERT INTO error_log (error_message) VALUES 
(ERROR_MESSAGE());\nEND CATCH\nEND;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE ProcessPayment @orderId INT, @amount MONEY AS BEGIN SET NOCOUNT ON; BEGIN TRY UPDATE orders SET payment_received = ?, payment_amount = @amount WHERE id = @orderId; END TRY BEGIN CATCH INSERT INTO error_log (error_message) VALUES (ERROR_MESSAGE()); END CATCH END;", - "statement_metadata": { - "size": 203, - "tables": ["orders", "error_log"], - "commands": ["CREATE", "ALTER", "BEGIN", "UPDATE", "INSERT"], - "comments": ["-- This procedure processes payments for orders.", "-- It includes error handling using TRY-CATCH.", "-- Attempt to process the payment", "-- Handle the error"], - "procedures": ["ProcessPayment"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-version-control.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-version-control.json deleted file mode 100644 index 68166fd5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-version-control.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE AuditOrderProcessing AS\nBEGIN\n/*\n Procedure: AuditOrderProcessing\n Author: Jane Doe\n Created: 2023-04-15\n Description: This procedure is designed to audit order processing steps.\n It checks each step of the order processing workflow and logs it into the audit_log table.\n Modifications:\n - 2023-04-20: Added additional logging for failed orders.\n - 2023-05-01: Updated logic to include new order status.\n*/\nSET NOCOUNT ON;\n-- Insert audit records\nINSERT INTO audit_log (description) SELECT 'Order 
processed: ' + CAST(id AS NVARCHAR(10)) FROM orders WHERE status = 'Processed';\nEND;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE AuditOrderProcessing AS BEGIN SET NOCOUNT ON; INSERT INTO audit_log (description) SELECT ? + CAST(id AS NVARCHAR(?)) FROM orders WHERE status = ?; END;", - "statement_metadata": { - "size": 478, - "tables": ["audit_log", "orders"], - "commands": ["CREATE", "ALTER", "BEGIN", "INSERT", "SELECT"], - "comments": ["/*\n Procedure: AuditOrderProcessing\n Author: Jane Doe\n Created: 2023-04-15\n Description: This procedure is designed to audit order processing steps.\n It checks each step of the order processing workflow and logs it into the audit_log table.\n Modifications:\n - 2023-04-20: Added additional logging for failed orders.\n - 2023-05-01: Updated logic to include new order status.\n*/", "-- Insert audit records"], - "procedures": ["AuditOrderProcessing"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-with-params-and-execution.json.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-with-params-and-execution.json.json deleted file mode 100644 index 9c3d20a8..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-with-params-and-execution.json.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE UpdateCustomerStatus @customerId INT, @newStatus NVARCHAR(50) AS\nBEGIN\n-- This procedure updates the status of a customer.\n-- It takes the customer ID and the new status as parameters.\nSET NOCOUNT ON;\nUPDATE customers SET status = @newStatus WHERE id = 
@customerId;\nEND;\nEXEC UpdateCustomerStatus 123, 'Active';", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE UpdateCustomerStatus @customerId INT, @newStatus NVARCHAR(?) AS BEGIN SET NOCOUNT ON; UPDATE customers SET status = @newStatus WHERE id = @customerId; END; EXEC UpdateCustomerStatus ?, ?;", - "statement_metadata": { - "size": 167, - "tables": ["customers"], - "commands": ["CREATE", "ALTER", "BEGIN", "UPDATE", "EXEC"], - "comments": ["-- This procedure updates the status of a customer.", "-- It takes the customer ID and the new status as parameters."], - "procedures": ["UpdateCustomerStatus"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] -} diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/basic-select.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/basic-select.json deleted file mode 100644 index 1c103a26..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/basic-select.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, name, email FROM customers WHERE active = 1;", - "outputs": [ - { - "expected": "SELECT id, name, email FROM customers WHERE active = ?", - "statement_metadata": { - "size": 15, - "tables": ["customers"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/data-compression-features.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/data-compression-features.json deleted file mode 100644 index 96ca517a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/data-compression-features.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE TABLE 
orders_compressed WITH (DATA_COMPRESSION = PAGE) AS SELECT * FROM orders;", - "outputs": [ - { - "expected": "CREATE TABLE orders_compressed WITH ( DATA_COMPRESSION = PAGE ) AS SELECT * FROM orders", - "statement_metadata": { - "size": 35, - "tables": ["orders_compressed", "orders"], - "commands": ["CREATE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/filetable-storage.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/filetable-storage.json deleted file mode 100644 index 15c83fd7..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/filetable-storage.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE TABLE DocumentStore AS FileTable;", - "outputs": [ - { - "expected": "CREATE TABLE DocumentStore", - "statement_metadata": { - "size": 19, - "tables": ["DocumentStore"], - "commands": ["CREATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/pivot-unpivot-operations.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/pivot-unpivot-operations.json deleted file mode 100644 index a2e5b22c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/pivot-unpivot-operations.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT * FROM (SELECT customer_id, product_id, amount FROM order_details) AS SourceTable PIVOT (SUM(amount) FOR product_id IN ([1], [2], [3])) AS PivotTable;", - "outputs": [ - { - "expected": "SELECT * FROM ( SELECT customer_id, product_id, amount FROM order_details ) PIVOT ( SUM ( amount ) FOR product_id IN ( ? 
) )", - "statement_metadata": { - "size": 19, - "tables": ["order_details"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-choose.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-choose.json deleted file mode 100644 index 821d7276..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-choose.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, name, CHOOSE(department_id, 'Sales', 'Engineering', 'HR') AS DepartmentName FROM employees;", - "outputs": [ - { - "expected": "SELECT id, name, CHOOSE ( department_id, ?, ?, ? ) FROM employees", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-format.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-format.json deleted file mode 100644 index e1129aae..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-format.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT name, FORMAT(joining_date, 'dd-MM-yyyy') AS FormattedJoiningDate FROM employees;", - "outputs": [ - { - "expected": "SELECT name, FORMAT ( joining_date, ? 
) FROM employees", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-full-outer-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-full-outer-join.json deleted file mode 100644 index d2096762..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-full-outer-join.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT c.name, o.order_date FROM customers c FULL OUTER JOIN orders o ON c.id = o.customer_id WHERE c.region = 'West' OR o.amount > 500;", - "outputs": [ - { - "expected": "SELECT c.name, o.order_date FROM customers c FULL OUTER JOIN orders o ON c.id = o.customer_id WHERE c.region = ? OR o.amount > ?", - "statement_metadata": { - "size": 25, - "tables": ["customers", "orders"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-identity.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-identity.json deleted file mode 100644 index 18bb4e11..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-identity.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO employees (name, department) VALUES ('John Doe', 'Sales'); SELECT @@IDENTITY AS LastInsertedIdentity;", - "outputs": [ - { - "expected": "INSERT INTO employees ( name, department ) VALUES ( ? 
); SELECT @@IDENTITY", - "statement_metadata": { - "size": 21, - "tables": ["employees"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-iif.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-iif.json deleted file mode 100644 index 9ecc3b4a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-iif.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT name, IIF(gender = 'M', 'Male', 'Female') AS GenderDescription FROM employees;", - "outputs": [ - { - "expected": "SELECT name, IIF ( gender = ?, ?, ? ) FROM employees", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-join-aggregation.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-join-aggregation.json deleted file mode 100644 index 1de78462..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-join-aggregation.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT c.name, SUM(o.amount) AS total_sales FROM customers c INNER JOIN orders o ON c.id = o.customer_id GROUP BY c.name;", - "outputs": [ - { - "expected": "SELECT c.name, SUM ( o.amount ) FROM customers c INNER JOIN orders o ON c.id = o.customer_id GROUP BY c.name", - "statement_metadata": { - "size": 25, - "tables": ["customers", "orders"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-system-user.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-system-user.json deleted file mode 100644 index f841baab..00000000 --- 
a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-system-user.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT SYSTEM_USER AS CurrentSystemUser, USER_NAME() AS CurrentDatabaseUser, NEWID() AS UniqueIdentifier;", - "outputs": [ - { - "expected": "SELECT SYSTEM_USER, USER_NAME ( ), NEWID ( )", - "statement_metadata": { - "size": 6, - "tables": [], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-using-pivot.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-using-pivot.json deleted file mode 100644 index 3c6c8ce2..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-using-pivot.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT * FROM (SELECT customer_id, product_id, amount FROM orders) AS SourceTable PIVOT (SUM(amount) FOR product_id IN ([1], [2], [3])) AS PivotTable;", - "outputs": [ - { - "expected": "SELECT * FROM ( SELECT customer_id, product_id, amount FROM orders ) PIVOT ( SUM ( amount ) FOR product_id IN ( ? 
) )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-using-try-convert.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-using-try-convert.json deleted file mode 100644 index aed4952d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-using-try-convert.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, TRY_CONVERT(float, total_amount) AS TotalFloat FROM orders WHERE TRY_CONVERT(float, total_amount) IS NOT NULL;", - "outputs": [ - { - "expected": "SELECT id, TRY_CONVERT ( float, total_amount ) FROM orders WHERE TRY_CONVERT ( float, total_amount ) IS NOT ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-cte.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-cte.json deleted file mode 100644 index 48add7e9..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-cte.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "input": "WITH RankedOrders AS (SELECT o.id, o.customer_id, RANK() OVER (PARTITION BY o.customer_id ORDER BY o.amount DESC) AS rnk FROM orders o) SELECT id FROM RankedOrders WHERE rnk = 1;", - "outputs": [ - { - "expected": "WITH RankedOrders AS ( SELECT o.id, o.customer_id, RANK ( ) OVER ( PARTITION BY o.customer_id ORDER BY o.amount DESC ) FROM orders o ) SELECT id FROM RankedOrders WHERE rnk = ?", - "statement_metadata": { - "size": 24, - "tables": ["orders", "RankedOrders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "WITH RankedOrders AS (SELECT o.id, o.customer_id, RANK() OVER 
(PARTITION BY o.customer_id ORDER BY o.amount DESC) AS rnk FROM orders o) SELECT id FROM RankedOrders WHERE rnk = ?;", - "normalizer_config": { - "keep_sql_alias": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-offset-fetch.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-offset-fetch.json deleted file mode 100644 index c00765a8..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-offset-fetch.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, name FROM customers ORDER BY name OFFSET 10 ROWS FETCH NEXT 5 ROWS ONLY;", - "outputs": [ - { - "expected": "SELECT id, name FROM customers ORDER BY name OFFSET ? ROWS FETCH NEXT ? ROWS ONLY", - "statement_metadata": { - "size": 15, - "tables": ["customers"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-string-agg.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-string-agg.json deleted file mode 100644 index 838f137e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-string-agg.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT STRING_AGG(name, ', ') AS names FROM customers WHERE region = 'East';", - "outputs": [ - { - "expected": "SELECT STRING_AGG ( name, ? 
) FROM customers WHERE region = ?", - "statement_metadata": { - "size": 15, - "tables": ["customers"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-table-sample.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-table-sample.json deleted file mode 100644 index 4953ab6e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-table-sample.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT * FROM customers TABLESAMPLE (10 PERCENT);", - "outputs": [ - { - "expected": "SELECT * FROM customers TABLESAMPLE ( ? PERCENT )", - "statement_metadata": { - "size": 15, - "tables": ["customers"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-window-function.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-window-function.json deleted file mode 100644 index cf918833..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-window-function.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "input": "SELECT id, amount, ROW_NUMBER() OVER (ORDER BY amount DESC) AS rownum FROM orders;", - "outputs": [ - { - "expected": "SELECT id, amount, ROW_NUMBER ( ) OVER ( ORDER BY amount DESC ) AS rownum FROM orders", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT id, amount, ROW_NUMBER() OVER (ORDER BY amount DESC) AS rownum FROM orders;", - "normalizer_config": { - "keep_sql_alias": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git 
a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/service-broker.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/service-broker.json deleted file mode 100644 index 4f6cc45c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/service-broker.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE MESSAGE TYPE RequestMessage VALIDATION = WELL_FORMED_XML; CREATE CONTRACT RequestContract (RequestMessage SENT BY INITIATOR);", - "outputs": [ - { - "expected": "CREATE MESSAGE TYPE RequestMessage VALIDATION = WELL_FORMED_XML; CREATE CONTRACT RequestContract ( RequestMessage SENT BY INITIATOR )", - "statement_metadata": { - "size": 6, - "tables": [], - "commands": ["CREATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/spatial-data-types-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/spatial-data-types-functions.json deleted file mode 100644 index 92519e16..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/spatial-data-types-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT geography::Point(latitude, longitude, 4326).ToString() FROM locations;", - "outputs": [ - { - "expected": "SELECT geography :: Point ( latitude, longitude, ? ) . 
ToString ( ) FROM locations", - "statement_metadata": { - "size": 15, - "tables": ["locations"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/xml-data-types-queries.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/xml-data-types-queries.json deleted file mode 100644 index ad9cae93..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/xml-data-types-queries.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT xmlData.value('(/Customer/Name)[1]', 'nvarchar(100)') AS CustomerName FROM customerRecords;", - "outputs": [ - { - "expected": "SELECT xmlData.value ( ? ) FROM customerRecords", - "statement_metadata": { - "size": 21, - "tables": ["customerRecords"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/conditional-update-case.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/conditional-update-case.json deleted file mode 100644 index bc0c0f6e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/conditional-update-case.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = CASE WHEN amount >= 1000 THEN 'High Value' ELSE 'Regular' END;", - "outputs": [ - { - "expected": "UPDATE orders SET status = CASE WHEN amount >= ? THEN ? ELSE ? 
END", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-basic.json deleted file mode 100644 index 5fe74fe1..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-basic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = 'Processed' WHERE order_date < GETDATE() - 30;", - "outputs": [ - { - "expected": "UPDATE orders SET status = ? WHERE order_date < GETDATE ( ) - ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-complex-where.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-complex-where.json deleted file mode 100644 index 68e5c7fd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-complex-where.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = 'Review Needed' WHERE customer_id IN (SELECT id FROM customers WHERE last_order_date < GETDATE() - 365) AND status = 'Pending';", - "outputs": [ - { - "expected": "UPDATE orders SET status = ? WHERE customer_id IN ( SELECT id FROM customers WHERE last_order_date < GETDATE ( ) - ? 
) AND status = ?", - "statement_metadata": { - "size": 27, - "tables": ["orders", "customers"], - "commands": ["UPDATE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-from-aliases.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-from-aliases.json deleted file mode 100644 index b16fadd9..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-from-aliases.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE o SET o.status = 'Completed' FROM orders o WHERE o.order_date > '2023-01-01' AND o.amount > 500;", - "outputs": [ - { - "expected": "UPDATE o SET o.status = ? FROM orders o WHERE o.order_date > ? AND o.amount > ?", - "statement_metadata": { - "size": 13, - "tables": ["o", "orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-join-top.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-join-top.json deleted file mode 100644 index 7d268717..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-join-top.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE TOP (5) o SET o.status = 'Pending Review' FROM orders o INNER JOIN customers c ON o.customer_id = c.id WHERE c.region = 'North';", - "outputs": [ - { - "expected": "UPDATE TOP ( ? ) o SET o.status = ? 
FROM orders o INNER JOIN customers c ON o.customer_id = c.id WHERE c.region = ?", - "statement_metadata": { - "size": 25, - "tables": ["orders", "customers"], - "commands": ["UPDATE", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-rowlock-hint.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-rowlock-hint.json deleted file mode 100644 index f5a6d59f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-rowlock-hint.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders WITH (ROWLOCK) SET status = 'Processing' WHERE status = 'Pending';", - "outputs": [ - { - "expected": "UPDATE orders WITH ( ROWLOCK ) SET status = ? WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-using-quoted-identifiers.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-using-quoted-identifiers.json deleted file mode 100644 index f2e62659..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-using-quoted-identifiers.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE [orders] SET [status] = 'Confirmed' WHERE [order_date] >= '2023-01-01';", - "outputs": [ - { - "expected": "UPDATE orders SET status = ? 
WHERE order_date >= ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-using-top.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-using-top.json deleted file mode 100644 index e2c36d09..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-using-top.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE TOP (10) orders SET status = 'Reviewed' WHERE status = 'Pending';", - "outputs": [ - { - "expected": "UPDATE TOP ( ? ) orders SET status = ? WHERE status = ?", - "statement_metadata": { - "size": 6, - "tables": [], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-using-variable-store-value.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-using-variable-store-value.json deleted file mode 100644 index c7d21a31..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-using-variable-store-value.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DECLARE @maxDate DATETIME; SET @maxDate = (SELECT MAX(order_date) FROM orders); UPDATE orders SET status = 'Old Order' WHERE order_date < @maxDate;", - "outputs": [ - { - "expected": "DECLARE @maxDate DATETIME; SET @maxDate = ( SELECT MAX ( order_date ) FROM orders ); UPDATE orders SET status = ? 
WHERE order_date < @maxDate", - "statement_metadata": { - "size": 18, - "tables": ["orders"], - "commands": ["SELECT", "UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-boolean-logic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-boolean-logic.json deleted file mode 100644 index 470f3955..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-boolean-logic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET is_priority = CASE WHEN total_amount > 1000 THEN 1 ELSE 0 END WHERE order_date > '2023-01-01';", - "outputs": [ - { - "expected": "UPDATE orders SET is_priority = CASE WHEN total_amount > ? THEN ? ELSE ? END WHERE order_date > ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-case.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-case.json deleted file mode 100644 index b9a20bbc..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-case.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = CASE WHEN amount > 1000 THEN 'High Value' ELSE 'Standard' END WHERE order_date >= '2023-01-01';", - "outputs": [ - { - "expected": "UPDATE orders SET status = CASE WHEN amount > ? THEN ? ELSE ? 
END WHERE order_date >= ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-cte.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-cte.json deleted file mode 100644 index c167ed39..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-cte.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "WITH UpdatedOrders AS (SELECT id FROM orders WHERE order_date < GETDATE() - 30) UPDATE o SET o.status = 'Archived' FROM orders o JOIN UpdatedOrders uo ON o.id = uo.id;", - "outputs": [ - { - "expected": "WITH UpdatedOrders AS ( SELECT id FROM orders WHERE order_date < GETDATE ( ) - ? ) UPDATE o SET o.status = ? FROM orders o JOIN UpdatedOrders uo ON o.id = uo.id", - "statement_metadata": { - "size": 36, - "tables": ["orders", "o", "UpdatedOrders"], - "commands": ["SELECT", "UPDATE", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-date-manipulation.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-date-manipulation.json deleted file mode 100644 index 6a4fd85d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-date-manipulation.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET due_date = DATEADD(day, 10, order_date) WHERE status = 'Pending';", - "outputs": [ - { - "expected": "UPDATE orders SET due_date = DATEADD ( day, ?, order_date ) WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git 
a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-join.json deleted file mode 100644 index e17a7422..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-join.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE o SET o.status = 'Dispatched' FROM orders o INNER JOIN customers c ON o.customer_id = c.id WHERE c.region = 'West' AND o.status = 'Processed';", - "outputs": [ - { - "expected": "UPDATE o SET o.status = ? FROM orders o INNER JOIN customers c ON o.customer_id = c.id WHERE c.region = ? AND o.status = ?", - "statement_metadata": { - "size": 26, - "tables": ["o", "orders", "customers"], - "commands": ["UPDATE", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-named-variables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-named-variables.json deleted file mode 100644 index 67b84476..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-named-variables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DECLARE @status NVARCHAR(50); SET @status = 'Delayed'; UPDATE orders SET status = @status WHERE order_date < GETDATE() - 60;", - "outputs": [ - { - "expected": "DECLARE @status NVARCHAR ( ? 
); SET @status = ?; UPDATE orders SET status = @status WHERE order_date < GETDATE ( ) - ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-null-handling.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-null-handling.json deleted file mode 100644 index bcfce2b0..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-null-handling.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET delivery_date = NULLIF(order_date, due_date) WHERE status = 'Cancelled';", - "outputs": [ - { - "expected": "UPDATE orders SET delivery_date = NULLIF ( order_date, due_date ) WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-numeric-calculation.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-numeric-calculation.json deleted file mode 100644 index 6b8ec4b7..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-numeric-calculation.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET total_amount = quantity * unit_price WHERE status = 'Pending';", - "outputs": [ - { - "expected": "UPDATE orders SET total_amount = quantity * unit_price WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-output.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-output.json 
deleted file mode 100644 index a5edfb60..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-output.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = 'Cancelled' OUTPUT deleted.id, deleted.status WHERE status = 'Pending' AND order_date < GETDATE() - 90;", - "outputs": [ - { - "expected": "UPDATE orders SET status = ? OUTPUT deleted.id, deleted.status WHERE status = ? AND order_date < GETDATE ( ) - ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-string-concatenation.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-string-concatenation.json deleted file mode 100644 index b81b8138..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-string-concatenation.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET notes = CONCAT(notes, ' Updated on ', CONVERT(VARCHAR, GETDATE(), 101)) WHERE status = 'Shipped';", - "outputs": [ - { - "expected": "UPDATE orders SET notes = CONCAT ( notes, ?, CONVERT ( VARCHAR, GETDATE ( ), ? 
) ) WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-subquery.json deleted file mode 100644 index 00d3be23..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-subquery.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = 'High Priority' WHERE id IN (SELECT order_id FROM order_details WHERE quantity > 10);", - "outputs": [ - { - "expected": "UPDATE orders SET status = ? WHERE id IN ( SELECT order_id FROM order_details WHERE quantity > ? )", - "statement_metadata": { - "size": 31, - "tables": ["orders", "order_details"], - "commands": ["UPDATE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/complex/super-complex-poorly-written-sql.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/complex/super-complex-poorly-written-sql.json deleted file mode 100644 index 39a0d7fd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/complex/super-complex-poorly-written-sql.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT a.*, b.totalAmount, CASE WHEN c.id IS NOT NULL THEN d.description ELSE 'N/A' END AS description\n-- Joining table a with b to get total amounts. 
If c.id is not null, get description from d\nFROM (SELECT id, name, status, customer_id\n FROM orders\n WHERE order_date > DATE_ADD(CURDATE(), INTERVAL -6 MONTH)\n AND status IN ('Pending', 'Completed')\n AND customer_id IN (SELECT customer_id FROM customers WHERE region IN ('East', 'West') AND last_order_date > DATE_ADD(CURDATE(), INTERVAL -1 YEAR))\n ORDER BY name DESC) a\nINNER JOIN (SELECT order_id, SUM(amount) AS totalAmount FROM order_details GROUP BY order_id) b ON a.id = b.order_id\nLEFT JOIN audit_log c ON a.id = c.order_id\nLEFT JOIN (SELECT DISTINCT status, description FROM status_descriptions) d ON a.status = d.status\nWHERE a.name LIKE '%test%'\n-- Filtering on name containing 'test'\nAND (b.totalAmount > 1000 OR b.totalAmount IS NULL)\nORDER BY a.order_date DESC, a.name;", - "outputs": [ - { - "expected": "SELECT a. *, b.totalAmount, CASE WHEN c.id IS NOT ? THEN d.description ELSE ? END FROM ( SELECT id, name, status, customer_id FROM orders WHERE order_date > DATE_ADD ( CURDATE ( ), INTERVAL ? MONTH ) AND status IN ( ? ) AND customer_id IN ( SELECT customer_id FROM customers WHERE region IN ( ? ) AND last_order_date > DATE_ADD ( CURDATE ( ), INTERVAL ? YEAR ) ) ORDER BY name DESC ) a INNER JOIN ( SELECT order_id, SUM ( amount ) FROM order_details GROUP BY order_id ) b ON a.id = b.order_id LEFT JOIN audit_log c ON a.id = c.order_id LEFT JOIN ( SELECT DISTINCT status, description FROM status_descriptions ) d ON a.status = d.status WHERE a.name LIKE ? AND ( b.totalAmount > ? OR b.totalAmount IS ? ) ORDER BY a.order_date DESC, a.name", - "statement_metadata": { - "size": 195, - "tables": ["orders", "customers", "order_details", "audit_log", "status_descriptions"], - "commands": ["SELECT", "JOIN"], - "comments": ["-- Joining table a with b to get total amounts. 
If c.id is not null, get description from d", "-- Filtering on name containing 'test'"], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/complex/super-complex-sql-multiple-joins.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/complex/super-complex-sql-multiple-joins.json deleted file mode 100644 index e0bde0ba..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/complex/super-complex-sql-multiple-joins.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT a.id, a.name, IFNULL(b.totalAmount, 0) AS totalAmount, c.comment, d.productCount, e.latestOrderDate\n-- Extremely complex query combining multiple joins, subqueries, and inline views\nFROM (SELECT id, name FROM customers WHERE status = 'Active') a\nJOIN (SELECT customer_id, SUM(amount) AS totalAmount FROM orders GROUP BY customer_id) b ON a.id = b.customer_id\nLEFT JOIN (SELECT customer_id, comment FROM customer_feedback WHERE rating = 5 ORDER BY feedback_date DESC LIMIT 1) c ON a.id = c.customer_id\nLEFT JOIN (SELECT customer_id, COUNT(*) AS productCount FROM order_details GROUP BY customer_id) d ON a.id = d.customer_id\nLEFT JOIN (SELECT customer_id, MAX(order_date) AS latestOrderDate FROM orders WHERE status IN ('Completed', 'Shipped') GROUP BY customer_id) e ON a.id = e.customer_id\nWHERE a.name LIKE '%Corp%' AND (b.totalAmount > 1000 OR d.productCount > 5)\nORDER BY a.name, totalAmount DESC;", - "outputs": [ - { - "expected": "SELECT a.id, a.name, IFNULL ( b.totalAmount, ? ), c.comment, d.productCount, e.latestOrderDate FROM ( SELECT id, name FROM customers WHERE status = ? ) a JOIN ( SELECT customer_id, SUM ( amount ) FROM orders GROUP BY customer_id ) b ON a.id = b.customer_id LEFT JOIN ( SELECT customer_id, comment FROM customer_feedback WHERE rating = ? ORDER BY feedback_date DESC LIMIT ? 
) c ON a.id = c.customer_id LEFT JOIN ( SELECT customer_id, COUNT ( * ) FROM order_details GROUP BY customer_id ) d ON a.id = d.customer_id LEFT JOIN ( SELECT customer_id, MAX ( order_date ) FROM orders WHERE status IN ( ? ) GROUP BY customer_id ) e ON a.id = e.customer_id WHERE a.name LIKE ? AND ( b.totalAmount > ? OR d.productCount > ? ) ORDER BY a.name, totalAmount DESC", - "statement_metadata": { - "size": 136, - "tables": ["customers", "orders", "customer_feedback", "order_details"], - "commands": ["SELECT", "JOIN"], - "comments": ["-- Extremely complex query combining multiple joins, subqueries, and inline views"], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/complex/super-complex-sql-nested-subqueries.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/complex/super-complex-sql-nested-subqueries.json deleted file mode 100644 index a086430d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/complex/super-complex-sql-nested-subqueries.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "input": "SELECT t1.id, t1.status, t3.totalAmount, t4.commentsCount, CASE WHEN t5.latestCommentDate IS NOT NULL THEN t5.latestCommentDate ELSE 'No Comments' END AS latestComment\n-- Complex query joining multiple subqueries and using conditional logic\nFROM (SELECT id, status FROM orders WHERE customer_id IN (SELECT id FROM customers WHERE region = 'North') AND order_date > (SELECT MAX(order_date) FROM orders WHERE status = 'Pending')) t1\nJOIN (SELECT order_id, SUM(amount) AS totalAmount FROM order_details WHERE product_id IN (SELECT id FROM products WHERE name LIKE '%Premium%') GROUP BY order_id) t3 ON t1.id = t3.order_id\nLEFT JOIN (SELECT order_id, COUNT(*) AS commentsCount FROM order_comments GROUP BY order_id) t4 ON t1.id = t4.order_id\nLEFT JOIN (SELECT order_id, MAX(comment_date) AS latestCommentDate FROM order_comments WHERE comment LIKE '%urgent%' GROUP BY order_id) t5 
ON t1.id = t5.order_id\nWHERE t1.status NOT IN ('Cancelled', 'Returned') AND (t3.totalAmount > 500 OR t4.commentsCount > 10)\nORDER BY t1.id, latestComment DESC;", - "outputs": [ - { - "expected": "SELECT t?.id, t?.status, t?.totalAmount, t?.commentsCount, CASE WHEN t?.latestCommentDate IS NOT ? THEN t?.latestCommentDate ELSE ? END FROM ( SELECT id, status FROM orders WHERE customer_id IN ( SELECT id FROM customers WHERE region = ? ) AND order_date > ( SELECT MAX ( order_date ) FROM orders WHERE status = ? ) ) t? JOIN ( SELECT order_id, SUM ( amount ) FROM order_details WHERE product_id IN ( SELECT id FROM products WHERE name LIKE ? ) GROUP BY order_id ) t? ON t?.id = t?.order_id LEFT JOIN ( SELECT order_id, COUNT ( * ) FROM order_comments GROUP BY order_id ) t? ON t?.id = t?.order_id LEFT JOIN ( SELECT order_id, MAX ( comment_date ) FROM order_comments WHERE comment LIKE ? GROUP BY order_id ) t? ON t?.id = t?.order_id WHERE t?.status NOT IN ( ? ) AND ( t?.totalAmount > ? OR t?.commentsCount > ? 
) ORDER BY t?.id, latestComment DESC", - "statement_metadata": { - "size": 132, - "tables": ["orders", "customers", "order_details", "products", "order_comments"], - "commands": ["SELECT", "JOIN"], - "comments": ["-- Complex query joining multiple subqueries and using conditional logic"], - "procedures": [] - } - } - ] -} diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-basic.json deleted file mode 100644 index eb9025d8..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-basic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE status = 'Cancelled';", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-cascade.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-cascade.json deleted file mode 100644 index 2e4a3b54..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-cascade.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM customers WHERE region = 'North'; -- Assuming CASCADE DELETE is set up on the foreign key in the orders table", - "outputs": [ - { - "expected": "DELETE FROM customers WHERE region = ?", - "statement_metadata": { - "size": 90, - "tables": ["customers"], - "commands": ["DELETE"], - "comments": ["-- Assuming CASCADE DELETE is set up on the foreign key in the orders table"], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-cascading-triggers.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-cascading-triggers.json deleted file mode 100644 
index 6ebeb031..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-cascading-triggers.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM customers WHERE id = 1; -- Assumes a trigger exists for cascading delete to orders", - "outputs": [ - { - "expected": "DELETE FROM customers WHERE id = ?", - "statement_metadata": { - "size": 73, - "tables": ["customers"], - "commands": ["DELETE"], - "comments": ["-- Assumes a trigger exists for cascading delete to orders"], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-conditional-logic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-conditional-logic.json deleted file mode 100644 index 27eb2283..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-conditional-logic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE status = IF(DAYOFWEEK(CURDATE()) = 1, 'Pending', 'Completed');", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE status = IF ( DAYOFWEEK ( CURDATE ( ) ) = ?, ?, ? )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-foreign-key-constraints.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-foreign-key-constraints.json deleted file mode 100644 index c5ab710f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-foreign-key-constraints.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE id IN (SELECT order_id FROM order_details WHERE quantity = 0);", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE id IN ( SELECT order_id FROM order_details WHERE quantity = ? 
)", - "statement_metadata": { - "size": 31, - "tables": ["orders", "order_details"], - "commands": ["DELETE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-free-disk-space.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-free-disk-space.json deleted file mode 100644 index b2507bb4..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-free-disk-space.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE order_date < '2020-01-01'; OPTIMIZE TABLE orders;", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE order_date < ?; OPTIMIZE TABLE orders", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-join-multiple-conditions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-join-multiple-conditions.json deleted file mode 100644 index b6f629cd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-join-multiple-conditions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE o FROM orders o JOIN customers c ON o.customer_id = c.id WHERE o.status = 'Completed' AND c.region = 'South';", - "outputs": [ - { - "expected": "DELETE o FROM orders o JOIN customers c ON o.customer_id = c.id WHERE o.status = ? 
AND c.region = ?", - "statement_metadata": { - "size": 25, - "tables": ["orders", "customers"], - "commands": ["DELETE", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-lock-tables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-lock-tables.json deleted file mode 100644 index 84e1c59c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-lock-tables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "LOCK TABLES orders WRITE; DELETE FROM orders WHERE status = 'Failed'; UNLOCK TABLES;", - "outputs": [ - { - "expected": "LOCK TABLES orders WRITE; DELETE FROM orders WHERE status = ?; UNLOCK TABLES", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-multiple-tables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-multiple-tables.json deleted file mode 100644 index d90a73b9..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-multiple-tables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE orders, order_details FROM orders INNER JOIN order_details ON orders.id = order_details.order_id WHERE orders.status = 'Obsolete';", - "outputs": [ - { - "expected": "DELETE orders, order_details FROM orders INNER JOIN order_details ON orders.id = order_details.order_id WHERE orders.status = ?", - "statement_metadata": { - "size": 29, - "tables": ["orders", "order_details"], - "commands": ["DELETE", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-optimized-conditions.json 
b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-optimized-conditions.json deleted file mode 100644 index e521eb50..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-optimized-conditions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE status = 'Completed' AND order_date < DATE_SUB(NOW(), INTERVAL 1 YEAR);", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE status = ? AND order_date < DATE_SUB ( NOW ( ), INTERVAL ? YEAR )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-order-by-limit.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-order-by-limit.json deleted file mode 100644 index 3f054e3c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-order-by-limit.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE status = 'Completed' ORDER BY order_date DESC LIMIT 5;", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE status = ? ORDER BY order_date DESC LIMIT ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-range-conditions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-range-conditions.json deleted file mode 100644 index 0c41e2d2..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-range-conditions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE amount BETWEEN 100 AND 500;", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE amount BETWEEN ? 
AND ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-regular-expressions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-regular-expressions.json deleted file mode 100644 index 7fa55b7e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-regular-expressions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE status REGEXP '^C.*';", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE status REGEXP ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-safe-update-mode.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-safe-update-mode.json deleted file mode 100644 index 9abb2814..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-safe-update-mode.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SET SQL_SAFE_UPDATES = 0; DELETE FROM orders WHERE customer_id = 1; SET SQL_SAFE_UPDATES = 1;", - "outputs": [ - { - "expected": "SET SQL_SAFE_UPDATES = ?; DELETE FROM orders WHERE customer_id = ?; SET SQL_SAFE_UPDATES = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-subquery-optimization.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-subquery-optimization.json deleted file mode 100644 index 14944cec..00000000 --- 
a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-subquery-optimization.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE id IN (SELECT id FROM orders WHERE status = 'Failed' LIMIT 10);", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE id IN ( SELECT id FROM orders WHERE status = ? LIMIT ? )", - "statement_metadata": { - "size": 18, - "tables": ["orders"], - "commands": ["DELETE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-truncate.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-truncate.json deleted file mode 100644 index 6a7053a8..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-truncate.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "TRUNCATE TABLE order_details;", - "outputs": [ - { - "expected": "TRUNCATE TABLE order_details", - "statement_metadata": { - "size": 21, - "tables": ["order_details"], - "commands": ["TRUNCATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-using-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-using-subquery.json deleted file mode 100644 index 2d739ca4..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-using-subquery.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE customer_id IN (SELECT id FROM customers WHERE region = 'West');", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE customer_id IN ( SELECT id FROM customers WHERE region = ? 
)", - "statement_metadata": { - "size": 27, - "tables": ["orders", "customers"], - "commands": ["DELETE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-with-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-with-join.json deleted file mode 100644 index c6def56a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-with-join.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE o FROM orders o JOIN customers c ON o.customer_id = c.id WHERE c.region = 'East' AND o.status = 'Pending';", - "outputs": [ - { - "expected": "DELETE o FROM orders o JOIN customers c ON o.customer_id = c.id WHERE c.region = ? AND o.status = ?", - "statement_metadata": { - "size": 25, - "tables": ["orders", "customers"], - "commands": ["DELETE", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-with-limit.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-with-limit.json deleted file mode 100644 index 60b99764..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-with-limit.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE status = 'Pending' LIMIT 10;", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE status = ? 
LIMIT ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-with-user-variables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-with-user-variables.json deleted file mode 100644 index a0c41cc6..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-with-user-variables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SET @max_id = (SELECT MAX(id) FROM orders); DELETE FROM orders WHERE id = @max_id;", - "outputs": [ - { - "expected": "SET @max_id = ( SELECT MAX ( id ) FROM orders ); DELETE FROM orders WHERE id = @max_id", - "statement_metadata": { - "size": 18, - "tables": ["orders"], - "commands": ["SELECT", "DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/batch-insert-multiple-rows.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/batch-insert-multiple-rows.json deleted file mode 100644 index 6ff42c67..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/batch-insert-multiple-rows.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status) VALUES (1, 'Pending'), (2, 'Completed'), (3, 'Processing');", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status ) VALUES ( ? ), ( ? ), ( ? 
)", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-auto-increment.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-auto-increment.json deleted file mode 100644 index 0c866869..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-auto-increment.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status) VALUES (3, 'Processing');", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status ) VALUES ( ? )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-basic.json deleted file mode 100644 index 7fce1641..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-basic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, order_date, status) VALUES (1, NOW(), 'Pending');", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, order_date, status ) VALUES ( ?, NOW ( ), ? 
)", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-blob-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-blob-data.json deleted file mode 100644 index 10f1fe22..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-blob-data.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status, document) VALUES (5, 'Pending', LOAD_FILE('/path/to/file'));", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status, document ) VALUES ( ?, LOAD_FILE ( ? ) )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-enum-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-enum-data.json deleted file mode 100644 index 2297c9b8..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-enum-data.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status, order_type) VALUES (7, 'Pending', 'Express');", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status, order_type ) VALUES ( ? 
)", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-ignore.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-ignore.json deleted file mode 100644 index 706788ae..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-ignore.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT IGNORE INTO orders (id, customer_id, status) VALUES (1, 10, 'Cancelled');", - "outputs": [ - { - "expected": "INSERT IGNORE INTO orders ( id, customer_id, status ) VALUES ( ? )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-json-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-json-data.json deleted file mode 100644 index 43566686..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-json-data.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status, details) VALUES (1, 'Pending', '{\"items\": [\"item1\", \"item2\"]}');", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status, details ) VALUES ( ? 
)", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-on-duplicate-key.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-on-duplicate-key.json deleted file mode 100644 index 4dc3f78b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-on-duplicate-key.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (id, customer_id, status) VALUES (100, 2, 'Pending') ON DUPLICATE KEY UPDATE status = 'Pending';", - "outputs": [ - { - "expected": "INSERT INTO orders ( id, customer_id, status ) VALUES ( ? ) ON DUPLICATE KEY UPDATE status = ?", - "statement_metadata": { - "size": 24, - "tables": ["orders", "status"], - "commands": ["INSERT", "UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-select-union.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-select-union.json deleted file mode 100644 index f9a24172..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-select-union.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status) SELECT customer_id, status FROM archived_orders UNION ALL SELECT customer_id, status FROM special_orders;", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status ) SELECT customer_id, status FROM archived_orders UNION ALL SELECT customer_id, status FROM special_orders", - "statement_metadata": { - "size": 47, - "tables": ["orders", "archived_orders", "special_orders"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git 
a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-spatial-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-spatial-data.json deleted file mode 100644 index e7a245ed..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-spatial-data.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status, location) VALUES (6, 'Delivered', ST_GeomFromText('POINT(1 1)'));", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status, location ) VALUES ( ?, ST_GeomFromText ( ? ) )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-using-last-insert-id.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-using-last-insert-id.json deleted file mode 100644 index 63eaa529..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-using-last-insert-id.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO customers (name) VALUES ('John Doe'); INSERT INTO orders (customer_id, status) VALUES (LAST_INSERT_ID(), 'Pending');", - "outputs": [ - { - "expected": "INSERT INTO customers ( name ) VALUES ( ? ); INSERT INTO orders ( customer_id, status ) VALUES ( LAST_INSERT_ID ( ), ? 
)", - "statement_metadata": { - "size": 21, - "tables": ["customers", "orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-using-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-using-subquery.json deleted file mode 100644 index 03fbae9d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-using-subquery.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO order_audit (order_id, status) SELECT id, status FROM orders WHERE customer_id = 1;", - "outputs": [ - { - "expected": "INSERT INTO order_audit ( order_id, status ) SELECT id, status FROM orders WHERE customer_id = ?", - "statement_metadata": { - "size": 29, - "tables": ["order_audit", "orders"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-conditional-logic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-conditional-logic.json deleted file mode 100644 index 61d60872..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-conditional-logic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status, amount) SELECT id, 'New', IF(region = 'West', 100, 50) FROM customers;", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status, amount ) SELECT id, ?, IF ( region = ?, ?, ? 
) FROM customers", - "statement_metadata": { - "size": 27, - "tables": ["orders", "customers"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-curdate-curtime.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-curdate-curtime.json deleted file mode 100644 index 1eb52dac..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-curdate-curtime.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status, order_date, order_time) VALUES (15, 'Pending', CURDATE(), CURTIME());", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status, order_date, order_time ) VALUES ( ?, CURDATE ( ), CURTIME ( ) )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-encryption-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-encryption-functions.json deleted file mode 100644 index 40427ead..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-encryption-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status, encrypted_note) VALUES (13, 'Pending', AES_ENCRYPT('Confidential note', 'encryption_key'));", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status, encrypted_note ) VALUES ( ?, AES_ENCRYPT ( ? 
) )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-generated-columns.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-generated-columns.json deleted file mode 100644 index 7e6b9f55..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-generated-columns.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status, total_incl_tax) VALUES (12, 'Pending', 150); -- total_incl_tax is a generated column", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status, total_incl_tax ) VALUES ( ? )", - "statement_metadata": { - "size": 51, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": ["-- total_incl_tax is a generated column"], - "procedures": [] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-replace.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-replace.json deleted file mode 100644 index 651ee691..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-replace.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "REPLACE INTO orders (id, customer_id, status) VALUES (1, 9, 'Completed');", - "outputs": [ - { - "expected": "REPLACE INTO orders ( id, customer_id, status ) VALUES ( ? 
)", - "statement_metadata": { - "size": 6, - "tables": ["orders"], - "commands": [], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-set-syntax.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-set-syntax.json deleted file mode 100644 index 21afae77..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-set-syntax.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders SET customer_id = 8, status = 'Processing', order_date = CURDATE();", - "outputs": [ - { - "expected": "INSERT INTO orders SET customer_id = ?, status = ?, order_date = CURDATE ( )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-spatial-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-spatial-data.json deleted file mode 100644 index 4059132b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-spatial-data.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status, location) VALUES (14, 'Pending', ST_GeomFromText('POINT(1 1)'));", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status, location ) VALUES ( ?, ST_GeomFromText ( ? 
) )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-timestamp.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-timestamp.json deleted file mode 100644 index e93ee978..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-timestamp.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status, created_at) VALUES (4, 'Shipped', CURRENT_TIMESTAMP);", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status, created_at ) VALUES ( ?, CURRENT_TIMESTAMP )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/complex-procedure-error-handling.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/complex-procedure-error-handling.json deleted file mode 100644 index 7137fbbf..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/complex-procedure-error-handling.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE PROCEDURE UpdateOrderStatus(IN orderId INT, IN newStatus VARCHAR(20)) BEGIN\n DECLARE EXIT HANDLER FOR SQLEXCEPTION\n BEGIN\n -- Handle error\n ROLLBACK;\n END;\n START TRANSACTION;\n UPDATE orders SET status = newStatus WHERE id = orderId;\n IF ROW_COUNT() = 0 THEN\n SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = 'No rows updated';\n END IF;\n COMMIT;\n END;", - "outputs": [ - { - "expected": "CREATE PROCEDURE UpdateOrderStatus ( IN orderId INT, IN newStatus VARCHAR ( ? 
) ) BEGIN DECLARE EXIT HANDLER FOR SQLEXCEPTION BEGIN ROLLBACK; END; START TRANSACTION; UPDATE orders SET status = newStatus WHERE id = orderId; IF ROW_COUNT ( ) = ? THEN SIGNAL SQLSTATE ? SET MESSAGE_TEXT = ?; END IF; COMMIT; END", - "statement_metadata": { - "size": 61, - "tables": ["orders"], - "commands": ["CREATE", "BEGIN", "UPDATE", "COMMIT"], - "comments": ["-- Handle error"], - "procedures": ["UpdateOrderStatus"] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-basic.json deleted file mode 100644 index e8487b6b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-basic.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "input": "CREATE PROCEDURE GetAllOrders() BEGIN SELECT * FROM orders; END;", - "outputs": [ - { - "expected": "CREATE PROCEDURE GetAllOrders ( ) BEGIN SELECT * FROM orders; END", - "statement_metadata": { - "size": 35, - "tables": ["orders"], - "commands": ["CREATE", "BEGIN", "SELECT"], - "comments": [], - "procedures": ["GetAllOrders"] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-conditional-logic-loop.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-conditional-logic-loop.json deleted file mode 100644 index cb2b88ef..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-conditional-logic-loop.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "input": "CREATE PROCEDURE ProcessOrders() BEGIN\n DECLARE done INT DEFAULT 0;\n DECLARE a INT;\n DECLARE cur1 CURSOR FOR SELECT id FROM orders WHERE status = 'Pending';\n DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = 1;\n OPEN cur1;\n read_loop: LOOP\n FETCH cur1 INTO a;\n IF done THEN\n LEAVE read_loop;\n END IF;\n UPDATE 
orders SET status = 'Processing' WHERE id = a;\n END LOOP;\n CLOSE cur1;\n END;", - "outputs": [ - { - "expected": "CREATE PROCEDURE ProcessOrders ( ) BEGIN DECLARE done INT DEFAULT ?; DECLARE a INT; DECLARE cur? CURSOR FOR SELECT id FROM orders WHERE status = ?; DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = ?; OPEN cur?; read_loop : LOOP FETCH cur? INTO a; IF done THEN LEAVE read_loop; END IF; UPDATE orders SET status = ? WHERE id = a; END LOOP; CLOSE cur?; END", - "statement_metadata": { - "size": 43, - "tables": ["orders", "a"], - "commands": ["CREATE", "BEGIN", "SELECT", "UPDATE"], - "comments": [], - "procedures": ["ProcessOrders"] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-cursor.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-cursor.json deleted file mode 100644 index d0e3462b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-cursor.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE PROCEDURE FetchOrders() BEGIN DECLARE done INT DEFAULT FALSE; DECLARE cur CURSOR FOR SELECT id FROM orders; DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = TRUE; OPEN cur; read_loop: LOOP FETCH cur INTO order_id; IF done THEN LEAVE read_loop; END IF; /* process each order */ END LOOP; CLOSE cur; END;", - "outputs": [ - { - "expected": "CREATE PROCEDURE FetchOrders ( ) BEGIN DECLARE done INT DEFAULT ?; DECLARE cur CURSOR FOR SELECT id FROM orders; DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = ?; OPEN cur; read_loop : LOOP FETCH cur INTO order_id; IF done THEN LEAVE read_loop; END IF; END LOOP; CLOSE cur; END", - "statement_metadata": { - "size": 66, - "tables": ["orders", "order_id"], - "commands": ["CREATE", "BEGIN", "SELECT"], - "comments": ["/* process each order */"], - "procedures": ["FetchOrders"] - } - } - ] - } - \ No newline at end of file diff --git 
a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-dynamic-sql.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-dynamic-sql.json deleted file mode 100644 index 42128dc7..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-dynamic-sql.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE PROCEDURE DynamicQuery(IN tbl_name VARCHAR(50)) BEGIN SET @s = CONCAT('SELECT * FROM ', tbl_name); PREPARE stmt FROM @s; EXECUTE stmt; DEALLOCATE PREPARE stmt; END;", - "outputs": [ - { - "expected": "CREATE PROCEDURE DynamicQuery ( IN tbl_name VARCHAR ( ? ) ) BEGIN SET @s = CONCAT ( ?, tbl_name ); PREPARE stmt FROM @s; EXECUTE stmt; DEALLOCATE PREPARE stmt; END", - "statement_metadata": { - "size": 30, - "tables": [], - "commands": ["CREATE", "BEGIN", "EXECUTE"], - "comments": [], - "procedures": ["DynamicQuery"] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-error-handling.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-error-handling.json deleted file mode 100644 index 82fdd16b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-error-handling.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE PROCEDURE SafeUpdate(IN order_id INT, IN new_status VARCHAR(50)) BEGIN DECLARE EXIT HANDLER FOR SQLEXCEPTION BEGIN -- handle error\n SET @error = 'An error occurred'; END; UPDATE orders SET status = new_status WHERE id = order_id; END;", - "outputs": [ - { - "expected": "CREATE PROCEDURE SafeUpdate ( IN order_id INT, IN new_status VARCHAR ( ? 
) ) BEGIN DECLARE EXIT HANDLER FOR SQLEXCEPTION BEGIN SET @error = ?; END; UPDATE orders SET status = new_status WHERE id = order_id; END", - "statement_metadata": { - "size": 48, - "tables": ["orders"], - "commands": ["CREATE", "BEGIN", "UPDATE"], - "comments": ["-- handle error"], - "procedures": ["SafeUpdate"] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-input-output-parameters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-input-output-parameters.json deleted file mode 100644 index 26711350..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-input-output-parameters.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE PROCEDURE GetTotalOrders(OUT total INT) BEGIN SELECT COUNT(*) INTO total FROM orders; END;", - "outputs": [ - { - "expected": "CREATE PROCEDURE GetTotalOrders ( OUT total INT ) BEGIN SELECT COUNT ( * ) INTO total FROM orders; END", - "statement_metadata": { - "size": 42, - "tables": ["total", "orders"], - "commands": ["CREATE", "BEGIN", "SELECT"], - "comments": [], - "procedures": ["GetTotalOrders"] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-loop-control.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-loop-control.json deleted file mode 100644 index fc8f688d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-loop-control.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "input": "CREATE PROCEDURE ProcessOrders() BEGIN DECLARE done INT DEFAULT FALSE; DECLARE cur CURSOR FOR SELECT id FROM orders; DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = TRUE; OPEN cur; read_loop: LOOP FETCH cur INTO order_id; IF done THEN LEAVE read_loop; END IF; UPDATE orders SET status = 'Processed' WHERE id = order_id; END 
LOOP; CLOSE cur; END;", - "outputs": [ - { - "expected": "CREATE PROCEDURE ProcessOrders ( ) BEGIN DECLARE done INT DEFAULT ?; DECLARE cur CURSOR FOR SELECT id FROM orders; DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = ?; OPEN cur; read_loop : LOOP FETCH cur INTO order_id; IF done THEN LEAVE read_loop; END IF; UPDATE orders SET status = ? WHERE id = order_id; END LOOP; CLOSE cur; END", - "statement_metadata": { - "size": 50, - "tables": ["orders", "order_id"], - "commands": ["CREATE", "BEGIN", "SELECT", "UPDATE"], - "comments": [], - "procedures": ["ProcessOrders"] - } - } - ] -} diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-parameters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-parameters.json deleted file mode 100644 index fc98aa4f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-parameters.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "input": "CREATE PROCEDURE GetOrdersByStatus(IN status VARCHAR(20)) BEGIN SELECT * FROM orders WHERE orders.status = status; END;", - "outputs": [ - { - "expected": "CREATE PROCEDURE GetOrdersByStatus ( IN status VARCHAR ( ? 
) ) BEGIN SELECT * FROM orders WHERE orders.status = status; END", - "statement_metadata": { - "size": 40, - "tables": ["orders"], - "commands": ["CREATE", "BEGIN", "SELECT"], - "comments": [], - "procedures": ["GetOrdersByStatus"] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-transaction-management.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-transaction-management.json deleted file mode 100644 index 75aace98..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-transaction-management.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "input": "CREATE PROCEDURE UpdateOrderTransaction(IN order_id INT, IN new_status VARCHAR(50)) BEGIN DECLARE EXIT HANDLER FOR SQLEXCEPTION BEGIN ROLLBACK; END; START TRANSACTION; UPDATE orders SET status = new_status WHERE id = order_id; COMMIT; END;", - "outputs": [ - { - "expected": "CREATE PROCEDURE UpdateOrderTransaction ( IN order_id INT, IN new_status VARCHAR ( ? ) ) BEGIN DECLARE EXIT HANDLER FOR SQLEXCEPTION BEGIN ROLLBACK; END; START TRANSACTION; UPDATE orders SET status = new_status WHERE id = order_id; COMMIT; END", - "statement_metadata": { - "size": 51, - "tables": ["orders"], - "commands": ["CREATE", "BEGIN", "UPDATE", "COMMIT"], - "comments": [], - "procedures": ["UpdateOrderTransaction"] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/bit-data-type.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/bit-data-type.json deleted file mode 100644 index c39da7ed..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/bit-data-type.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, status, (is_paid & 1) AS isPaidFlag FROM orders;", - "outputs": [ - { - "expected": "SELECT id, status, ( is_paid & ? 
) FROM orders", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/blob-text-data-types.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/blob-text-data-types.json deleted file mode 100644 index d80018d4..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/blob-text-data-types.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, SUBSTRING(order_notes, 1, 100) AS short_notes FROM orders WHERE LENGTH(document_blob) > 1024;", - "outputs": [ - { - "expected": "SELECT id, SUBSTRING ( order_notes, ?, ? ) FROM orders WHERE LENGTH ( document_blob ) > ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/decimal-data-type.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/decimal-data-type.json deleted file mode 100644 index e3b65579..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/decimal-data-type.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, ROUND(total_amount, 2) AS rounded_total FROM orders;", - "outputs": [ - { - "expected": "SELECT id, ROUND ( total_amount, ? 
) FROM orders", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/enum-set-data-types.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/enum-set-data-types.json deleted file mode 100644 index cfccc93b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/enum-set-data-types.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, order_type, status_flags FROM order_details WHERE order_type = 'Standard' AND FIND_IN_SET('urgent', status_flags);", - "outputs": [ - { - "expected": "SELECT id, order_type, status_flags FROM order_details WHERE order_type = ? AND FIND_IN_SET ( ?, status_flags )", - "statement_metadata": { - "size": 19, - "tables": ["order_details"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/full-text-search-innodb.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/full-text-search-innodb.json deleted file mode 100644 index 1374d833..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/full-text-search-innodb.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE FULLTEXT INDEX ft_index ON orders (description); SELECT * FROM orders WHERE MATCH(description) AGAINST ('+delivery -return' IN BOOLEAN MODE);", - "outputs": [ - { - "expected": "CREATE FULLTEXT INDEX ft_index ON orders ( description ); SELECT * FROM orders WHERE MATCH ( description ) AGAINST ( ? 
IN BOOLEAN MODE )", - "statement_metadata": { - "size": 18, - "tables": ["orders"], - "commands": ["CREATE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-aggregate-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-aggregate-functions.json deleted file mode 100644 index a8880b7d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-aggregate-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT customer_id, COUNT(*) AS total_orders FROM orders GROUP BY customer_id HAVING COUNT(*) > 5;", - "outputs": [ - { - "expected": "SELECT customer_id, COUNT ( * ) FROM orders GROUP BY customer_id HAVING COUNT ( * ) > ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-basic.json deleted file mode 100644 index be6c6062..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-basic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, customer_id, order_date, status FROM orders WHERE status = 'Pending';", - "outputs": [ - { - "expected": "SELECT id, customer_id, order_date, status FROM orders WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-case-statement.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-case-statement.json deleted file mode 100644 index 68202df4..00000000 --- 
a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-case-statement.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, CASE WHEN status = 'Pending' THEN 'P' WHEN status = 'Completed' THEN 'C' ELSE 'Other' END AS status_code FROM orders;", - "outputs": [ - { - "expected": "SELECT id, CASE WHEN status = ? THEN ? WHEN status = ? THEN ? ELSE ? END FROM orders", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-coalesce-function.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-coalesce-function.json deleted file mode 100644 index d6dc561d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-coalesce-function.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, COALESCE(comments, 'No comments') AS order_comments FROM orders;", - "outputs": [ - { - "expected": "SELECT id, COALESCE ( comments, ? ) FROM orders", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-conditional-case.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-conditional-case.json deleted file mode 100644 index 68202df4..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-conditional-case.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, CASE WHEN status = 'Pending' THEN 'P' WHEN status = 'Completed' THEN 'C' ELSE 'Other' END AS status_code FROM orders;", - "outputs": [ - { - "expected": "SELECT id, CASE WHEN status = ? THEN ? WHEN status = ? THEN ? ELSE ? 
END FROM orders", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-date-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-date-functions.json deleted file mode 100644 index 482f9a26..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-date-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, YEAR(order_date) AS order_year FROM orders WHERE MONTH(order_date) = 1;", - "outputs": [ - { - "expected": "SELECT id, YEAR ( order_date ) FROM orders WHERE MONTH ( order_date ) = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-distinct.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-distinct.json deleted file mode 100644 index 6552154c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-distinct.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT DISTINCT status FROM orders;", - "outputs": [ - { - "expected": "SELECT DISTINCT status FROM orders", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-full-text-search.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-full-text-search.json deleted file mode 100644 index c5dfd177..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-full-text-search.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, MATCH (description) 
AGAINST ('+shipping -delayed' IN BOOLEAN MODE) AS score FROM orders WHERE MATCH (description) AGAINST ('+shipping -delayed' IN BOOLEAN MODE) > 0;", - "outputs": [ - { - "expected": "SELECT id, MATCH ( description ) AGAINST ( ? IN BOOLEAN MODE ) FROM orders WHERE MATCH ( description ) AGAINST ( ? IN BOOLEAN MODE ) > ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-geospatial-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-geospatial-data.json deleted file mode 100644 index cb807f36..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-geospatial-data.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, ST_AsText(location) AS location FROM orders WHERE ST_Distance_Sphere(location, ST_GeomFromText('POINT(10 20)')) < 10000;", - "outputs": [ - { - "expected": "SELECT id, ST_AsText ( location ) FROM orders WHERE ST_Distance_Sphere ( location, ST_GeomFromText ( ? ) ) < ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-group-concat.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-group-concat.json deleted file mode 100644 index 50c4d103..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-group-concat.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT customer_id, GROUP_CONCAT(status ORDER BY order_date DESC SEPARATOR ', ') AS order_statuses FROM orders GROUP BY customer_id;", - "outputs": [ - { - "expected": "SELECT customer_id, GROUP_CONCAT ( status ORDER BY order_date DESC SEPARATOR ? 
) FROM orders GROUP BY customer_id", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-join-aliases.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-join-aliases.json deleted file mode 100644 index 54f27bf1..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-join-aliases.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT o.id, c.name AS customer_name, o.status FROM orders o LEFT JOIN customers c ON o.customer_id = c.id;", - "outputs": [ - { - "expected": "SELECT o.id, c.name, o.status FROM orders o LEFT JOIN customers c ON o.customer_id = c.id", - "statement_metadata": { - "size": 25, - "tables": ["orders", "customers"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-join.json deleted file mode 100644 index 5aa1860f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-join.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT o.id, c.name, o.status FROM orders o JOIN customers c ON o.customer_id = c.id WHERE o.status = 'Completed';", - "outputs": [ - { - "expected": "SELECT o.id, c.name, o.status FROM orders o JOIN customers c ON o.customer_id = c.id WHERE o.status = ?", - "statement_metadata": { - "size": 25, - "tables": ["orders", "customers"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-json-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-json-functions.json 
deleted file mode 100644 index 1e550315..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-json-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, JSON_EXTRACT(order_details, '$.items[0].name') AS first_item_name FROM orders WHERE JSON_CONTAINS(order_details, '\"Active\"', '$.status');", - "outputs": [ - { - "expected": "SELECT id, JSON_EXTRACT ( order_details, ? ) FROM orders WHERE JSON_CONTAINS ( order_details, ?, ? )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-limit-offset.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-limit-offset.json deleted file mode 100644 index d7af084b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-limit-offset.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT * FROM orders ORDER BY order_date DESC LIMIT 10 OFFSET 5;", - "outputs": [ - { - "expected": "SELECT * FROM orders ORDER BY order_date DESC LIMIT ? OFFSET ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-lock-in-share-mode.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-lock-in-share-mode.json deleted file mode 100644 index 703bf310..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-lock-in-share-mode.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT * FROM orders WHERE status = 'Pending' LOCK IN SHARE MODE;", - "outputs": [ - { - "expected": "SELECT * FROM orders WHERE status = ? 
LOCK IN SHARE MODE", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-natural-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-natural-join.json deleted file mode 100644 index 9e526391..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-natural-join.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT * FROM orders NATURAL JOIN customers;", - "outputs": [ - { - "expected": "SELECT * FROM orders NATURAL JOIN customers", - "statement_metadata": { - "size": 25, - "tables": ["orders", "customers"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-parameter-binding.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-parameter-binding.json deleted file mode 100644 index 4db2716b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-parameter-binding.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, status FROM orders WHERE customer_id = ?;", - "outputs": [ - { - "expected": "SELECT id, status FROM orders WHERE customer_id = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-regex.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-regex.json deleted file mode 100644 index 969c50c6..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-regex.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, customer_id FROM orders WHERE status REGEXP 
'^Comp.*';", - "outputs": [ - { - "expected": "SELECT id, customer_id FROM orders WHERE status REGEXP ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-straight-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-straight-join.json deleted file mode 100644 index eba10c9f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-straight-join.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT * FROM orders STRAIGHT_JOIN customers ON orders.customer_id = customers.id;", - "outputs": [ - { - "expected": "SELECT * FROM orders STRAIGHT_JOIN customers ON orders.customer_id = customers.id", - "statement_metadata": { - "size": 34, - "tables": ["orders", "customers"], - "commands": ["SELECT", "STRAIGHT_JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-string-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-string-functions.json deleted file mode 100644 index 6e3f96c1..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-string-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, UPPER(status) AS status_upper FROM orders;", - "outputs": [ - { - "expected": "SELECT id, UPPER ( status ) FROM orders", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-subquery.json deleted file mode 100644 index 29290726..00000000 --- 
a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-subquery.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, (SELECT name FROM customers WHERE id = orders.customer_id) AS customer_name FROM orders;", - "outputs": [ - { - "expected": "SELECT id, ( SELECT name FROM customers WHERE id = orders.customer_id ) FROM orders", - "statement_metadata": { - "size": 21, - "tables": ["customers", "orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-user-defined-variables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-user-defined-variables.json deleted file mode 100644 index 2593abf9..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-user-defined-variables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SET @orderRank := 0; SELECT @orderRank := @orderRank + 1 AS rank, id, status FROM orders ORDER BY id;", - "outputs": [ - { - "expected": "SET @orderRank := ?; SELECT @orderRank := @orderRank + ?, id, status FROM orders ORDER BY id", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-variable-assignment.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-variable-assignment.json deleted file mode 100644 index 44f417c3..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-variable-assignment.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT @orderCount := COUNT(*) FROM orders WHERE status = 'Completed';", - "outputs": [ - { - "expected": "SELECT @orderCount := COUNT ( * ) FROM orders WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], 
- "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-window-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-window-functions.json deleted file mode 100644 index 981fa153..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-window-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, status, RANK() OVER (PARTITION BY customer_id ORDER BY order_date DESC) AS rank FROM orders;", - "outputs": [ - { - "expected": "SELECT id, status, RANK ( ) OVER ( PARTITION BY customer_id ORDER BY order_date DESC ) FROM orders", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/spatial-data-types-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/spatial-data-types-functions.json deleted file mode 100644 index cb807f36..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/spatial-data-types-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, ST_AsText(location) AS location FROM orders WHERE ST_Distance_Sphere(location, ST_GeomFromText('POINT(10 20)')) < 10000;", - "outputs": [ - { - "expected": "SELECT id, ST_AsText ( location ) FROM orders WHERE ST_Distance_Sphere ( location, ST_GeomFromText ( ? 
) ) < ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/spatial-geometry-data-types.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/spatial-geometry-data-types.json deleted file mode 100644 index 75160756..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/spatial-geometry-data-types.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, ST_AsText(location) FROM orders WHERE ST_Distance(location, ST_GeomFromText('POINT(1 1)')) < 100;", - "outputs": [ - { - "expected": "SELECT id, ST_AsText ( location ) FROM orders WHERE ST_Distance ( location, ST_GeomFromText ( ? ) ) < ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/system-versioned-tables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/system-versioned-tables.json deleted file mode 100644 index ee383187..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/system-versioned-tables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE TABLE orders_with_history (id INT, status VARCHAR(20)) WITH SYSTEM VERSIONING;", - "outputs": [ - { - "expected": "CREATE TABLE orders_with_history ( id INT, status VARCHAR ( ? 
) ) WITH SYSTEM VERSIONING", - "statement_metadata": { - "size": 25, - "tables": ["orders_with_history"], - "commands": ["CREATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/using-temporary-tables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/using-temporary-tables.json deleted file mode 100644 index dd436566..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/using-temporary-tables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE TEMPORARY TABLE temp_orders SELECT * FROM orders; SELECT * FROM temp_orders WHERE status = 'Pending'; DROP TEMPORARY TABLE temp_orders;", - "outputs": [ - { - "expected": "CREATE TEMPORARY TABLE temp_orders SELECT * FROM orders; SELECT * FROM temp_orders WHERE status = ?; DROP TEMPORARY TABLE temp_orders", - "statement_metadata": { - "size": 33, - "tables": ["temp_orders", "orders"], - "commands": ["CREATE", "SELECT", "DROP"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/virtual-generated-columns.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/virtual-generated-columns.json deleted file mode 100644 index a03d91de..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/virtual-generated-columns.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE TABLE orders_with_virtual (id INT, amount DECIMAL(10, 2), total_incl_tax DECIMAL(10, 2) GENERATED ALWAYS AS (amount * 1.1) STORED);", - "outputs": [ - { - "expected": "CREATE TABLE orders_with_virtual ( id INT, amount DECIMAL ( ? ), total_incl_tax DECIMAL ( ? ) GENERATED ALWAYS AS ( amount * ? 
) STORED )", - "statement_metadata": { - "size": 25, - "tables": ["orders_with_virtual"], - "commands": ["CREATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/bulk-update-multiple-conditions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/bulk-update-multiple-conditions.json deleted file mode 100644 index 2e236f7a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/bulk-update-multiple-conditions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = IF(amount > 1000, 'High Value', 'Regular'), order_date = IF(status = 'Pending', CURDATE(), order_date);", - "outputs": [ - { - "expected": "UPDATE orders SET status = IF ( amount > ?, ?, ? ), order_date = IF ( status = ?, CURDATE ( ), order_date )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/conditional-update-case.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/conditional-update-case.json deleted file mode 100644 index 38bfb7a0..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/conditional-update-case.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = CASE WHEN amount > 100 THEN 'High Value' ELSE 'Regular' END;", - "outputs": [ - { - "expected": "UPDATE orders SET status = CASE WHEN amount > ? THEN ? ELSE ? 
END", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-basic.json deleted file mode 100644 index 480f7a79..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-basic.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "input": "UPDATE orders SET status = 'Completed' WHERE status = 'Pending';", - "outputs": [ - { - "expected": "UPDATE orders SET status = ? WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-case-aggregate-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-case-aggregate-functions.json deleted file mode 100644 index adf83e96..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-case-aggregate-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders o SET o.status = CASE WHEN avg_amount > 500 THEN 'High' ELSE 'Low' END FROM (SELECT customer_id, AVG(amount) as avg_amount FROM orders GROUP BY customer_id) a WHERE o.customer_id = a.customer_id;", - "outputs": [ - { - "expected": "UPDATE orders o SET o.status = CASE WHEN avg_amount > ? THEN ? ELSE ? 
END FROM ( SELECT customer_id, AVG ( amount ) FROM orders GROUP BY customer_id ) a WHERE o.customer_id = a.customer_id", - "statement_metadata": { - "size": 18, - "tables": ["orders"], - "commands": ["UPDATE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-date-time-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-date-time-functions.json deleted file mode 100644 index 3155a4aa..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-date-time-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET order_date = CURDATE(), order_time = CURTIME() WHERE status = 'Pending';", - "outputs": [ - { - "expected": "UPDATE orders SET order_date = CURDATE ( ), order_time = CURTIME ( ) WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-encryption-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-encryption-functions.json deleted file mode 100644 index 06ebd690..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-encryption-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET encrypted_note = AES_ENCRYPT('Confidential', 'key') WHERE id = 1;", - "outputs": [ - { - "expected": "UPDATE orders SET encrypted_note = AES_ENCRYPT ( ? 
) WHERE id = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-enum-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-enum-data.json deleted file mode 100644 index bd645304..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-enum-data.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET order_type = 'Standard' WHERE order_type = 'Express';", - "outputs": [ - { - "expected": "UPDATE orders SET order_type = ? WHERE order_type = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-json-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-json-functions.json deleted file mode 100644 index d28de61b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-json-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET details = JSON_SET(details, '$.shippingMethod', 'Express') WHERE id = 1;", - "outputs": [ - { - "expected": "UPDATE orders SET details = JSON_SET ( details, ?, ? 
) WHERE id = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-json-modify.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-json-modify.json deleted file mode 100644 index 56cb60e0..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-json-modify.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET details = JSON_SET(details, '$.status', 'Updated') WHERE JSON_EXTRACT(details, '$.priority') = 'High';", - "outputs": [ - { - "expected": "UPDATE orders SET details = JSON_SET ( details, ?, ? ) WHERE JSON_EXTRACT ( details, ? ) = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-lock-tables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-lock-tables.json deleted file mode 100644 index 1aa721bc..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-lock-tables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "LOCK TABLES orders WRITE; UPDATE orders SET status = 'Cancelled' WHERE status = 'Pending'; UNLOCK TABLES;", - "outputs": [ - { - "expected": "LOCK TABLES orders WRITE; UPDATE orders SET status = ? 
WHERE status = ?; UNLOCK TABLES", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-math-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-math-functions.json deleted file mode 100644 index c5fe1b53..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-math-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET amount = amount * 1.1 WHERE status = 'Completed';", - "outputs": [ - { - "expected": "UPDATE orders SET amount = amount * ? WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-optimizing-conditions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-optimizing-conditions.json deleted file mode 100644 index 56f3924b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-optimizing-conditions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = 'Archived' WHERE status = 'Completed' AND order_date < DATE_SUB(NOW(), INTERVAL 1 YEAR);", - "outputs": [ - { - "expected": "UPDATE orders SET status = ? WHERE status = ? AND order_date < DATE_SUB ( NOW ( ), INTERVAL ? 
YEAR )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-order-by-limit.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-order-by-limit.json deleted file mode 100644 index 28f2fa3a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-order-by-limit.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = 'Cancelled' WHERE status = 'Pending' ORDER BY order_date ASC LIMIT 10;", - "outputs": [ - { - "expected": "UPDATE orders SET status = ? WHERE status = ? ORDER BY order_date ASC LIMIT ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-regular-expressions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-regular-expressions.json deleted file mode 100644 index 2863fb28..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-regular-expressions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = 'Query' WHERE status REGEXP '^Q.*';", - "outputs": [ - { - "expected": "UPDATE orders SET status = ? 
WHERE status REGEXP ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-spatial-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-spatial-data.json deleted file mode 100644 index e93dd7b3..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-spatial-data.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET location = ST_GeomFromText('POINT(1 1)') WHERE id = 1;", - "outputs": [ - { - "expected": "UPDATE orders SET location = ST_GeomFromText ( ? ) WHERE id = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-string-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-string-functions.json deleted file mode 100644 index 6a219694..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-string-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = CONCAT(status, ' - Updated') WHERE id = 1;", - "outputs": [ - { - "expected": "UPDATE orders SET status = CONCAT ( status, ? 
) WHERE id = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-user-defined-variables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-user-defined-variables.json deleted file mode 100644 index b204f0ca..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-user-defined-variables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SET @new_status = 'Delayed'; UPDATE orders SET status = @new_status WHERE status = 'Pending';", - "outputs": [ - { - "expected": "SET @new_status = ?; UPDATE orders SET status = @new_status WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-using-variables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-using-variables.json deleted file mode 100644 index e79e587f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-using-variables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SET @new_status = 'Shipped'; UPDATE orders SET status = @new_status WHERE status = 'Processing';", - "outputs": [ - { - "expected": "SET @new_status = ?; UPDATE orders SET status = @new_status WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-with-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-with-join.json deleted file mode 100644 index 81da8f15..00000000 --- 
a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-with-join.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders o JOIN customers c ON o.customer_id = c.id SET o.status = 'Processing' WHERE c.region = 'East';", - "outputs": [ - { - "expected": "UPDATE orders o JOIN customers c ON o.customer_id = c.id SET o.status = ? WHERE c.region = ?", - "statement_metadata": { - "size": 25, - "tables": ["orders", "customers"], - "commands": ["UPDATE", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-with-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-with-subquery.json deleted file mode 100644 index 9cdd8707..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-with-subquery.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = 'Archived' WHERE id IN (SELECT id FROM orders WHERE order_date < '2020-01-01');", - "outputs": [ - { - "expected": "UPDATE orders SET status = ? WHERE id IN ( SELECT id FROM orders WHERE order_date < ? )", - "statement_metadata": { - "size": 18, - "tables": ["orders"], - "commands": ["UPDATE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/bulk-operations.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/bulk-operations.json deleted file mode 100644 index ea1e6291..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/bulk-operations.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "DECLARE TYPE EmpTabTyp IS TABLE OF employees%ROWTYPE INDEX BY PLS_INTEGER; emp_tab EmpTabTyp; BEGIN SELECT * BULK COLLECT INTO emp_tab FROM employees; FORALL i IN emp_tab.FIRST .. 
emp_tab.LAST SAVE EXCEPTIONS UPDATE employees SET test = test * 1.05 WHERE employee_id = emp_tab(i).employee_id; END;", - "outputs": [ - { - "expected": "DECLARE TYPE EmpTabTyp IS TABLE OF employees % ROWTYPE INDEX BY PLS_INTEGER; emp_tab EmpTabTyp; BEGIN SELECT * BULK COLLECT INTO emp_tab FROM employees; FORALL i IN emp_tab.FIRST . . emp_tab.LAST SAVE EXCEPTIONS UPDATE employees SET test = test * ? WHERE employee_id = emp_tab(i) . employee_id; END;", - "statement_metadata": { - "size": 33, - "tables": ["emp_tab", "employees"], - "commands": ["BEGIN", "SELECT", "UPDATE"], - "comments": [], - "procedures": [] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/complex-multi-table-delete.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/complex-multi-table-delete.json deleted file mode 100644 index 234ce92e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/complex-multi-table-delete.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders o WHERE o.customer_id IN (SELECT c.id FROM customers c WHERE NOT EXISTS (SELECT 1 FROM customer_orders co WHERE co.customer_id = c.id AND co.order_date > SYSDATE - 365)) AND EXISTS (SELECT 1 FROM order_items oi WHERE oi.order_id = o.id AND oi.product_id IN (SELECT p.id FROM products p WHERE p.category = 'Obsolete'));", - "outputs": [ - { - "expected": "DELETE FROM orders o WHERE o.customer_id IN ( SELECT c.id FROM customers c WHERE NOT EXISTS ( SELECT ? FROM customer_orders co WHERE co.customer_id = c.id AND co.order_date > SYSDATE - ? ) ) AND EXISTS ( SELECT ? 
FROM order_items oi WHERE oi.order_id = o.id AND oi.product_id IN ( SELECT p.id FROM products p WHERE p.category = ? ) )", - "statement_metadata": { - "size": 61, - "tables": ["orders", "customers", "customer_orders", "order_items", "products"], - "commands": ["DELETE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/complex-nested-subqueries.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/complex-nested-subqueries.json deleted file mode 100644 index 8ef8b456..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/complex-nested-subqueries.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT e.employee_id, (SELECT MAX(s.yoe) FROM employees s WHERE s.department_id = e.department_id) AS max_dept_yoe FROM employees e WHERE EXISTS (SELECT 1 FROM departments d WHERE d.id = e.department_id AND d.budget > (SELECT AVG(budget) FROM departments)) ORDER BY e.department_id, e.employee_id;", - "outputs": [ - { - "expected": "SELECT e.employee_id, ( SELECT MAX ( s.yoe ) FROM employees s WHERE s.department_id = e.department_id ) FROM employees e WHERE EXISTS ( SELECT ? 
FROM departments d WHERE d.id = e.department_id AND d.budget > ( SELECT AVG ( budget ) FROM departments ) ) ORDER BY e.department_id, e.employee_id", - "statement_metadata": { - "size": 26, - "tables": ["employees", "departments"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/complex-select-aggregates-joins.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/complex-select-aggregates-joins.json deleted file mode 100644 index c3375c00..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/complex-select-aggregates-joins.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "SELECT u.id, u.name, COUNT(o.id) AS order_count, AVG(o.total) AS average_order FROM users u JOIN orders o ON u.id = o.user_id WHERE u.status = 'active' GROUP BY u.id, u.name HAVING COUNT(o.id) > 5;", - "outputs": [ - { - "expected": "SELECT u.id, u.name, COUNT ( o.id ), AVG ( o.total ) FROM users u JOIN orders o ON u.id = o.user_id WHERE u.status = ? GROUP BY u.id, u.name HAVING COUNT ( o.id ) > ?", - "statement_metadata": { - "size": 21, - "tables": ["users", "orders"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT u.id, u.name, COUNT(o.id), AVG(o.total) FROM users u JOIN orders o ON u.id = o.user_id WHERE u.status = ? 
GROUP BY u.id, u.name HAVING COUNT(o.id) > ?;", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/extremely-complex-oracle-query.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/extremely-complex-oracle-query.json deleted file mode 100644 index 3c178aa4..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/extremely-complex-oracle-query.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "WITH RECURSIVE sales_cte (product_id, total_sales, sales_rank) AS (SELECT product_id, SUM(amount), RANK() OVER (ORDER BY SUM(amount) DESC) FROM sales GROUP BY product_id UNION ALL SELECT s.product_id, s.total_sales, s.sales_rank FROM sales s JOIN sales_cte sc ON s.product_id = sc.product_id WHERE s.amount > 1000), complex_view AS (SELECT e.employee_id, e.department_id, e.test_amt, AVG(e.test_amt) OVER (PARTITION BY e.department_id) AS avg_dept_test_amt, d.department_name, d.manager_id, (SELECT MAX(p.price) FROM products p WHERE p.department_id = e.department_id) AS max_product_price FROM employees e JOIN departments d ON e.department_id = d.id WHERE e.hire_date > SYSDATE - INTERVAL '10' YEAR) SELECT cv.*, sc.total_sales, sc.sales_rank FROM complex_view cv LEFT JOIN sales_cte sc ON cv.department_id = sc.product_id WHERE cv.avg_dept_test_amt > (SELECT AVG(total_sal) FROM (SELECT department_id, SUM(test_amt) AS total_sal FROM employees GROUP BY department_id)) AND EXISTS (SELECT 1 FROM customer_orders co WHERE co.employee_id = cv.employee_id AND co.order_status = 'Completed') ORDER BY cv.department_id, cv.test_amt DESC;", - "outputs": [ - { - "expected": "WITH RECURSIVE sales_cte ( product_id, total_sales, sales_rank ) AS ( SELECT product_id, SUM ( amount ), RANK ( ) OVER ( ORDER BY SUM ( amount ) DESC ) FROM sales GROUP BY product_id UNION ALL SELECT s.product_id, 
s.total_sales, s.sales_rank FROM sales s JOIN sales_cte sc ON s.product_id = sc.product_id WHERE s.amount > ? ), complex_view AS ( SELECT e.employee_id, e.department_id, e.test_amt, AVG ( e.test_amt ) OVER ( PARTITION BY e.department_id ), d.department_name, d.manager_id, ( SELECT MAX ( p.price ) FROM products p WHERE p.department_id = e.department_id ) FROM employees e JOIN departments d ON e.department_id = d.id WHERE e.hire_date > SYSDATE - INTERVAL ? YEAR ) SELECT cv. *, sc.total_sales, sc.sales_rank FROM complex_view cv LEFT JOIN sales_cte sc ON cv.department_id = sc.product_id WHERE cv.avg_dept_test_amt > ( SELECT AVG ( total_sal ) FROM ( SELECT department_id, SUM ( test_amt ) FROM employees GROUP BY department_id ) ) AND EXISTS ( SELECT ? FROM customer_orders co WHERE co.employee_id = cv.employee_id AND co.order_status = ? ) ORDER BY cv.department_id, cv.test_amt DESC", - "statement_metadata": { - "size": 79, - "tables": ["sales", "sales_cte", "products", "employees", "departments", "complex_view", "customer_orders"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/extremely-complex-stored-procedure.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/extremely-complex-stored-procedure.json deleted file mode 100644 index 7a4f29f4..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/extremely-complex-stored-procedure.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE OR REPLACE PROCEDURE /* test comments \n\tsecond line \n*/ complex_data_audit AS CURSOR emp_cursor IS SELECT employee_id FROM employees; /* another comment */ v_employee_id employees.employee_id%TYPE; BEGIN FOR emp_record IN emp_cursor LOOP v_employee_id := emp_record.employee_id; INSERT INTO audit_log (message) VALUES ('Auditing employee with ID: ' || v_employee_id); FOR c IN (SELECT * FROM customer_orders 
WHERE employee_id = v_employee_id) LOOP IF c.order_status = 'Pending' THEN UPDATE customer_orders SET order_status = 'Under Review' WHERE order_id = c.order_id; ELSE INSERT INTO audit_log (message) VALUES ('Order ' || c.order_id || ' already processed'); END IF; END LOOP; END LOOP; EXCEPTION WHEN OTHERS THEN RAISE_APPLICATION_ERROR(-20002, 'Error in complex_data_audit'); END complex_data_audit;", - "outputs": [ - { - "expected": "CREATE OR REPLACE PROCEDURE complex_data_audit emp_cursor IS SELECT employee_id FROM employees; v_employee_id employees.employee_id % TYPE; BEGIN FOR emp_record IN emp_cursor LOOP v_employee_id := emp_record.employee_id; INSERT INTO audit_log ( message ) VALUES ( ? || v_employee_id ); FOR c IN ( SELECT * FROM customer_orders WHERE employee_id = v_employee_id ) LOOP IF c.order_status = ? THEN UPDATE customer_orders SET order_status = ? WHERE order_id = c.order_id; ELSE INSERT INTO audit_log ( message ) VALUES ( ? || c.order_id || ? ); END IF; END LOOP; END LOOP; EXCEPTION WHEN OTHERS THEN RAISE_APPLICATION_ERROR ( ? 
); END complex_data_audit", - "statement_metadata": { - "size": 135, - "tables": ["employees", "audit_log", "customer_orders"], - "commands": ["CREATE", "SELECT", "BEGIN", "INSERT", "UPDATE"], - "comments": ["/* test comments \n\tsecond line \n*/", "/* another comment */"], - "procedures": ["complex_data_audit"] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/plsql-blocks.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/plsql-blocks.json deleted file mode 100644 index 9e098025..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/plsql-blocks.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DECLARE x NUMBER; BEGIN SELECT COUNT(*) INTO x FROM employees; DBMS_OUTPUT.PUT_LINE('Count: ' || x); END;", - "outputs": [ - { - "expected": "DECLARE x NUMBER; BEGIN SELECT COUNT ( * ) INTO x FROM employees; DBMS_OUTPUT.PUT_LINE ( ? || x ); END", - "statement_metadata": { - "size": 21, - "tables": ["x", "employees"], - "commands": ["BEGIN", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/super-complex-oracle-query.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/super-complex-oracle-query.json deleted file mode 100644 index c1564cce..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/super-complex-oracle-query.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "WITH ranked_sales AS (SELECT product_id, SUM(amount) AS total_sales, RANK() OVER (ORDER BY SUM(amount) DESC) sales_rank FROM sales GROUP BY product_id), dept_costs AS (SELECT department_id, SUM(test_amt) AS total_sal FROM employees GROUP BY department_id), latest_transactions AS (SELECT t.account_id, t.amount, ROW_NUMBER() OVER (PARTITION BY t.account_id ORDER BY t.transaction_date DESC) rn FROM transactions t WHERE t.transaction_date >= 
ADD_MONTHS(SYSDATE, -6)) SELECT e.employee_id, e.last_name, e.test_amt, d.department_name, d.location_id, rs.total_sales, rs.sales_rank, lt.amount AS latest_transaction_amount FROM employees e INNER JOIN departments d ON e.department_id = d.id LEFT JOIN ranked_sales rs ON e.product_id = rs.product_id LEFT JOIN latest_transactions lt ON e.account_id = lt.account_id AND lt.rn = 1 WHERE e.hire_date > '2010-01-01' AND (d.budget > (SELECT AVG(total_sal) FROM dept_costs) OR e.test_amt > (SELECT AVG(test_amt) FROM employees WHERE department_id = e.department_id)) AND EXISTS (SELECT 1 FROM customer_orders co WHERE co.employee_id = e.employee_id AND co.order_status = 'Completed') ORDER BY e.department_id, e.test_amt DESC;", - "outputs": [ - { - "expected": "WITH ranked_sales AS ( SELECT product_id, SUM ( amount ), RANK ( ) OVER ( ORDER BY SUM ( amount ) DESC ) sales_rank FROM sales GROUP BY product_id ), dept_costs AS ( SELECT department_id, SUM ( test_amt ) FROM employees GROUP BY department_id ), latest_transactions AS ( SELECT t.account_id, t.amount, ROW_NUMBER ( ) OVER ( PARTITION BY t.account_id ORDER BY t.transaction_date DESC ) rn FROM transactions t WHERE t.transaction_date >= ADD_MONTHS ( SYSDATE, ? ) ) SELECT e.employee_id, e.last_name, e.test_amt, d.department_name, d.location_id, rs.total_sales, rs.sales_rank, lt.amount FROM employees e INNER JOIN departments d ON e.department_id = d.id LEFT JOIN ranked_sales rs ON e.product_id = rs.product_id LEFT JOIN latest_transactions lt ON e.account_id = lt.account_id AND lt.rn = ? WHERE e.hire_date > ? AND ( d.budget > ( SELECT AVG ( total_sal ) FROM dept_costs ) OR e.test_amt > ( SELECT AVG ( test_amt ) FROM employees WHERE department_id = e.department_id ) ) AND EXISTS ( SELECT ? FROM customer_orders co WHERE co.employee_id = e.employee_id AND co.order_status = ? 
) ORDER BY e.department_id, e.test_amt DESC", - "statement_metadata": { - "size": 103, - "tables": ["sales", "employees", "transactions", "departments", "ranked_sales", "latest_transactions", "dept_costs", "customer_orders"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/conditional-delete-with-case.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/conditional-delete-with-case.json deleted file mode 100644 index a9f54046..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/conditional-delete-with-case.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM user_notifications WHERE id IN (SELECT id FROM notifications WHERE recipient_id = 123 AND status = CASE WHEN urgency = 'High' THEN 'Unread' ELSE 'Read' END);", - "outputs": [ - { - "expected": "DELETE FROM user_notifications WHERE id IN ( SELECT id FROM notifications WHERE recipient_id = ? AND status = CASE WHEN urgency = ? THEN ? ELSE ? END )", - "statement_metadata": { - "size": 43, - "tables": ["user_notifications", "notifications"], - "commands": ["DELETE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-basic.json deleted file mode 100644 index ab3115cd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-basic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM customers WHERE last_purchase_date < ADD_MONTHS(SYSDATE, -12);", - "outputs": [ - { - "expected": "DELETE FROM customers WHERE last_purchase_date < ADD_MONTHS ( SYSDATE, ? 
)", - "statement_metadata": { - "size": 15, - "tables": ["customers"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-cascade.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-cascade.json deleted file mode 100644 index a0b7a7bd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-cascade.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE customer_id = 456 CASCADE;", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE customer_id = ? CASCADE", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-using-rowid.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-using-rowid.json deleted file mode 100644 index 90ab26d6..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-using-rowid.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM employees WHERE rowid = (SELECT max(rowid) FROM employees WHERE department_id = 20);", - "outputs": [ - { - "expected": "DELETE FROM employees WHERE rowid = ( SELECT max ( rowid ) FROM employees WHERE department_id = ? 
)", - "statement_metadata": { - "size": 21, - "tables": ["employees"], - "commands": ["DELETE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-where-current-of.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-where-current-of.json deleted file mode 100644 index 4752fe7b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-where-current-of.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM employees WHERE CURRENT OF emp_cursor;", - "outputs": [ - { - "expected": "DELETE FROM employees WHERE CURRENT OF emp_cursor", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-complex-subqueries.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-complex-subqueries.json deleted file mode 100644 index c6fc3ee6..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-complex-subqueries.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM products WHERE id IN (SELECT p.id FROM products p JOIN inventory i ON p.id = i.product_id WHERE i.quantity = 0);", - "outputs": [ - { - "expected": "DELETE FROM products WHERE id IN ( SELECT p.id FROM products p JOIN inventory i ON p.id = i.product_id WHERE i.quantity = ? 
)", - "statement_metadata": { - "size": 33, - "tables": ["products", "inventory"], - "commands": ["DELETE", "SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-flashback-query.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-flashback-query.json deleted file mode 100644 index 7083c47c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-flashback-query.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders AS OF TIMESTAMP TO_TIMESTAMP('2023-03-15 08:30:00', 'YYYY-MM-DD HH24:MI:SS') WHERE order_date < '2023-01-01';", - "outputs": [ - { - "expected": "DELETE FROM orders AS OF TIMESTAMP TO_TIMESTAMP ( ? ) WHERE order_date < ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-join-syntax.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-join-syntax.json deleted file mode 100644 index d3ec876d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-join-syntax.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders o WHERE EXISTS (SELECT 1 FROM customers c WHERE o.customer_id = c.id AND c.status = 'Inactive');", - "outputs": [ - { - "expected": "DELETE FROM orders o WHERE EXISTS ( SELECT ? FROM customers c WHERE o.customer_id = c.id AND c.status = ? 
)", - "statement_metadata": { - "size": 27, - "tables": ["orders", "customers"], - "commands": ["DELETE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-pseudocolumns.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-pseudocolumns.json deleted file mode 100644 index b1ab1b26..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-pseudocolumns.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM session_logs WHERE ROWNUM <= 10;", - "outputs": [ - { - "expected": "DELETE FROM session_logs WHERE ROWNUM <= ?", - "statement_metadata": { - "size": 18, - "tables": ["session_logs"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-returning-clause.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-returning-clause.json deleted file mode 100644 index 95be2ac0..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-returning-clause.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM logs WHERE entry_date < SYSDATE RETURNING id INTO :deleted_ids;", - "outputs": [ - { - "expected": "DELETE FROM logs WHERE entry_date < SYSDATE RETURNING id INTO :deleted_ids", - "statement_metadata": { - "size": 10, - "tables": ["logs"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-subquery.json deleted file mode 100644 index 7018cda6..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-subquery.json +++ 
/dev/null @@ -1,23 +0,0 @@ -{ - "input": "DELETE FROM logs WHERE entry_date < (SELECT MIN(order_date) FROM orders);", - "outputs": [ - { - "expected": "DELETE FROM logs WHERE entry_date < ( SELECT MIN ( order_date ) FROM orders )", - "statement_metadata": { - "size": 22, - "tables": ["logs", "orders"], - "commands": ["DELETE", "SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "DELETE FROM logs WHERE entry_date < (SELECT MIN(order_date) FROM orders);", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-all-into-multiple-tables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-all-into-multiple-tables.json deleted file mode 100644 index b7c00447..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-all-into-multiple-tables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT ALL INTO sales (product_id, amount) VALUES (product_id, amount) INTO audit_log (action_type, message) VALUES ('INSERT', 'Inserted into sales') SELECT product_id, amount FROM temp_sales WHERE amount > 1000;", - "outputs": [ - { - "expected": "INSERT ALL INTO sales ( product_id, amount ) VALUES ( product_id, amount ) INTO audit_log ( action_type, message ) VALUES ( ? 
) SELECT product_id, amount FROM temp_sales WHERE amount > ?", - "statement_metadata": { - "size": 36, - "tables": ["sales", "audit_log", "temp_sales"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-all-multiple-conditions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-all-multiple-conditions.json deleted file mode 100644 index e16049fd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-all-multiple-conditions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT ALL WHEN amount <= 100 THEN INTO small_orders (order_id, amount) VALUES (order_id, amount) WHEN amount BETWEEN 101 AND 500 THEN INTO medium_orders (order_id, amount) VALUES (order_id, amount) ELSE INTO large_orders (order_id, amount) VALUES (order_id, amount) SELECT order_id, amount FROM orders;", - "outputs": [ - { - "expected": "INSERT ALL WHEN amount <= ? THEN INTO small_orders ( order_id, amount ) VALUES ( order_id, amount ) WHEN amount BETWEEN ? AND ? 
THEN INTO medium_orders ( order_id, amount ) VALUES ( order_id, amount ) ELSE INTO large_orders ( order_id, amount ) VALUES ( order_id, amount ) SELECT order_id, amount FROM orders", - "statement_metadata": { - "size": 55, - "tables": ["small_orders", "medium_orders", "large_orders", "orders"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-basic.json deleted file mode 100644 index 96a9f576..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-basic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO customers (id, name, address) VALUES (101, 'John Doe', '123 Oracle Ln');", - "outputs": [ - { - "expected": "INSERT INTO customers ( id, name, address ) VALUES ( ? )", - "statement_metadata": { - "size": 15, - "tables": ["customers"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-using-decode.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-using-decode.json deleted file mode 100644 index 21c14044..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-using-decode.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO user_log (user_id, action, log_date) SELECT user_id, DECODE(activity_type, 'LOGIN', 'Logged In', 'LOGOUT', 'Logged Out', 'Unknown'), SYSDATE FROM user_activity;", - "outputs": [ - { - "expected": "INSERT INTO user_log ( user_id, action, log_date ) SELECT user_id, DECODE ( activity_type, ?, ?, ?, ?, ? 
), SYSDATE FROM user_activity", - "statement_metadata": { - "size": 33, - "tables": ["user_log", "user_activity"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-column-ordering.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-column-ordering.json deleted file mode 100644 index 6e5baa5d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-column-ordering.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO customer_addresses (address, city, customer_id) VALUES ('123 Main St', 'Anytown', 456);", - "outputs": [ - { - "expected": "INSERT INTO customer_addresses ( address, city, customer_id ) VALUES ( ? )", - "statement_metadata": { - "size": 24, - "tables": ["customer_addresses"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-returning-clause.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-returning-clause.json deleted file mode 100644 index 10f8de21..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-returning-clause.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO transactions (account_id, amount) VALUES (123, 500) RETURNING transaction_id INTO :new_id;", - "outputs": [ - { - "expected": "INSERT INTO transactions ( account_id, amount ) VALUES ( ? 
) RETURNING transaction_id INTO :new_id", - "statement_metadata": { - "size": 18, - "tables": ["transactions"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-select-union.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-select-union.json deleted file mode 100644 index 19405ecb..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-select-union.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO log (message) SELECT 'User logged in' FROM dual UNION ALL SELECT 'User performed an action' FROM dual;", - "outputs": [ - { - "expected": "INSERT INTO log ( message ) SELECT ? FROM dual UNION ALL SELECT ? FROM dual", - "statement_metadata": { - "size": 19, - "tables": ["log", "dual"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-sequence.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-sequence.json deleted file mode 100644 index 155a7e93..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-sequence.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO products (id, name, price) VALUES (product_seq.NEXTVAL, 'New Product', 99.99);", - "outputs": [ - { - "expected": "INSERT INTO products ( id, name, price ) VALUES ( product_seq.NEXTVAL, ?, ? 
)", - "statement_metadata": { - "size": 14, - "tables": ["products"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-subquery.json deleted file mode 100644 index 56068700..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-subquery.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "INSERT INTO orders (id, user_id, amount) SELECT order_seq.NEXTVAL, user_id, 100 FROM users WHERE status = 'active';", - "outputs": [ - { - "expected": "INSERT INTO orders ( id, user_id, amount ) SELECT order_seq.NEXTVAL, user_id, ? FROM users WHERE status = ?", - "statement_metadata": { - "size": 23, - "tables": ["orders", "users"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "INSERT INTO orders (id, user_id, amount) SELECT order_seq.NEXTVAL, user_id, ? FROM users WHERE status = ?;", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/multitable-insert-conditional.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/multitable-insert-conditional.json deleted file mode 100644 index e901ad84..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/multitable-insert-conditional.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT FIRST INTO sales_audit (action) VALUES ('Sale occurred') WHEN amount > 1000 THEN INTO high_value_sales (sale_id, amount) VALUES (sale_id, amount) SELECT sale_id, amount FROM sales;", - "outputs": [ - { - "expected": "INSERT FIRST INTO sales_audit ( action ) VALUES ( ? ) WHEN amount > ? 
THEN INTO high_value_sales ( sale_id, amount ) VALUES ( sale_id, amount ) SELECT sale_id, amount FROM sales", - "statement_metadata": { - "size": 44, - "tables": ["sales_audit", "high_value_sales", "sales"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-procedure-in-out-params.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-procedure-in-out-params.json deleted file mode 100644 index 989dbefa..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-procedure-in-out-params.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR REPLACE PROCEDURE CalculateDiscount(p_order_id IN NUMBER, p_discount OUT NUMBER) AS total_amount NUMBER; BEGIN SELECT SUM(price * quantity) INTO total_amount FROM order_items WHERE order_id = p_order_id; p_discount := total_amount * 0.1; END CalculateDiscount;", - "outputs": [ - { - "expected": "CREATE OR REPLACE PROCEDURE CalculateDiscount(p_order_id IN NUMBER, p_discount OUT NUMBER) NUMBER; BEGIN SELECT SUM(price * quantity) INTO total_amount FROM order_items WHERE order_id = p_order_id; p_discount := total_amount * ?; END CalculateDiscount;", - "statement_metadata": { - "size": 57, - "tables": ["total_amount", "order_items"], - "commands": ["CREATE", "BEGIN", "SELECT"], - "comments": [], - "procedures": ["CalculateDiscount"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-procedure-with-cursors.json 
b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-procedure-with-cursors.json deleted file mode 100644 index 7571ee8c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-procedure-with-cursors.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR REPLACE PROCEDURE FetchCustomerOrders(p_customer_id IN NUMBER) IS CURSOR order_cursor IS SELECT * FROM orders WHERE customer_id = p_customer_id; order_rec order_cursor%ROWTYPE; BEGIN OPEN order_cursor; LOOP FETCH order_cursor INTO order_rec; EXIT WHEN order_cursor%NOTFOUND; END LOOP; CLOSE order_cursor; END FetchCustomerOrders;", - "outputs": [ - { - "expected": "CREATE OR REPLACE PROCEDURE FetchCustomerOrders(p_customer_id IN NUMBER) IS CURSOR order_cursor IS SELECT * FROM orders WHERE customer_id = p_customer_id; order_rec order_cursor % ROWTYPE; BEGIN OPEN order_cursor; LOOP FETCH order_cursor INTO order_rec; EXIT WHEN order_cursor % NOTFOUND; END LOOP; CLOSE order_cursor; END FetchCustomerOrders;", - "statement_metadata": { - "size": 51, - "tables": ["orders", "order_rec"], - "commands": ["CREATE", "SELECT", "BEGIN"], - "comments": [], - "procedures": ["FetchCustomerOrders"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-procedure-with-exception-handling.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-procedure-with-exception-handling.json deleted file mode 100644 index d7518955..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-procedure-with-exception-handling.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR REPLACE PROCEDURE 
DeleteCustomer(p_customer_id IN NUMBER) AS BEGIN DELETE FROM customers WHERE id = p_customer_id; EXCEPTION WHEN OTHERS THEN RAISE_APPLICATION_ERROR(-20001, 'Error deleting customer.'); END DeleteCustomer;", - "outputs": [ - { - "expected": "CREATE OR REPLACE PROCEDURE DeleteCustomer(p_customer_id IN NUMBER) AS BEGIN DELETE FROM customers WHERE id = p_customer_id; EXCEPTION WHEN OTHERS THEN RAISE_APPLICATION_ERROR(?); END DeleteCustomer;", - "statement_metadata": { - "size": 40, - "tables": ["customers"], - "commands": ["CREATE", "BEGIN", "DELETE"], - "comments": [], - "procedures": ["DeleteCustomer"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-simple-stored-procedure.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-simple-stored-procedure.json deleted file mode 100644 index 3bc5dd56..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-simple-stored-procedure.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR REPLACE PROCEDURE UpdateOrderStatus(p_order_id IN NUMBER, p_status IN VARCHAR2) AS BEGIN UPDATE orders SET status = p_status WHERE order_id = p_order_id; END UpdateOrderStatus;", - "outputs": [ - { - "expected": "CREATE OR REPLACE PROCEDURE UpdateOrderStatus(p_order_id IN NUMBER, p_status IN VARCHAR?) 
AS BEGIN UPDATE orders SET status = p_status WHERE order_id = p_order_id; END UpdateOrderStatus;", - "statement_metadata": { - "size": 40, - "tables": ["orders"], - "commands": ["CREATE", "BEGIN", "UPDATE"], - "comments": [], - "procedures": ["UpdateOrderStatus"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/error-handling-exception.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/error-handling-exception.json deleted file mode 100644 index d225325d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/error-handling-exception.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR REPLACE PROCEDURE update_test_amt(p_employee_id NUMBER, p_change NUMBER) AS BEGIN UPDATE employees SET test_amt = test_amt + p_change WHERE employee_id = p_employee_id; EXCEPTION WHEN OTHERS THEN RAISE_APPLICATION_ERROR(-20001, 'Invalid test_amt update'); END;", - "outputs": [ - { - "expected": "CREATE OR REPLACE PROCEDURE update_test_amt(p_employee_id NUMBER, p_change NUMBER) AS BEGIN UPDATE employees SET test_amt = test_amt + p_change WHERE employee_id = p_employee_id; EXCEPTION WHEN OTHERS THEN RAISE_APPLICATION_ERROR(?); END;", - "statement_metadata": { - "size": 41, - "tables": ["employees"], - "commands": ["CREATE", "BEGIN", "UPDATE"], - "comments": [], - "procedures": ["update_test_amt"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No 
newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/invoke-stored-procedure-with-exec.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/invoke-stored-procedure-with-exec.json deleted file mode 100644 index a1abd418..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/invoke-stored-procedure-with-exec.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "EXEC UpdateOrderStatus(123, 'Shipped');", - "outputs": [ - { - "expected": "EXEC UpdateOrderStatus(?);", - "statement_metadata": { - "size": 4, - "tables": [], - "commands": ["EXEC"], - "comments": [], - "procedures": [] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/invoke-stored-procedure.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/invoke-stored-procedure.json deleted file mode 100644 index 91ad0a1a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/invoke-stored-procedure.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "BEGIN UpdateOrderStatus(123, 'Shipped'); END;", - "outputs": [ - { - "expected": "BEGIN UpdateOrderStatus(?); END;", - "statement_metadata": { - "size": 5, - "tables": [], - "commands": ["BEGIN"], - "comments": [], - "procedures": [] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git 
a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/packages.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/packages.json deleted file mode 100644 index aa27903d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/packages.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR REPLACE PACKAGE mgmt AS PROCEDURE test_proc_1(p_name VARCHAR2); PROCEDURE test_proc_2(p_id NUMBER); END mgmt;", - "outputs": [ - { - "expected": "CREATE OR REPLACE PACKAGE mgmt AS PROCEDURE test_proc_1(p_name VARCHAR?); PROCEDURE test_proc_2(p_id NUMBER); END mgmt;", - "statement_metadata": { - "size": 28, - "tables": [], - "commands": ["CREATE"], - "comments": [], - "procedures": ["test_proc_1", "test_proc_2"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/pipelined-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/pipelined-functions.json deleted file mode 100644 index 5ec81089..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/pipelined-functions.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR REPLACE FUNCTION get_departments RETURN dept_t PIPELINED AS BEGIN FOR r IN (SELECT * FROM departments) LOOP PIPE ROW(r); END LOOP; RETURN; END;", - "outputs": [ - { - "expected": "CREATE OR REPLACE FUNCTION get_departments RETURN dept_t PIPELINED AS BEGIN FOR r IN (SELECT * FROM departments) LOOP PIPE ROW(r); END LOOP; RETURN; END;", - "statement_metadata": { - "size": 28, - "tables": ["departments"], - "commands": ["CREATE", "BEGIN", "SELECT"], - "comments": [], - "procedures": [] - }, - "obfuscator_config": { - 
"replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/stored-procedures-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/stored-procedures-functions.json deleted file mode 100644 index a6ffffbe..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/stored-procedures-functions.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR REPLACE PROCEDURE get_employee_count(p_dept_id IN NUMBER, p_count OUT NUMBER) AS BEGIN SELECT COUNT(*) INTO p_count FROM employees WHERE department_id = p_dept_id; END; BEGIN get_employee_count(10, :count); END;", - "outputs": [ - { - "expected": "CREATE OR REPLACE PROCEDURE get_employee_count(p_dept_id IN NUMBER, p_count OUT NUMBER) AS BEGIN SELECT COUNT(*) INTO p_count FROM employees WHERE department_id = p_dept_id; END; BEGIN get_employee_count(?, :count); END;", - "statement_metadata": { - "size": 51, - "tables": ["p_count", "employees"], - "commands": ["CREATE", "BEGIN", "SELECT"], - "comments": [], - "procedures": ["get_employee_count"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/triggers.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/triggers.json deleted file mode 100644 index 8ff82d4d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/triggers.json +++ /dev/null @@ 
-1,27 +0,0 @@ -{ - "input": "CREATE OR REPLACE TRIGGER audit_table AFTER INSERT ON logs FOR EACH ROW BEGIN INSERT INTO audit_log (action) VALUES ('Inserted new log'); END;", - "outputs": [ - { - "expected": "CREATE OR REPLACE TRIGGER audit_table AFTER INSERT ON logs FOR EACH ROW BEGIN INSERT INTO audit_log (action) VALUES (?); END;", - "statement_metadata": { - "size": 26, - "tables": ["audit_log"], - "commands": ["CREATE", "INSERT", "BEGIN"], - "comments": [], - "procedures": [] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/complex-join-operations.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/complex-join-operations.json deleted file mode 100644 index f801075a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/complex-join-operations.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT e.employee_id, e.last_name, d.department_name FROM employees e JOIN departments d ON e.department_id = d.department_id WHERE e.test_amt > (SELECT AVG(test_amt) FROM employees WHERE department_id = e.department_id);", - "outputs": [ - { - "expected": "SELECT e.employee_id, e.last_name, d.department_name FROM employees e JOIN departments d ON e.department_id = d.department_id WHERE e.test_amt > ( SELECT AVG ( test_amt ) FROM employees WHERE department_id = e.department_id )", - "statement_metadata": { - "size": 30, - "tables": ["employees", "departments"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/full-hint.json 
b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/full-hint.json deleted file mode 100644 index 89a6981c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/full-hint.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "input": "SELECT /*+ FULL(e) */ employee_id, first_name, last_name FROM employees e WHERE department_id = 10;", - "outputs": [ - { - "expected": "SELECT employee_id, first_name, last_name FROM employees e WHERE department_id = ?;", - "statement_metadata": { - "size": 29, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": ["/*+ FULL(e) */"], - "procedures": [] - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/hierarchical-queries.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/hierarchical-queries.json deleted file mode 100644 index 3881a255..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/hierarchical-queries.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT employee_id, last_name, manager_id FROM employees START WITH manager_id IS NULL CONNECT BY PRIOR employee_id = manager_id;", - "outputs": [ - { - "expected": "SELECT employee_id, last_name, manager_id FROM employees START WITH manager_id IS ? 
CONNECT BY PRIOR employee_id = manager_id", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/index-hint.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/index-hint.json deleted file mode 100644 index cd6e00ac..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/index-hint.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "input": "SELECT /*+ INDEX(e employee_index) */ employee_id, first_name, last_name FROM employees e WHERE department_id = 10;", - "outputs": [ - { - "expected": "SELECT employee_id, first_name, last_name FROM employees e WHERE department_id = ?;", - "statement_metadata": { - "size": 45, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": ["/*+ INDEX(e employee_index) */"], - "procedures": [] - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/large-objects-lobs.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/large-objects-lobs.json deleted file mode 100644 index eae0403c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/large-objects-lobs.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, DBMS_LOB.SUBSTR(blob_data, 2000, 1) as blob_content, DBMS_LOB.SUBSTR(clob_data, 2000, 1) as clob_content FROM lob_test WHERE id = 1;", - "outputs": [ - { - "expected": "SELECT id, DBMS_LOB.SUBSTR ( blob_data, ?, ? ), DBMS_LOB.SUBSTR ( clob_data, ?, ? 
) FROM lob_test WHERE id = ?", - "statement_metadata": { - "size": 14, - "tables": ["lob_test"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/multiple-hints.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/multiple-hints.json deleted file mode 100644 index deddd94d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/multiple-hints.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "input": "SELECT /*+ LEADING(e) USE_HASH(d) */ e.employee_id, e.first_name, d.department_name FROM employees e, departments d WHERE e.department_id = d.department_id;", - "outputs": [ - { - "expected": "SELECT e.employee_id, e.first_name, d.department_name FROM employees e, departments d WHERE e.department_id = d.department_id;", - "statement_metadata": { - "size": 44, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": ["/*+ LEADING(e) USE_HASH(d) */"], - "procedures": [] - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/optimizer-mode-hint.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/optimizer-mode-hint.json deleted file mode 100644 index 6a19aca0..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/optimizer-mode-hint.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "input": "SELECT /*+ ALL_ROWS */ order_id, description FROM orders WHERE price > 100;", - "outputs": [ - { - "expected": "SELECT order_id, description FROM orders WHERE price > ?;", - "statement_metadata": { - "size": 27, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": ["/*+ ALL_ROWS */"], - 
"procedures": [] - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/oracle-text.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/oracle-text.json deleted file mode 100644 index 33beecd0..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/oracle-text.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, title FROM articles WHERE CONTAINS(text, 'Oracle', 1) > 0;", - "outputs": [ - { - "expected": "SELECT id, title FROM articles WHERE CONTAINS ( text, ?, ? ) > ?", - "statement_metadata": { - "size": 14, - "tables": ["articles"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/quoted-identifiers-case-sensitive.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/quoted-identifiers-case-sensitive.json deleted file mode 100644 index bf49d9cd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/quoted-identifiers-case-sensitive.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "SELECT \"OrderId\", \"OrderDate\", \"CustomerName\" FROM \"Sales\".\"Orders\" WHERE \"OrderStatus\" = 'Shipped';", - "outputs": [ - { - "expected": "SELECT OrderId, OrderDate, CustomerName FROM Sales.Orders WHERE OrderStatus = ?", - "statement_metadata": { - "size": 18, - "tables": ["Sales.Orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "normalizer_config": { - "keep_identifier_quotation": true, - "Keep_trailing_semicolon": true - }, - "expected": "SELECT \"OrderId\", \"OrderDate\", \"CustomerName\" FROM \"Sales\".\"Orders\" WHERE \"OrderStatus\" = ?;" - 
} - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/quoted-identifiers-special-characters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/quoted-identifiers-special-characters.json deleted file mode 100644 index 89ac3e55..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/quoted-identifiers-special-characters.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "SELECT * FROM \"Sales\".\"Order-Details\" WHERE \"Product#Name\" LIKE '%Gadget%';", - "outputs": [ - { - "expected": "SELECT * FROM Sales.Order-Details WHERE Product#Name LIKE ?", - "statement_metadata": { - "size": 25, - "tables": ["Sales.Order-Details"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "normalizer_config": { - "keep_identifier_quotation": true, - "Keep_trailing_semicolon": true - }, - "expected": "SELECT * FROM \"Sales\".\"Order-Details\" WHERE \"Product#Name\" LIKE ?;" - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/recursive-cte.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/recursive-cte.json deleted file mode 100644 index 8524a7b2..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/recursive-cte.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "WITH RECURSIVE subordinates AS (SELECT employee_id, manager_id FROM employees WHERE manager_id IS NULL UNION ALL SELECT e.employee_id, e.manager_id FROM employees e JOIN subordinates s ON e.manager_id = s.employee_id) SELECT * FROM subordinates;", - "outputs": [ - { - "expected": "WITH RECURSIVE subordinates AS ( SELECT employee_id, manager_id FROM employees WHERE manager_id IS ? 
UNION ALL SELECT e.employee_id, e.manager_id FROM employees e JOIN subordinates s ON e.manager_id = s.employee_id ) SELECT * FROM subordinates", - "statement_metadata": { - "size": 31, - "tables": ["employees", "subordinates"], - "commands": [ "SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-basic-conditions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-basic-conditions.json deleted file mode 100644 index d3b8b73f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-basic-conditions.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "SELECT id, name FROM users WHERE age > 30 AND status = 'active';", - "outputs": [ - { - "expected": "SELECT id, name FROM users WHERE age > ? AND status = ?", - "statement_metadata": { - "size": 11, - "tables": ["users"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT id, name FROM users WHERE age > ? AND status = ?;", - "normalizer_config": { - "keep_trailing_semicolon": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-hierarchical-query.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-hierarchical-query.json deleted file mode 100644 index 947494a6..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-hierarchical-query.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "SELECT employee_id, last_name, manager_id FROM employees START WITH manager_id IS NULL CONNECT BY PRIOR employee_id = manager_id;", - "outputs": [ - { - "expected": "SELECT employee_id, last_name, manager_id FROM employees START WITH manager_id IS ? 
CONNECT BY PRIOR employee_id = manager_id", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT employee_id, last_name, manager_id FROM employees START WITH manager_id IS NULL CONNECT BY PRIOR employee_id = manager_id;", - "normalizer_config": { - "keep_trailing_semicolon": true - }, - "obfuscator_config": { - "replace_boolean":false - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-using-oracle-text.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-using-oracle-text.json deleted file mode 100644 index 7878c10a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-using-oracle-text.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "SELECT id, title FROM articles WHERE CONTAINS(text, 'Oracle', 1) > 0;", - "outputs": [ - { - "expected": "SELECT id, title FROM articles WHERE CONTAINS ( text, ?, ? ) > ?", - "statement_metadata": { - "size": 14, - "tables": ["articles"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT id, title FROM articles WHERE CONTAINS(text, ?, ?) 
> ?;", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-using-with-clause.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-using-with-clause.json deleted file mode 100644 index 6bb2fc58..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-using-with-clause.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "WITH dept_costs AS (SELECT department_id, SUM(test_amt) AS total_sal FROM employees GROUP BY department_id) SELECT * FROM dept_costs WHERE total_sal > (SELECT AVG(total_sal) FROM dept_costs);", - "outputs": [ - { - "expected": "WITH dept_costs AS ( SELECT department_id, SUM ( test_amt ) FROM employees GROUP BY department_id ) SELECT * FROM dept_costs WHERE total_sal > ( SELECT AVG ( total_sal ) FROM dept_costs )", - "statement_metadata": { - "size": 25, - "tables": ["employees", "dept_costs"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "WITH dept_costs AS (SELECT department_id, SUM(test_amt) FROM employees GROUP BY department_id) SELECT * FROM dept_costs WHERE total_sal > (SELECT AVG(total_sal) FROM dept_costs);", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-flashback-query.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-flashback-query.json deleted file mode 100644 index 1c6c6d61..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-flashback-query.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "SELECT * FROM employees AS OF TIMESTAMP TO_TIMESTAMP('2023-03-15 08:30:00', 'YYYY-MM-DD HH24:MI:SS') WHERE department_id = 10;", - 
"outputs": [ - { - "expected": "SELECT * FROM employees AS OF TIMESTAMP TO_TIMESTAMP ( ? ) WHERE department_id = ?", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT * FROM employees AS OF TIMESTAMP TO_TIMESTAMP(?) WHERE department_id = ?;", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-model-clause.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-model-clause.json deleted file mode 100644 index 17f3fd95..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-model-clause.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "SELECT * FROM (SELECT year, product, amount FROM sales) MODEL DIMENSION BY (year) MEASURES (product, amount) RULES (amount['2023'] = amount['2022'] * 1.1);", - "outputs": [ - { - "expected": "SELECT * FROM ( SELECT year, product, amount FROM sales ) MODEL DIMENSION BY ( year ) MEASURES ( product, amount ) RULES ( amount [ ? ] = amount [ ? ] * ? )", - "statement_metadata": { - "size": 11, - "tables": ["sales"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT * FROM (SELECT year, product, amount FROM sales) MODEL DIMENSION BY (year) MEASURES (product, amount) RULES (amount [?] = amount [?] 
* ?);", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-multi-line-comments.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-multi-line-comments.json deleted file mode 100644 index 2a9fd894..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-multi-line-comments.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "SELECT /* Multi-line\n comment */ id, name FROM users WHERE status = 'active';", - "outputs": [ - { - "expected": "SELECT id, name FROM users WHERE status = ?", - "statement_metadata": { - "size": 36, - "tables": ["users"], - "commands": ["SELECT"], - "comments": ["/* Multi-line\n comment */"], - "procedures": [] - } - }, - { - "expected": "SELECT id, name FROM users WHERE status = ?;", - "normalizer_config": { - "keep_trailing_semicolon": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-oracle-specific-joins.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-oracle-specific-joins.json deleted file mode 100644 index 06b165af..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-oracle-specific-joins.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "SELECT e.employee_id, e.last_name, d.department_name FROM employees e, departments d WHERE e.department_id = d.department_id(+);", - "outputs": [ - { - "expected": "SELECT e.employee_id, e.last_name, d.department_name FROM employees e, departments d WHERE e.department_id = d.department_id ( + )", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT e.employee_id, e.last_name, d.department_name FROM 
employees e, departments d WHERE e.department_id = d.department_id(+);", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-partition-by.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-partition-by.json deleted file mode 100644 index 86253a75..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-partition-by.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "SELECT department_id, last_name, test_amt, AVG(test_amt) OVER (PARTITION BY department_id) AS avg_dept_test_amt FROM employees;", - "outputs": [ - { - "expected": "SELECT department_id, last_name, test_amt, AVG ( test_amt ) OVER ( PARTITION BY department_id ) FROM employees", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT department_id, last_name, test_amt, AVG(test_amt) OVER (PARTITION BY department_id) FROM employees;", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-pseudocolumns.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-pseudocolumns.json deleted file mode 100644 index a98bd682..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-pseudocolumns.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "SELECT LEVEL, ROWNUM, employee_id, last_name FROM employees WHERE ROWNUM <= 10 CONNECT BY PRIOR employee_id = manager_id;", - "outputs": [ - { - "expected": "SELECT LEVEL, ROWNUM, employee_id, last_name FROM employees WHERE ROWNUM <= ? 
CONNECT BY PRIOR employee_id = manager_id", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT LEVEL, ROWNUM, employee_id, last_name FROM employees WHERE ROWNUM <= ? CONNECT BY PRIOR employee_id = manager_id;", - "normalizer_config": { - "keep_trailing_semicolon": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-rollup-function.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-rollup-function.json deleted file mode 100644 index bc123857..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-rollup-function.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "SELECT department_id, job_id, SUM(test_amt) total_test_amt FROM employees GROUP BY ROLLUP (department_id, job_id);", - "outputs": [ - { - "expected": "SELECT department_id, job_id, SUM ( test_amt ) total_test_amt FROM employees GROUP BY ROLLUP ( department_id, job_id )", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT department_id, job_id, SUM(test_amt) total_test_amt FROM employees GROUP BY ROLLUP (department_id, job_id);", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-sample-clause.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-sample-clause.json deleted file mode 100644 index 74cc06b7..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-sample-clause.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "SELECT * FROM employees SAMPLE (10);", - "outputs": [ - { - "expected": "SELECT * 
FROM employees SAMPLE ( ? )", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT * FROM employees SAMPLE (?);", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-single-line-comments.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-single-line-comments.json deleted file mode 100644 index 7f32afdb..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-single-line-comments.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "SELECT id, name FROM users WHERE status = 'active'; -- Single-line comment explaining the query", - "outputs": [ - { - "expected": "SELECT id, name FROM users WHERE status = ?", - "statement_metadata": { - "size": 54, - "tables": ["users"], - "commands": ["SELECT"], - "comments": ["-- Single-line comment explaining the query"], - "procedures": [] - } - }, - { - "expected": "SELECT id, name FROM users WHERE status = ?;", - "normalizer_config": { - "keep_trailing_semicolon": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-skip-locked.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-skip-locked.json deleted file mode 100644 index 72326d19..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-skip-locked.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "SELECT * FROM orders WHERE order_status = 'PENDING' FOR UPDATE SKIP LOCKED;", - "outputs": [ - { - "expected": "SELECT * FROM orders WHERE order_status = ? 
FOR UPDATE SKIP LOCKED", - "statement_metadata": { - "size": 18, - "tables": ["orders"], - "commands": ["SELECT", "UPDATE"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT * FROM orders WHERE order_status = ? FOR UPDATE SKIP LOCKED;", - "normalizer_config": { - "keep_trailing_semicolon": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/use-nl-hint.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/use-nl-hint.json deleted file mode 100644 index a84e0adc..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/use-nl-hint.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "input": "SELECT /*+ USE_NL(e d) */ e.employee_id, e.first_name, d.department_name FROM employees e, departments d WHERE e.department_id = d.department_id;", - "outputs": [ - { - "expected": "SELECT e.employee_id, e.first_name, d.department_name FROM employees e, departments d WHERE e.department_id = d.department_id;", - "statement_metadata": { - "size": 33, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": ["/*+ USE_NL(e d) */"], - "procedures": [] - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/window-functions-analytics.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/window-functions-analytics.json deleted file mode 100644 index a0af2d82..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/window-functions-analytics.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT employee_id, test_amt, AVG(yoe) OVER (PARTITION BY department_id) AS avg_department_yoe FROM employees;", - "outputs": [ - { - "expected": "SELECT 
employee_id, test_amt, AVG ( yoe ) OVER ( PARTITION BY department_id ) FROM employees", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/conditional-update-with-case.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/conditional-update-with-case.json deleted file mode 100644 index 949243d5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/conditional-update-with-case.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "UPDATE employees SET test_amt = CASE WHEN job_id = 'XX' THEN test_amt * 1.10 WHEN job_id = 'YY' THEN test_amt * 1.20 ELSE test_amt END;", - "outputs": [ - { - "expected": "UPDATE employees SET test_amt = CASE WHEN job_id = ? THEN test_amt * ? WHEN job_id = ? THEN test_amt * ? ELSE test_amt END", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE employees SET test_amt = CASE WHEN job_id = ? THEN test_amt * ? WHEN job_id = ? THEN test_amt * ? ELSE test_amt END;", - "normalizer_config": { - "keep_trailing_semicolon": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/conditional-update-with-decode.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/conditional-update-with-decode.json deleted file mode 100644 index 5f9e2398..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/conditional-update-with-decode.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "UPDATE order_items SET discount = DECODE(quantity, 10, 5, 20, 10, 0) WHERE order_id = 456;", - "outputs": [ - { - "expected": "UPDATE order_items SET discount = DECODE ( quantity, ?, ?, ?, ?, ? 
) WHERE order_id = ?", - "statement_metadata": { - "size": 17, - "tables": ["order_items"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE order_items SET discount = DECODE(quantity, ?, ?, ?, ?, ?) WHERE order_id = ?;", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/dynamic-plsql.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/dynamic-plsql.json deleted file mode 100644 index 91cdcb8c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/dynamic-plsql.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "BEGIN EXECUTE IMMEDIATE 'UPDATE logs SET retention = retention * 1.1'; END;", - "outputs": [ - { - "expected": "BEGIN EXECUTE IMMEDIATE ?; END", - "statement_metadata": { - "size": 12, - "tables": [], - "commands": ["BEGIN", "EXECUTE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-basic.json deleted file mode 100644 index d5ee0289..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-basic.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "UPDATE employees SET test_amt = test_amt * 1.05 WHERE department_id = 3;", - "outputs": [ - { - "expected": "UPDATE employees SET test_amt = test_amt * ? WHERE department_id = ?", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE employees SET test_amt = test_amt * ? 
WHERE department_id = ?;", - "normalizer_config": { - "keep_trailing_semicolon": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-oracle-specific-syntax.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-oracle-specific-syntax.json deleted file mode 100644 index a7f6baea..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-oracle-specific-syntax.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "UPDATE (SELECT e.test_amt, d.budget FROM employees e JOIN departments d ON e.department_id = d.id) t SET t.test_amt = t.test_amt * 1.05, t.budget = t.budget - 1000;", - "outputs": [ - { - "expected": "UPDATE ( SELECT e.test_amt, d.budget FROM employees e JOIN departments d ON e.department_id = d.id ) t SET t.test_amt = t.test_amt * ?, t.budget = t.budget - ?", - "statement_metadata": { - "size": 36, - "tables": ["employees", "departments"], - "commands": ["UPDATE", "SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE (SELECT e.test_amt, d.budget FROM employees e JOIN departments d ON e.department_id = d.id) t SET t.test_amt = t.test_amt * ?, t.budget = t.budget - ?;", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-using-correlated-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-using-correlated-subquery.json deleted file mode 100644 index 3ba8e9d5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-using-correlated-subquery.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "UPDATE orders o SET o.status = 'DELAYED' WHERE EXISTS (SELECT 1 FROM shipments s WHERE s.order_id = o.id AND s.estimated_arrival < SYSDATE);", - "outputs": [ - { - "expected": "UPDATE 
orders o SET o.status = ? WHERE EXISTS ( SELECT ? FROM shipments s WHERE s.order_id = o.id AND s.estimated_arrival < SYSDATE )", - "statement_metadata": { - "size": 27, - "tables": ["orders", "shipments"], - "commands": ["UPDATE", "SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE orders o SET o.status = ? WHERE EXISTS (SELECT ? FROM shipments s WHERE s.order_id = o.id AND s.estimated_arrival < SYSDATE);", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-using-join-syntax.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-using-join-syntax.json deleted file mode 100644 index 18d78d25..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-using-join-syntax.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "UPDATE (SELECT a.account_balance, t.transaction_amount FROM accounts a JOIN transactions t ON a.account_id = t.account_id) SET account_balance = account_balance + transaction_amount;", - "outputs": [ - { - "expected": "UPDATE ( SELECT a.account_balance, t.transaction_amount FROM accounts a JOIN transactions t ON a.account_id = t.account_id ) SET account_balance = account_balance + transaction_amount", - "statement_metadata": { - "size": 36, - "tables": ["accounts", "transactions"], - "commands": ["UPDATE", "SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE (SELECT a.account_balance, t.transaction_amount FROM accounts a JOIN transactions t ON a.account_id = t.account_id) SET account_balance = account_balance + transaction_amount;", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git 
a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-correlated-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-correlated-subquery.json deleted file mode 100644 index 5846dbc7..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-correlated-subquery.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "UPDATE customer_orders co SET total_amount = (SELECT SUM(oi.price * oi.quantity) FROM order_items oi WHERE oi.order_id = co.id) WHERE co.status = 'Pending';", - "outputs": [ - { - "expected": "UPDATE customer_orders co SET total_amount = ( SELECT SUM ( oi.price * oi.quantity ) FROM order_items oi WHERE oi.order_id = co.id ) WHERE co.status = ?", - "statement_metadata": { - "size": 38, - "tables": ["customer_orders", "order_items"], - "commands": ["UPDATE", "SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE customer_orders co SET total_amount = (SELECT SUM(oi.price * oi.quantity) FROM order_items oi WHERE oi.order_id = co.id) WHERE co.status = ?;", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-join.json deleted file mode 100644 index 24d901a8..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-join.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "UPDATE products p SET p.price = p.price * 1.1 FROM suppliers s WHERE p.supplier_id = s.id AND s.rating > 4;", - "outputs": [ - { - "expected": "UPDATE products p SET p.price = p.price * ? 
FROM suppliers s WHERE p.supplier_id = s.id AND s.rating > ?", - "statement_metadata": { - "size": 23, - "tables": ["products", "suppliers"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE products p SET p.price = p.price * ? FROM suppliers s WHERE p.supplier_id = s.id AND s.rating > ?;", - "normalizer_config": { - "keep_trailing_semicolon": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-returning-clause.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-returning-clause.json deleted file mode 100644 index 8f9c17fa..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-returning-clause.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "UPDATE orders SET order_status = 'Completed' WHERE order_id = 123 RETURNING customer_id, order_total INTO :cust_id, :total;", - "outputs": [ - { - "expected": "UPDATE orders SET order_status = ? WHERE order_id = ? RETURNING customer_id, order_total INTO :cust_id, :total", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE orders SET order_status = ? WHERE order_id = ? 
RETURNING customer_id, order_total INTO :cust_id, :total;", - "normalizer_config": { - "keep_trailing_semicolon": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-subquery-in-set.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-subquery-in-set.json deleted file mode 100644 index d5a81c9e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-subquery-in-set.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "UPDATE products p SET (p.price, p.stock) = (SELECT s.discounted_price, s.quantity FROM sale_items s WHERE s.product_id = p.id) WHERE EXISTS (SELECT 1 FROM sale_items s WHERE s.product_id = p.id);", - "outputs": [ - { - "expected": "UPDATE products p SET ( p.price, p.stock ) = ( SELECT s.discounted_price, s.quantity FROM sale_items s WHERE s.product_id = p.id ) WHERE EXISTS ( SELECT ? FROM sale_items s WHERE s.product_id = p.id )", - "statement_metadata": { - "size": 30, - "tables": ["products", "sale_items"], - "commands": ["UPDATE", "SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE products p SET (p.price, p.stock) = (SELECT s.discounted_price, s.quantity FROM sale_items s WHERE s.product_id = p.id) WHERE EXISTS (SELECT ? 
FROM sale_items s WHERE s.product_id = p.id);", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-subquery.json deleted file mode 100644 index 740712c5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-subquery.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "UPDATE products SET price = price * 0.9 WHERE id IN (SELECT product_id FROM inventory WHERE quantity > 100);", - "outputs": [ - { - "expected": "UPDATE products SET price = price * ? WHERE id IN ( SELECT product_id FROM inventory WHERE quantity > ? )", - "statement_metadata": { - "size": 29, - "tables": ["products", "inventory"], - "commands": ["UPDATE", "SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE products SET price = price * ? 
WHERE id IN (SELECT product_id FROM inventory WHERE quantity > ?);", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/delete-complex-subqueries-joins.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/delete-complex-subqueries-joins.json deleted file mode 100644 index ef558339..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/delete-complex-subqueries-joins.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "DELETE FROM \n users u\nUSING \n orders o,\n order_items oi,\n products p\nWHERE \n u.id = o.user_id\nAND o.id = oi.order_id\nAND oi.product_id = p.id\nAND p.category = 'obsolete'\nAND o.order_date < NOW() - INTERVAL '5 years';", - "outputs": [ - { - "expected": "DELETE FROM users u USING orders o, order_items oi, products p WHERE u.id = o.user_id AND o.id = oi.order_id AND oi.product_id = p.id AND p.category = ? 
AND o.order_date < NOW ( ) - INTERVAL ?", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "DELETE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/insert-complex-select-joins.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/insert-complex-select-joins.json deleted file mode 100644 index 841ce4b9..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/insert-complex-select-joins.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "input": "INSERT INTO order_summaries (order_id, product_count, total_amount, average_product_price)\nSELECT \n o.id,\n COUNT(p.id),\n SUM(oi.amount),\n AVG(p.price)\nFROM \n orders o\nJOIN order_items oi ON o.id = oi.order_id\nJOIN products p ON oi.product_id = p.id\nGROUP BY \n o.id\nHAVING \n SUM(oi.amount) > 1000;", - "outputs": [ - { - "expected": "INSERT INTO order_summaries ( order_id, product_count, total_amount, average_product_price ) SELECT o.id, COUNT ( p.id ), SUM ( oi.amount ), AVG ( p.price ) FROM orders o JOIN order_items oi ON o.id = oi.order_id JOIN products p ON oi.product_id = p.id GROUP BY o.id HAVING SUM ( oi.amount ) > ?", - "statement_metadata": { - "size": 56, - "tables": [ - "order_summaries", - "orders", - "order_items", - "products" - ], - "commands": [ - "INSERT", - "SELECT", - "JOIN" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/select-complex-aggregates-subqueries.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/select-complex-aggregates-subqueries.json deleted file mode 100644 index 1b2126a2..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/select-complex-aggregates-subqueries.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "SELECT \n u.id,\n 
u.name,\n (SELECT COUNT(*) FROM orders o WHERE o.user_id = u.id) AS order_count,\n (SELECT SUM(amount) FROM payments p WHERE p.user_id = u.id) AS total_payments,\n (SELECT AVG(rating) FROM reviews r WHERE r.user_id = u.id) AS average_rating\nFROM \n users u\nWHERE \n EXISTS (\n SELECT 1 FROM logins l WHERE l.user_id = u.id AND l.time > NOW() - INTERVAL '1 month'\n )\nAND u.status = 'active'\nORDER BY \n total_payments DESC, average_rating DESC, order_count DESC\nLIMIT 10;", - "outputs": [ - { - "expected": "SELECT u.id, u.name, ( SELECT COUNT ( * ) FROM orders o WHERE o.user_id = u.id ), ( SELECT SUM ( amount ) FROM payments p WHERE p.user_id = u.id ), ( SELECT AVG ( rating ) FROM reviews r WHERE r.user_id = u.id ) FROM users u WHERE EXISTS ( SELECT ? FROM logins l WHERE l.user_id = u.id AND l.time > NOW ( ) - INTERVAL ? ) AND u.status = ? ORDER BY total_payments DESC, average_rating DESC, order_count DESC LIMIT ?", - "statement_metadata": { - "size": 38, - "tables": [ - "orders", - "payments", - "reviews", - "users", - "logins" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/select-complex-joins-window-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/select-complex-joins-window-functions.json deleted file mode 100644 index 71029e52..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/select-complex-joins-window-functions.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "SELECT \n e1.name AS employee_name,\n e1.test_amt,\n e2.name AS manager_name,\n AVG(e2.test_amt) OVER (PARTITION BY e1.manager_id) AS avg_manager_test_amt,\n RANK() OVER (ORDER BY e1.test_amt DESC) AS test_amt_rank\nFROM \n employees e1\nLEFT JOIN employees e2 ON e1.manager_id = e2.id\nWHERE \n e1.department_id IN (SELECT id FROM departments WHERE name LIKE 'IT%')\nAND \n e1.hire_date > 
'2020-01-01'\nORDER BY \n test_amt_rank, avg_manager_test_amt DESC;", - "outputs": [ - { - "expected": "SELECT e?.name, e?.test_amt, e?.name, AVG ( e?.test_amt ) OVER ( PARTITION BY e?.manager_id ), RANK ( ) OVER ( ORDER BY e?.test_amt DESC ) FROM employees e? LEFT JOIN employees e? ON e?.manager_id = e?.id WHERE e?.department_id IN ( SELECT id FROM departments WHERE name LIKE ? ) AND e?.hire_date > ? ORDER BY test_amt_rank, avg_manager_test_amt DESC", - "statement_metadata": { - "size": 30, - "tables": [ - "employees", - "departments" - ], - "commands": [ - "SELECT", - "JOIN" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/select-nested-subqueries-aggregates-limits.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/select-nested-subqueries-aggregates-limits.json deleted file mode 100644 index d8642455..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/select-nested-subqueries-aggregates-limits.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT \n user_id,\n order_id,\n order_total,\n user_total\nFROM (\n SELECT \n o.user_id,\n o.id AS order_id,\n o.total AS order_total,\n (SELECT SUM(total) FROM orders WHERE user_id = o.user_id) AS user_total,\n RANK() OVER (PARTITION BY o.user_id ORDER BY o.total DESC) AS rnk\n FROM \n orders o\n) sub\nWHERE \n sub.rnk = 1\nAND user_total > (\n SELECT \n AVG(total) * 2 \n FROM orders\n);", - "outputs": [ - { - "expected": "SELECT user_id, order_id, order_total, user_total FROM ( SELECT o.user_id, o.id, o.total, ( SELECT SUM ( total ) FROM orders WHERE user_id = o.user_id ), RANK ( ) OVER ( PARTITION BY o.user_id ORDER BY o.total DESC ) FROM orders o ) sub WHERE sub.rnk = ? AND user_total > ( SELECT AVG ( total ) * ? 
FROM orders )", - "statement_metadata": { - "size": 12, - "tables": [ - "orders" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/update-complex-subquery-conditional.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/update-complex-subquery-conditional.json deleted file mode 100644 index 15d51e02..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/update-complex-subquery-conditional.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "UPDATE \n products p\nSET \n price = CASE \n WHEN p.stock < 10 THEN p.price * 1.10\n WHEN p.stock BETWEEN 10 AND 50 THEN p.price\n ELSE p.price * 0.90\n END,\n last_updated = NOW()\nFROM (\n SELECT \n product_id, \n SUM(quantity) AS stock\n FROM \n inventory\n GROUP BY \n product_id\n) AS sub\nWHERE \n sub.product_id = p.id;", - "outputs": [ - { - "expected": "UPDATE products p SET price = CASE WHEN p.stock < ? THEN p.price * ? WHEN p.stock BETWEEN ? AND ? THEN p.price ELSE p.price * ? 
END, last_updated = NOW ( ) FROM ( SELECT product_id, SUM ( quantity ) FROM inventory GROUP BY product_id ) WHERE sub.product_id = p.id", - "statement_metadata": { - "size": 29, - "tables": [ - "products", - "inventory" - ], - "commands": [ - "UPDATE", - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-all-rows.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-all-rows.json deleted file mode 100644 index 96eb980f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-all-rows.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "DELETE FROM temp_table;", - "outputs": [ - { - "expected": "DELETE FROM temp_table", - "statement_metadata": { - "size": 16, - "tables": [ - "temp_table" - ], - "commands": [ - "DELETE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-returning.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-returning.json deleted file mode 100644 index 772ac106..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-returning.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE id = 8 RETURNING *;", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE id = ? 
RETURNING *", - "statement_metadata": { - "size": 12, - "tables": [ - "orders" - ], - "commands": [ - "DELETE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-simple.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-simple.json deleted file mode 100644 index 0ddcff7a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-simple.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "DELETE FROM users WHERE id = 7;", - "outputs": [ - { - "expected": "DELETE FROM users WHERE id = ?", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "DELETE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-using-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-using-join.json deleted file mode 100644 index 60d22f4a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-using-join.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "DELETE FROM user_logins USING users WHERE user_logins.user_id = users.id AND users.status = 'inactive';", - "outputs": [ - { - "expected": "DELETE FROM user_logins USING users WHERE user_logins.user_id = users.id AND users.status = ?", - "statement_metadata": { - "size": 17, - "tables": [ - "user_logins" - ], - "commands": [ - "DELETE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-with-cte.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-with-cte.json deleted file mode 100644 index e721079c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-with-cte.json +++ /dev/null @@ 
-1,21 +0,0 @@ -{ - "input": "WITH deleted AS (\n DELETE FROM users WHERE last_login < NOW() - INTERVAL '2 years' RETURNING *\n)\nSELECT * FROM deleted;", - "outputs": [ - { - "expected": "WITH deleted AS ( DELETE FROM users WHERE last_login < NOW ( ) - INTERVAL ? RETURNING * ) SELECT * FROM deleted", - "statement_metadata": { - "size": 24, - "tables": [ - "users", - "deleted" - ], - "commands": [ - "DELETE", - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-with-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-with-subquery.json deleted file mode 100644 index 8857f3aa..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-with-subquery.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "DELETE FROM comments WHERE user_id IN (SELECT id FROM users WHERE status = 'banned');", - "outputs": [ - { - "expected": "DELETE FROM comments WHERE user_id IN ( SELECT id FROM users WHERE status = ? 
)", - "statement_metadata": { - "size": 25, - "tables": [ - "comments", - "users" - ], - "commands": [ - "DELETE", - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-that-raises-notice.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-that-raises-notice.json deleted file mode 100644 index 3bb0c7cd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-that-raises-notice.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "CREATE OR REPLACE FUNCTION log_activity(activity text) RETURNS void AS $func$\nBEGIN\n RAISE NOTICE 'Activity: %', activity;\nEND;\n$func$ LANGUAGE plpgsql;", - "outputs": [ - { - "expected": "CREATE OR REPLACE FUNCTION log_activity ( activity text ) RETURNS void AS $func$BEGIN RAISE NOTICE ?, activity; END$func$ LANGUAGE plpgsql" - }, - { - "obfuscator_config": { - "dollar_quoted_func": false - }, - "expected": "CREATE OR REPLACE FUNCTION log_activity ( activity text ) RETURNS void AS ? 
LANGUAGE plpgsql" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-with-dynamic-query.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-with-dynamic-query.json deleted file mode 100644 index 8804741e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-with-dynamic-query.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "CREATE OR REPLACE FUNCTION dynamic_query(sql_query text) RETURNS SETOF RECORD AS $func$\nBEGIN\n RETURN QUERY EXECUTE sql_query;\nEND;\n$func$ LANGUAGE plpgsql;", - "outputs": [ - { - "expected": "CREATE OR REPLACE FUNCTION dynamic_query ( sql_query text ) RETURNS SETOF RECORD AS $func$BEGIN RETURN QUERY EXECUTE sql_query; END$func$ LANGUAGE plpgsql" - }, - { - "obfuscator_config": { - "dollar_quoted_func": false - }, - "expected": "CREATE OR REPLACE FUNCTION dynamic_query ( sql_query text ) RETURNS SETOF RECORD AS ? 
LANGUAGE plpgsql" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-with-parameters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-with-parameters.json deleted file mode 100644 index 18f8e7f1..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-with-parameters.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "CREATE OR REPLACE FUNCTION get_user_email(user_id integer) RETURNS text AS $func$\nBEGIN\n RETURN (SELECT email FROM users WHERE id = user_id);\nEND;\n$func$ LANGUAGE plpgsql;", - "outputs": [ - { - "expected": "CREATE OR REPLACE FUNCTION get_user_email ( user_id integer ) RETURNS text AS $func$BEGIN RETURN ( SELECT email FROM users WHERE id = user_id ); END$func$ LANGUAGE plpgsql" - }, - { - "obfuscator_config": { - "dollar_quoted_func": false - }, - "expected": "CREATE OR REPLACE FUNCTION get_user_email ( user_id integer ) RETURNS text AS ? 
LANGUAGE plpgsql" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-with-table-return.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-with-table-return.json deleted file mode 100644 index dd9092d1..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-with-table-return.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "CREATE OR REPLACE FUNCTION get_users() RETURNS TABLE(user_id integer, user_name text) AS $func$\nBEGIN\n RETURN QUERY SELECT id, name FROM users;\nEND;\n$func$ LANGUAGE plpgsql;", - "outputs": [ - { - "expected": "CREATE OR REPLACE FUNCTION get_users ( ) RETURNS TABLE ( user_id integer, user_name text ) AS $func$BEGIN RETURN QUERY SELECT id, name FROM users; END$func$ LANGUAGE plpgsql" - }, - { - "obfuscator_config": { - "dollar_quoted_func": false - }, - "expected": "CREATE OR REPLACE FUNCTION get_users ( ) RETURNS TABLE ( user_id integer, user_name text ) AS ? 
LANGUAGE plpgsql" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-simple-plpgsql-function.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-simple-plpgsql-function.json deleted file mode 100644 index 0147b7d2..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-simple-plpgsql-function.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "CREATE OR REPLACE FUNCTION get_user_count() RETURNS integer AS $func$\nBEGIN\n RETURN (SELECT COUNT(*) FROM users);\nEND;\n$func$ LANGUAGE plpgsql;", - "outputs": [ - { - "expected": "CREATE OR REPLACE FUNCTION get_user_count ( ) RETURNS integer AS $func$BEGIN RETURN ( SELECT COUNT ( * ) FROM users ); END$func$ LANGUAGE plpgsql" - }, - { - "obfuscator_config": { - "dollar_quoted_func": false - }, - "expected": "CREATE OR REPLACE FUNCTION get_user_count ( ) RETURNS integer AS ? LANGUAGE plpgsql" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-positional-parameters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-positional-parameters.json deleted file mode 100644 index fe57a79b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-positional-parameters.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "input": "SELECT calculate_discount($1, $2);", - "outputs": [ - { - "expected": "SELECT calculate_discount ( ? 
)" - }, - { - "obfuscator_config": { - "replace_positional_parameter": false - }, - "normalizer_config": { - "remove_space_between_parentheses": true - }, - "expected": "SELECT calculate_discount($1, $2)" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-returning-table.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-returning-table.json deleted file mode 100644 index 2b80ec4b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-returning-table.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "input": "SELECT * FROM get_users();", - "outputs": [ - { - "expected": "SELECT * FROM get_users ( )" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-that-raises-notice.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-that-raises-notice.json deleted file mode 100644 index 7e627ece..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-that-raises-notice.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "input": "SELECT log_activity('User logged in');", - "outputs": [ - { - "expected": "SELECT log_activity ( ? )" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-with-dynamic-query.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-with-dynamic-query.json deleted file mode 100644 index 492c03bc..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-with-dynamic-query.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "input": "SELECT * FROM dynamic_query('SELECT * FROM users WHERE id = 1') AS t(id integer, name text, email text);", - "outputs": [ - { - "expected": "SELECT * FROM dynamic_query ( ? 
) AS t ( id integer, name text, email text )" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-with-parameter.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-with-parameter.json deleted file mode 100644 index 0c4d1d5d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-with-parameter.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "input": "SELECT get_user_email(1);", - "outputs": [ - { - "expected": "SELECT get_user_email ( ? )" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-simple-function.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-simple-function.json deleted file mode 100644 index 3af547cc..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-simple-function.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "input": "SELECT get_user_count();", - "outputs": [ - { - "expected": "SELECT get_user_count ( )" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-array-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-array-data.json deleted file mode 100644 index 0887b145..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-array-data.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "INSERT INTO users (name, favorite_numbers) VALUES ('Array User', ARRAY[3, 6, 9]);", - "outputs": [ - { - "expected": "INSERT INTO users ( name, favorite_numbers ) VALUES ( ?, ARRAY [ ? 
] )", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "INSERT INTO users (name, favorite_numbers) VALUES (?, ARRAY [?])", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-json-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-json-data.json deleted file mode 100644 index 7a9c6559..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-json-data.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "INSERT INTO events (data) VALUES ('{\"type\": \"user_signup\", \"user_id\": 1}');", - "outputs": [ - { - "expected": "INSERT INTO events ( data ) VALUES ( ? )", - "statement_metadata": { - "size": 12, - "tables": [ - "events" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-multiple-rows.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-multiple-rows.json deleted file mode 100644 index 7101dc7f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-multiple-rows.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "INSERT INTO users (name, email) VALUES ('Jane Doe', 'jane@example.com'), ('Bob Smith', 'bob@example.com');", - "outputs": [ - { - "expected": "INSERT INTO users ( name, email ) VALUES ( ? ), ( ? 
)", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-positional-parameters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-positional-parameters.json deleted file mode 100644 index b85086c5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-positional-parameters.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "INSERT INTO users (name, email, age) VALUES ($1, $2, $3);", - "outputs": [ - { - "expected": "INSERT INTO users ( name, email, age ) VALUES ( ? )" - }, - { - "obfuscator_config": { - "replace_positional_parameter": false - }, - "expected": "INSERT INTO users ( name, email, age ) VALUES ( $1, $2, $3 )" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-returning-positional-parameter.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-returning-positional-parameter.json deleted file mode 100644 index d9f4e220..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-returning-positional-parameter.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "INSERT INTO orders (product_id, quantity, total) VALUES ($1, $2, $3) RETURNING id;", - "outputs": [ - { - "expected": "INSERT INTO orders ( product_id, quantity, total ) VALUES ( ? 
) RETURNING id" - }, - { - "obfuscator_config": { - "replace_positional_parameter": false - }, - "expected": "INSERT INTO orders ( product_id, quantity, total ) VALUES ( $1, $2, $3 ) RETURNING id" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-simple-row.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-simple-row.json deleted file mode 100644 index 88bbe702..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-simple-row.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "INSERT INTO users (name, email) VALUES ('John Doe', 'john@example.com');", - "outputs": [ - { - "expected": "INSERT INTO users ( name, email ) VALUES ( ? )", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-conflict-do-nothing.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-conflict-do-nothing.json deleted file mode 100644 index 0372dab6..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-conflict-do-nothing.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "INSERT INTO users (id, name, email) VALUES (1, 'Duplicate', 'duplicate@example.com') ON CONFLICT (id) DO NOTHING;", - "outputs": [ - { - "expected": "INSERT INTO users ( id, name, email ) VALUES ( ? 
) ON CONFLICT ( id ) DO NOTHING", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-conflict-update.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-conflict-update.json deleted file mode 100644 index 428cebb5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-conflict-update.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "input": "INSERT INTO users (id, name, email) VALUES (1, 'Duplicate', 'duplicate@example.com') ON CONFLICT (id) DO UPDATE SET email = EXCLUDED.email;", - "outputs": [ - { - "expected": "INSERT INTO users ( id, name, email ) VALUES ( ? ) ON CONFLICT ( id ) DO UPDATE SET email = EXCLUDED.email", - "statement_metadata": { - "size": 17, - "tables": [ - "users" - ], - "commands": [ - "INSERT", - "UPDATE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-default.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-default.json deleted file mode 100644 index 1f3a69f0..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-default.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "INSERT INTO products (name, price, description) VALUES ('New Product', 123, DEFAULT);", - "outputs": [ - { - "expected": "INSERT INTO products ( name, price, description ) VALUES ( ?, DEFAULT )", - "statement_metadata": { - "size": 14, - "tables": [ - "products" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-enum-type.json 
b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-enum-type.json deleted file mode 100644 index 0bb98b29..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-enum-type.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "INSERT INTO shipments (status) VALUES ('delivered'::shipment_status);", - "outputs": [ - { - "expected": "INSERT INTO shipments ( status ) VALUES ( ? :: shipment_status )", - "statement_metadata": { - "size": 15, - "tables": [ - "shipments" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-geometric-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-geometric-data.json deleted file mode 100644 index bbae656c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-geometric-data.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "INSERT INTO places (name, location) VALUES ('Point Place', point '(10, 20)');", - "outputs": [ - { - "expected": "INSERT INTO places ( name, location ) VALUES ( ?, point ? )", - "statement_metadata": { - "size": 12, - "tables": [ - "places" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-hstore-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-hstore-data.json deleted file mode 100644 index a71f5fba..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-hstore-data.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "INSERT INTO user_profiles (profile) VALUES ('\"height\"=>\"2m\", \"weight\"=>\"70kg\"');", - "outputs": [ - { - "expected": "INSERT INTO user_profiles ( profile ) VALUES ( ? 
)", - "statement_metadata": { - "size": 19, - "tables": [ - "user_profiles" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-range-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-range-data.json deleted file mode 100644 index c17593e5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-range-data.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "INSERT INTO reservations (during) VALUES ('[2023-01-01 14:00, 2023-01-01 15:00)');", - "outputs": [ - { - "expected": "INSERT INTO reservations ( during ) VALUES ( ? )", - "statement_metadata": { - "size": 18, - "tables": [ - "reservations" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-returning.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-returning.json deleted file mode 100644 index b75c50fd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-returning.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "INSERT INTO users (name, email) VALUES ('Alice Jones', 'alice@example.com') RETURNING id;", - "outputs": [ - { - "expected": "INSERT INTO users ( name, email ) VALUES ( ? 
) RETURNING id", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-select.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-select.json deleted file mode 100644 index 5325d6b5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-select.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "INSERT INTO user_logins (user_id, login_time) SELECT id, NOW() FROM users WHERE active;", - "outputs": [ - { - "expected": "INSERT INTO user_logins ( user_id, login_time ) SELECT id, NOW ( ) FROM users WHERE active", - "statement_metadata": { - "size": 28, - "tables": [ - "user_logins", - "users" - ], - "commands": [ - "INSERT", - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-subquery-and-alias.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-subquery-and-alias.json deleted file mode 100644 index dfdbfe79..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-subquery-and-alias.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "INSERT INTO user_logins (user_id, login_time) SELECT u.id, NOW() FROM users u WHERE u.active;", - "outputs": [ - { - "expected": "INSERT INTO user_logins ( user_id, login_time ) SELECT u.id, NOW ( ) FROM users u WHERE u.active", - "statement_metadata": { - "size": 28, - "tables": [ - "user_logins", - "users" - ], - "commands": [ - "INSERT", - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/aggregate-functions-count.json 
b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/aggregate-functions-count.json deleted file mode 100644 index 3119a2c0..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/aggregate-functions-count.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "SELECT COUNT(*) AS total_users FROM users;", - "outputs": [ - { - "expected": "SELECT COUNT ( * ) FROM users", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT COUNT(*) FROM users", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/basic_select_with_alias.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/basic_select_with_alias.json deleted file mode 100644 index 5fd8a336..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/basic_select_with_alias.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "input": "SELECT u.id AS user_id, u.name AS username FROM users u;", - "outputs": [ - { - "expected": "SELECT u.id, u.name FROM users u", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "normalizer_config": { - "keep_sql_alias": true - }, - "expected": "SELECT u.id AS user_id, u.name AS username FROM users u" - }, - { - "normalizer_config": { - "keep_trailing_semicolon": true - }, - "expected": "SELECT u.id, u.name FROM users u;" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/case-statements.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/case-statements.json deleted file mode 100644 index 6beb2c0a..00000000 --- 
a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/case-statements.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT name, CASE WHEN age < 18 THEN 'minor' ELSE 'adult' END FROM users;", - "outputs": [ - { - "expected": "SELECT name, CASE WHEN age < ? THEN ? ELSE ? END FROM users", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/common-table-expressions-cte.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/common-table-expressions-cte.json deleted file mode 100644 index 34a1eb9e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/common-table-expressions-cte.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "WITH recursive_subordinates AS (\n SELECT id, manager_id FROM employees WHERE id = 1\n UNION ALL\n SELECT e.id, e.manager_id FROM employees e INNER JOIN recursive_subordinates rs ON rs.id = e.manager_id\n)\nSELECT * FROM recursive_subordinates;", - "outputs": [ - { - "expected": "WITH recursive_subordinates AS ( SELECT id, manager_id FROM employees WHERE id = ? 
UNION ALL SELECT e.id, e.manager_id FROM employees e INNER JOIN recursive_subordinates rs ON rs.id = e.manager_id ) SELECT * FROM recursive_subordinates", - "statement_metadata": { - "size": 41, - "tables": [ - "employees", - "recursive_subordinates" - ], - "commands": [ - "SELECT", - "JOIN" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/cross-joins.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/cross-joins.json deleted file mode 100644 index aeaa4a1f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/cross-joins.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "SELECT * FROM users CROSS JOIN cities;", - "outputs": [ - { - "expected": "SELECT * FROM users CROSS JOIN cities", - "statement_metadata": { - "size": 21, - "tables": [ - "users", - "cities" - ], - "commands": [ - "SELECT", - "JOIN" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/distinct-on-expressions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/distinct-on-expressions.json deleted file mode 100644 index 25d8e90d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/distinct-on-expressions.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT DISTINCT ON (location) location, time FROM events ORDER BY location, time DESC;", - "outputs": [ - { - "expected": "SELECT DISTINCT ON ( location ) location, time FROM events ORDER BY location, time DESC", - "statement_metadata": { - "size": 12, - "tables": [ - "events" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/fetch-first-clause.json 
b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/fetch-first-clause.json deleted file mode 100644 index a6b7f906..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/fetch-first-clause.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT * FROM users ORDER BY created_at DESC FETCH FIRST 10 ROWS ONLY;", - "outputs": [ - { - "expected": "SELECT * FROM users ORDER BY created_at DESC FETCH FIRST ? ROWS ONLY", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/for-update-of.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/for-update-of.json deleted file mode 100644 index b8299044..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/for-update-of.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "input": "SELECT * FROM users WHERE last_login < NOW() - INTERVAL '1 year' FOR UPDATE OF users;", - "outputs": [ - { - "expected": "SELECT * FROM users WHERE last_login < NOW ( ) - INTERVAL ? 
FOR UPDATE OF users", - "statement_metadata": { - "size": 17, - "tables": [ - "users" - ], - "commands": [ - "SELECT", - "UPDATE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/full-outer-joins.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/full-outer-joins.json deleted file mode 100644 index f3329ff7..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/full-outer-joins.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "SELECT * FROM customers FULL OUTER JOIN orders ON customers.id = orders.customer_id;", - "outputs": [ - { - "expected": "SELECT * FROM customers FULL OUTER JOIN orders ON customers.id = orders.customer_id", - "statement_metadata": { - "size": 25, - "tables": [ - "customers", - "orders" - ], - "commands": [ - "SELECT", - "JOIN" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/group-by-having.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/group-by-having.json deleted file mode 100644 index 58b73c1b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/group-by-having.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "SELECT status, COUNT(*) FROM orders GROUP BY status HAVING COUNT(*) > 1;", - "outputs": [ - { - "expected": "SELECT status, COUNT ( * ) FROM orders GROUP BY status HAVING COUNT ( * ) > ?", - "statement_metadata": { - "size": 12, - "tables": [ - "orders" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT status, COUNT(*) FROM orders GROUP BY status HAVING COUNT(*) > ?", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git 
a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/json-field-access.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/json-field-access.json deleted file mode 100644 index c4cdfdce..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/json-field-access.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT data->'customer'->>'name' AS customer_name FROM orders;", - "outputs": [ - { - "expected": "SELECT data -> ? ->> ? FROM orders", - "statement_metadata": { - "size": 12, - "tables": [ - "orders" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-array-elements-text.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-array-elements-text.json deleted file mode 100644 index 9cce21b8..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-array-elements-text.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "SELECT jsonb_array_elements_text(data->'tags') AS tag FROM products;", - "outputs": [ - { - "expected": "SELECT jsonb_array_elements_text ( data -> ? ) FROM products", - "statement_metadata": { - "size": 14, - "tables": [ - "products" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT jsonb_array_elements_text(data -> ?) 
FROM products", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-array-length.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-array-length.json deleted file mode 100644 index f0113eea..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-array-length.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "SELECT jsonb_array_length(data->'tags') AS num_tags FROM products;", - "outputs": [ - { - "expected": "SELECT jsonb_array_length ( data -> ? ) FROM products", - "statement_metadata": { - "size": 14, - "tables": [ - "products" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT jsonb_array_length(data -> ?) FROM products", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-contained-in-path.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-contained-in-path.json deleted file mode 100644 index 27ecb7d9..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-contained-in-path.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT * FROM events WHERE payload <@ '{\"events\": {\"type\": \"user_event\"}}';", - "outputs": [ - { - "expected": "SELECT * FROM events WHERE payload <@ ?", - "statement_metadata": { - "size": 12, - "tables": [ - "events" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-contains-key.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-contains-key.json deleted file mode 100644 index 1485e099..00000000 --- 
a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-contains-key.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT * FROM events WHERE payload ? 'user_id';", - "outputs": [ - { - "expected": "SELECT * FROM events WHERE payload ? ?", - "statement_metadata": { - "size": 12, - "tables": [ - "events" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-contains-object-at-top-level.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-contains-object-at-top-level.json deleted file mode 100644 index 9fae952b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-contains-object-at-top-level.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT * FROM events WHERE payload @> '{\"type\": \"user_event\"}';", - "outputs": [ - { - "expected": "SELECT * FROM events WHERE payload @> ?", - "statement_metadata": { - "size": 12, - "tables": [ - "events" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-delete-array-element.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-delete-array-element.json deleted file mode 100644 index c224a8b5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-delete-array-element.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT data #- '{tags,0}' AS tags_without_first FROM products;", - "outputs": [ - { - "expected": "SELECT data #- ? 
FROM products", - "statement_metadata": { - "size": 14, - "tables": [ - "products" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-delete-key.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-delete-key.json deleted file mode 100644 index ce40e284..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-delete-key.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT data - 'temporary_field' AS cleaned_data FROM user_profiles;", - "outputs": [ - { - "expected": "SELECT data - ? FROM user_profiles", - "statement_metadata": { - "size": 19, - "tables": [ - "user_profiles" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-delete-path.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-delete-path.json deleted file mode 100644 index f452fd1c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-delete-path.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "input": "SELECT jsonb_set(data, '{info,address}', NULL) AS removed_address FROM users;", - "outputs": [ - { - "expected": "SELECT jsonb_set ( data, ?, ? 
) FROM users", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT jsonb_set(data, ?, NULL) FROM users", - "obfuscator_config": { - "replace_null": false - }, - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-extract-path-text.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-extract-path-text.json deleted file mode 100644 index 9f7b5297..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-extract-path-text.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "SELECT jsonb_extract_path_text(data, 'user', 'name') AS user_name FROM user_profiles;", - "outputs": [ - { - "expected": "SELECT jsonb_extract_path_text ( data, ?, ? ) FROM user_profiles", - "statement_metadata": { - "size": 19, - "tables": [ - "user_profiles" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT jsonb_extract_path_text(data, ?, ?) FROM user_profiles", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-extract-path.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-extract-path.json deleted file mode 100644 index 79e00e1b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-extract-path.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "SELECT jsonb_extract_path(data, 'user', 'name') AS user_name FROM user_profiles;", - "outputs": [ - { - "expected": "SELECT jsonb_extract_path ( data, ?, ? 
) FROM user_profiles", - "statement_metadata": { - "size": 19, - "tables": [ - "user_profiles" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT jsonb_extract_path(data, ?, ?) FROM user_profiles", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-pretty-print.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-pretty-print.json deleted file mode 100644 index 6b373c2e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-pretty-print.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "SELECT jsonb_pretty(data) AS pretty_data FROM logs;", - "outputs": [ - { - "expected": "SELECT jsonb_pretty ( data ) FROM logs", - "statement_metadata": { - "size": 10, - "tables": [ - "logs" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT jsonb_pretty(data) FROM logs", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-set-new-value.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-set-new-value.json deleted file mode 100644 index 433bac49..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-set-new-value.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "SELECT jsonb_set(data, '{user,name}', '\"John Doe\"') AS updated_data FROM user_profiles;", - "outputs": [ - { - "expected": "SELECT jsonb_set ( data, ?, ? ) FROM user_profiles", - "statement_metadata": { - "size": 19, - "tables": [ - "user_profiles" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT jsonb_set(data, ?, ?) 
FROM user_profiles", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/lateral-joins.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/lateral-joins.json deleted file mode 100644 index feb22135..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/lateral-joins.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "input": "SELECT u.name, json_agg(l) FROM users u, LATERAL (SELECT id, text FROM logs WHERE logs.user_id = u.id) AS l GROUP BY u.name;", - "outputs": [ - { - "expected": "SELECT u.name, json_agg ( l ) FROM users u, LATERAL ( SELECT id, text FROM logs WHERE logs.user_id = u.id ) GROUP BY u.name", - "statement_metadata": { - "size": 15, - "tables": [ - "users", - "logs" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT u.name, json_agg(l) FROM users u, LATERAL (SELECT id, text FROM logs WHERE logs.user_id = u.id) GROUP BY u.name", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/limit-and-offset.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/limit-and-offset.json deleted file mode 100644 index 6bdafb4c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/limit-and-offset.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT * FROM users ORDER BY created_at DESC LIMIT 10 OFFSET 20;", - "outputs": [ - { - "expected": "SELECT * FROM users ORDER BY created_at DESC LIMIT ? 
OFFSET ?", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/natural-joins.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/natural-joins.json deleted file mode 100644 index 437be5db..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/natural-joins.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "SELECT * FROM users NATURAL JOIN user_profiles;", - "outputs": [ - { - "expected": "SELECT * FROM users NATURAL JOIN user_profiles", - "statement_metadata": { - "size": 28, - "tables": [ - "users", - "user_profiles" - ], - "commands": [ - "SELECT", - "JOIN" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/quoted-identifiers-case-sensitive.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/quoted-identifiers-case-sensitive.json deleted file mode 100644 index d9896d15..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/quoted-identifiers-case-sensitive.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "SELECT \"OrderId\", \"OrderDate\", \"CustomerName\" FROM \"Sales\".\"Orders\" WHERE \"OrderStatus\" = 'Shipped'", - "outputs": [ - { - "expected": "SELECT OrderId, OrderDate, CustomerName FROM Sales.Orders WHERE OrderStatus = ?", - "statement_metadata": { - "size": 18, - "tables": ["Sales.Orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "normalizer_config": { - "keep_identifier_quotation": true - }, - "expected": "SELECT \"OrderId\", \"OrderDate\", \"CustomerName\" FROM \"Sales\".\"Orders\" WHERE \"OrderStatus\" = ?" 
- } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/quoted-identifiers-special-characters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/quoted-identifiers-special-characters.json deleted file mode 100644 index e7203e6f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/quoted-identifiers-special-characters.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "SELECT * FROM \"Sales\".\"Order-Details\" WHERE \"Product#Name\" LIKE '%Gadget%'", - "outputs": [ - { - "expected": "SELECT * FROM Sales.Order-Details WHERE Product#Name LIKE ?", - "statement_metadata": { - "size": 25, - "tables": ["Sales.Order-Details"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "normalizer_config": { - "keep_identifier_quotation": true - }, - "expected": "SELECT * FROM \"Sales\".\"Order-Details\" WHERE \"Product#Name\" LIKE ?" - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/select-in-clause-positional-parameters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/select-in-clause-positional-parameters.json deleted file mode 100644 index 3c02bbca..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/select-in-clause-positional-parameters.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "SELECT * FROM orders WHERE status IN ($1, $2, $3);", - "outputs": [ - { - "expected": "SELECT * FROM orders WHERE status IN ( ? 
)" - }, - { - "obfuscator_config": { - "replace_positional_parameter": false - }, - "expected": "SELECT * FROM orders WHERE status IN ( $1, $2, $3 )" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/select-multiple-conditions-positional-parameters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/select-multiple-conditions-positional-parameters.json deleted file mode 100644 index 08e468bb..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/select-multiple-conditions-positional-parameters.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "SELECT * FROM products WHERE category = $1 AND price < $2;", - "outputs": [ - { - "expected": "SELECT * FROM products WHERE category = ? AND price < ?" - }, - { - "obfuscator_config": { - "replace_positional_parameter": false - }, - "expected": "SELECT * FROM products WHERE category = $1 AND price < $2" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/select-with-positional-parameter.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/select-with-positional-parameter.json deleted file mode 100644 index 18007873..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/select-with-positional-parameter.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "SELECT * FROM users WHERE id = $1;", - "outputs": [ - { - "expected": "SELECT * FROM users WHERE id = ?" 
- }, - { - "obfuscator_config": { - "replace_positional_parameter": false - }, - "expected": "SELECT * FROM users WHERE id = $1" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/self-joins.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/self-joins.json deleted file mode 100644 index 9a34cbd9..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/self-joins.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT a.name, b.name FROM employees a, employees b WHERE a.manager_id = b.id;", - "outputs": [ - { - "expected": "SELECT a.name, b.name FROM employees a, employees b WHERE a.manager_id = b.id", - "statement_metadata": { - "size": 15, - "tables": [ - "employees" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/subquery-in-from.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/subquery-in-from.json deleted file mode 100644 index af14f66e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/subquery-in-from.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "SELECT user_data.name FROM (SELECT name FROM users WHERE active = true) AS user_data;", - "outputs": [ - { - "expected": "SELECT user_data.name FROM ( SELECT name FROM users WHERE active = ? 
)", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT user_data.name FROM ( SELECT name FROM users WHERE active = true )", - "obfuscator_config": { - "replace_boolean": false - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/subquery-in-select.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/subquery-in-select.json deleted file mode 100644 index a8f2cea3..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/subquery-in-select.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "input": "SELECT name, (SELECT COUNT(*) FROM orders WHERE orders.user_id = users.id) AS order_count FROM users;", - "outputs": [ - { - "expected": "SELECT name, ( SELECT COUNT ( * ) FROM orders WHERE orders.user_id = users.id ) FROM users", - "statement_metadata": { - "size": 17, - "tables": [ - "orders", - "users" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/subquery-in-where.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/subquery-in-where.json deleted file mode 100644 index a8516ffd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/subquery-in-where.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "input": "SELECT name FROM users WHERE id IN (SELECT user_id FROM orders WHERE total > 100);", - "outputs": [ - { - "expected": "SELECT name FROM users WHERE id IN ( SELECT user_id FROM orders WHERE total > ? 
)", - "statement_metadata": { - "size": 17, - "tables": [ - "users", - "orders" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/tablesample-bernoulli.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/tablesample-bernoulli.json deleted file mode 100644 index c7a9c533..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/tablesample-bernoulli.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT * FROM users TABLESAMPLE BERNOULLI (10);", - "outputs": [ - { - "expected": "SELECT * FROM users TABLESAMPLE BERNOULLI ( ? )", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-array-append.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-array-append.json deleted file mode 100644 index 907d8621..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-array-append.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "UPDATE users SET favorite_numbers = array_append(favorite_numbers, 42) WHERE id = 5;", - "outputs": [ - { - "expected": "UPDATE users SET favorite_numbers = array_append ( favorite_numbers, ? 
) WHERE id = ?", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "UPDATE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-increment-numeric.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-increment-numeric.json deleted file mode 100644 index 7feabe9b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-increment-numeric.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "UPDATE accounts SET balance = balance + 100.0 WHERE user_id = 4;", - "outputs": [ - { - "expected": "UPDATE accounts SET balance = balance + ? WHERE user_id = ?", - "statement_metadata": { - "size": 14, - "tables": [ - "accounts" - ], - "commands": [ - "UPDATE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-json-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-json-data.json deleted file mode 100644 index 95ba8927..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-json-data.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "UPDATE events SET data = jsonb_set(data, '{location}', '\"New Location\"') WHERE data->>'event_id' = '123';", - "outputs": [ - { - "expected": "UPDATE events SET data = jsonb_set ( data, ?, ? ) WHERE data ->> ? = ?", - "statement_metadata": { - "size": 12, - "tables": [ - "events" - ], - "commands": [ - "UPDATE" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE events SET data = jsonb_set(data, ?, ?) WHERE data ->> ? 
= ?", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-multiple-fields-positional-parameters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-multiple-fields-positional-parameters.json deleted file mode 100644 index 04a7d986..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-multiple-fields-positional-parameters.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "input": "DELETE FROM sessions WHERE user_id = $1 AND expired = true;", - "outputs": [ - { - "expected": "DELETE FROM sessions WHERE user_id = ? AND expired = ?" - }, - { - "obfuscator_config": { - "replace_positional_parameter": false, - "replace_boolean": false - }, - "expected": "DELETE FROM sessions WHERE user_id = $1 AND expired = true" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-positional-parameters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-positional-parameters.json deleted file mode 100644 index 631b13c2..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-positional-parameters.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "UPDATE users SET email = $1 WHERE id = $2;", - "outputs": [ - { - "expected": "UPDATE users SET email = ? WHERE id = ?" 
- }, - { - "obfuscator_config": { - "replace_positional_parameter": false - }, - "expected": "UPDATE users SET email = $1 WHERE id = $2" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-returning.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-returning.json deleted file mode 100644 index e65fc2ee..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-returning.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "UPDATE users SET last_login = NOW() WHERE id = 3 RETURNING last_login;", - "outputs": [ - { - "expected": "UPDATE users SET last_login = NOW ( ) WHERE id = ? RETURNING last_login", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "UPDATE" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE users SET last_login = NOW() WHERE id = ? RETURNING last_login", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-set-multiple-columns.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-set-multiple-columns.json deleted file mode 100644 index 62d037b2..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-set-multiple-columns.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "UPDATE users SET name = 'Jane Updated', email = 'jane.updated@example.com' WHERE id = 2;", - "outputs": [ - { - "expected": "UPDATE users SET name = ?, email = ? 
WHERE id = ?", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "UPDATE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-set-single-column.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-set-single-column.json deleted file mode 100644 index 85972d0c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-set-single-column.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "UPDATE users SET name = 'John Updated' WHERE id = 1;", - "outputs": [ - { - "expected": "UPDATE users SET name = ? WHERE id = ?", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "UPDATE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-using-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-using-join.json deleted file mode 100644 index 219899f2..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-using-join.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "input": "UPDATE orders SET total = total * 0.9 FROM users WHERE users.id = orders.user_id AND users.status = 'VIP';", - "outputs": [ - { - "expected": "UPDATE orders SET total = total * ? 
FROM users WHERE users.id = orders.user_id AND users.status = ?", - "statement_metadata": { - "size": 17, - "tables": [ - "orders", - "users" - ], - "commands": [ - "UPDATE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-with-case.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-with-case.json deleted file mode 100644 index 82cdf51a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-with-case.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "UPDATE users SET status = CASE WHEN last_login < NOW() - INTERVAL '1 year' THEN 'inactive' ELSE status END;", - "outputs": [ - { - "expected": "UPDATE users SET status = CASE WHEN last_login < NOW ( ) - INTERVAL ? THEN ? ELSE status END", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "UPDATE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-with-cte.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-with-cte.json deleted file mode 100644 index 60fd0c4f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-with-cte.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "WITH updated AS (\n UPDATE users SET name = 'CTE Updated' WHERE id = 6 RETURNING *\n)\nSELECT * FROM updated;", - "outputs": [ - { - "expected": "WITH updated AS ( UPDATE users SET name = ? WHERE id = ? 
RETURNING * ) SELECT * FROM updated", - "statement_metadata": { - "size": 24, - "tables": [ - "users", - "updated" - ], - "commands": [ - "UPDATE", - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-with-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-with-subquery.json deleted file mode 100644 index a49aec33..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-with-subquery.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "input": "UPDATE products SET price = (SELECT MAX(price) FROM products) * 0.9 WHERE name = 'Old Product';", - "outputs": [ - { - "expected": "UPDATE products SET price = ( SELECT MAX ( price ) FROM products ) * ? WHERE name = ?", - "statement_metadata": { - "size": 20, - "tables": [ - "products" - ], - "commands": [ - "UPDATE", - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE products SET price = (SELECT MAX(price) FROM products) * ? 
WHERE name = ?", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der deleted file mode 100644 index 958f3cfa..00000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der deleted file mode 100644 index d2817641..00000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der deleted file mode 100644 index d8c3710c..00000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der deleted file mode 100644 index dae619c0..00000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der deleted file mode 100644 index ce7f8d31..00000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der and /dev/null differ diff --git 
a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der deleted file mode 100644 index 04b0d736..00000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der deleted file mode 100644 index d8c3710c..00000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem deleted file mode 100644 index 493a5a26..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 -a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 -OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 -RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK -P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 -HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu 
-0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 -EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 -/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA -QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ -nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD -X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco -pKklVz0= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem deleted file mode 100644 index 55a7f10c..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF -l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj -+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G -4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA -xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh -68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ -/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL -Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA -VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 -9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH -MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt -aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq -xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx -2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv -EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z -aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq -udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs 
-VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm -56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT -GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V -Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm -HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q -BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH -qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh -GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der deleted file mode 100644 index 04b0d736..00000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem deleted file mode 100644 index 0f98322c..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT -fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ -qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE 
-xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es -Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 -Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM -ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR -e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X -POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl -AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg -odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ -PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN -Dhm6uZM= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem deleted file mode 100644 index 81afea78..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs -8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO -QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk -XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA -Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc -gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf -LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl -jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 -4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q -Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P -nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 -drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE -duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 -L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG 
-06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm -eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD -uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 -lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL -a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb -hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ -7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j -r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 -eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD -B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz -7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem deleted file mode 100644 index 493a5a26..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 -a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 -OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 -RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK -P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 
-HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu -0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 -EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 -/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA -QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ -nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD -X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco -pKklVz0= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem deleted file mode 100644 index 55a7f10c..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF -l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj -+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G -4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA -xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh -68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ -/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL -Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA -VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 -9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH -MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt -aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq -xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx -2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv -EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z -aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq 
-udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs -VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm -56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT -GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V -Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm -HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q -BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH -qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh -GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem deleted file mode 100644 index 0f98322c..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT -fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ -qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE -xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es -Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 -Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM -ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX 
-MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR -e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X -POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl -AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg -odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ -PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN -Dhm6uZM= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem deleted file mode 100644 index 81afea78..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs -8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO -QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk -XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA -Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc -gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf -LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl -jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 -4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q -Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P -nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 -drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE -duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 -L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG -06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm -eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD -uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 -lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL 
-a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb -hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ -7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j -r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 -eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD -B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz -7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem deleted file mode 100644 index 493a5a26..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 -a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 -OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 -RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK -P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 -HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu -0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 -EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 
-/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA -QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ -nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD -X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco -pKklVz0= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem deleted file mode 100644 index 55a7f10c..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF -l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj -+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G -4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA -xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh -68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ -/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL -Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA -VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 -9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH -MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt -aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq -xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx -2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv -EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z -aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq -udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs -VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm -56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT 
-GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V -Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm -HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q -BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH -qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh -GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem deleted file mode 100644 index 0f98322c..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT -fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ -qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE -xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es -Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 -Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM -ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR -e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X -POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl 
-AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg -odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ -PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN -Dhm6uZM= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem deleted file mode 100644 index 81afea78..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs -8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO -QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk -XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA -Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc -gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf -LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl -jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 -4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q -Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P -nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 -drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE -duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 -L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG -06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm -eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD -uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 -lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL -a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb -hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ 
-7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j -r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 -eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD -B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz -7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/testdata/client_cert.pem deleted file mode 100644 index 493a5a26..00000000 --- a/vendor/github.com/google/s2a-go/testdata/client_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 -a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 -OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 -RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK -P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 -HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu -0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 -EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 -/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA -QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ -nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD 
-X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco -pKklVz0= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/client_key.pem b/vendor/github.com/google/s2a-go/testdata/client_key.pem deleted file mode 100644 index 55a7f10c..00000000 --- a/vendor/github.com/google/s2a-go/testdata/client_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF -l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj -+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G -4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA -xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh -68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ -/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL -Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA -VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 -9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH -MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt -aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq -xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx -2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv -EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z -aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq -udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs -VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm -56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT -GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V -Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm -HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q -BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH -qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh 
-GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem deleted file mode 100644 index 60c4cf06..00000000 --- a/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDCDCCAfACFFlYsYCFit01ZpYmfjxpo7/6wMEbMA0GCSqGSIb3DQEBCwUAMEgx -CzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEPMA0GA1UECgwGR29vZ2xlMRswGQYD -VQQDDBJ0ZXN0LXMyYS1tdGxzLXJvb3QwHhcNMjMwODIyMTY0NTE4WhcNNDMwODIy -MTY0NTE4WjA5MQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExHTAbBgNVBAMMFHRl -c3QtczJhLW10bHMtY2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC -AQEAqrQQMyxNtmdCB+uY3szgRsfPrKC+TV9Fusnd8PfaCVuGTGcSBKM018nV2TDn -3IYFQ1HgLpGwGwOFDBb3y0o9i2/l2VJySriX1GSNX6nDmVasQlO1wuOLCP7/LRmO -7b6Kise5W0IFhYaptKyWnekn2pS0tAjimqpfn2w0U6FDGtQUqg/trQQmGtTSJHjb -A+OFd0EFC18KGP8Q+jOMaMkJRmpeEiAPyHPDoMhqQNT26RApv9j2Uzo4SuXzHH6T -cAdm1+zG+EXY/UZKX9oDkSbwIJvN+gCmNyORLalJ12gsGYOCjMd8K0mlXBqrmmbO -VHVbUm9062lhE7x59AA8DK4DoQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCPOvtL -dq2hxFHlIy0YUK8jp/DtwJZPwzx1id5FtWwd0CxBS1StIgmkHMxtkJGz1iyQLplI -je+Msd4sTsb5zZi/8kGKehi8Wj4lghp4oP30cpob41OvM68M9RC/wSOVk9igSww+ -l3zof6wKRIswsi5VHrL16ruIVVoDlyFbKr8yk+cp9OPOV8hNNN7ewY9xC8OgnTt8 -YtdaLe6uTplKBLW+j3GtshigRhyfkGJyPFYL4LAeDJCHlC1qmBnkyP0ijMp6vneM -E8TLavnMTMcpihWTWpyKeRkO6HDRsP4AofQAp7VAiAdSOplga+w2qgrVICV+m8MK -BTq2PBvc59T6OFLq ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem b/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem deleted file mode 100644 index 9d112d1e..00000000 --- a/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCqtBAzLE22Z0IH -65jezOBGx8+soL5NX0W6yd3w99oJW4ZMZxIEozTXydXZMOfchgVDUeAukbAbA4UM 
-FvfLSj2Lb+XZUnJKuJfUZI1fqcOZVqxCU7XC44sI/v8tGY7tvoqKx7lbQgWFhqm0 -rJad6SfalLS0COKaql+fbDRToUMa1BSqD+2tBCYa1NIkeNsD44V3QQULXwoY/xD6 -M4xoyQlGal4SIA/Ic8OgyGpA1PbpECm/2PZTOjhK5fMcfpNwB2bX7Mb4Rdj9Rkpf -2gORJvAgm836AKY3I5EtqUnXaCwZg4KMx3wrSaVcGquaZs5UdVtSb3TraWETvHn0 -ADwMrgOhAgMBAAECggEAUccupZ1ZY4OHTi0PkNk8rpwFwTFGyeFVEf2ofkr24RnA -NnUAXEllxOUUNlcoFOz9s3kTeavg3qgqgpa0QmdAIb9LMXg+ec6CKkW7trMpGho8 -LxBUWNfSoU4sKEqAvyPT0lWJVo9D/up6/avbAi6TIbOw+Djzel4ZrlHTpabxc3WT -EilXzn4q54b3MzxCQeQjcnzTieW4Q5semG2kLiXFToHIY2di01P/O8awUjgrD+uW -/Cb6H49MnHm9VPkqea1iwZeMQd6Gh5FrC7RezsBjdB1JBcfsv6PFt2ySInjB8SF+ -XR5Gr3Cc5sh9s0LfprZ9Dq0rlSWmwasPMI1COK6SswKBgQDczgeWd3erQ1JX9LEI -wollawqC9y7uJhEsw1hrPqA3uqZYiLUc7Nmi4laZ12mcGoXNDS3R3XmD58qGmGaU -lxEVTb8KDVWBgw450VoBKzSMQnCP6zn4nZxTYxeqMKjDGf6TRB6TZc843qsG3eRC -k91yxrCQ/0HV6PT48C+lieDzLwKBgQDF6aNKiyrswr457undBnM1H8q/Y6xC5ZlK -UtiQdhuyBnicvz0U8WPxBY/8gha0OXWuSnBqq/z77iFVNv/zT6p9K7kM7nBGd8cB -8KO6FNbyaHWFrhCI5zNzRTH4oha0hfvUOoti09vqavCtWD4L+D/63ba1wNLKPO9o -4gWbCnUCLwKBgQC/vus372csgrnvR761LLrEJ8BpGt7WUJh5luoht7DKtHvgRleB -Vu1oVcV+s2Iy/ZVUDC3OIdZ0hcWKPK5YOxfKuEk+IXYvke+4peTTPwHTC59UW6Fs -FPK8N0FFuhvT0a8RlAY5WiAp8rPysp6WcnHMSl7qi8BQUozp4Sp/RsziYQKBgBXv -r4mzoy5a53rEYGd/L4XT4EUWZyGDEVqLlDVu4eL5lKTLDZokp08vrqXuRVX0iHap -CYzJQ2EpI8iuL/BoBB2bmwcz5n3pCMXORld5t9lmeqA2it6hwbIlGUTVsm6P6zm6 -w3hQwy9YaxTLkxUAjxbfPEEo/jQsTNzzMGve3NlBAoGAbgJExpDyMDnaD2Vi5eyr -63b54BsqeLHqxJmADifyRCj7G1SJMm3zMKkNNOS0vsXgoiId973STFf1XQiojiv8 -Slbxyv5rczcY0n3LOuQYcM5OzsjzpNFZsT2dDnMfNRUF3rx3Geu/FuJ9scF1b00r -fVMrcL3jSf/W1Xh4TgtyoU8= ------END PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem deleted file mode 100644 index 44e436f6..00000000 --- a/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDcTCCAlmgAwIBAgIUDUkgI+2FZtuUHyUUi0ZBH7JvN00wDQYJKoZIhvcNAQEL 
-BQAwSDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQ8wDQYDVQQKDAZHb29nbGUx -GzAZBgNVBAMMEnRlc3QtczJhLW10bHMtcm9vdDAeFw0yMzA4MjEyMTI5MTVaFw00 -MzA4MjEyMTI5MTVaMEgxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEPMA0GA1UE -CgwGR29vZ2xlMRswGQYDVQQDDBJ0ZXN0LXMyYS1tdGxzLXJvb3QwggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCbFEQfpvla27bATedrN4BAWsI9GSwSnJLW -QWzXcnAk6cKxQBAhnaKHRxHY8ttLhNTtxQeub894CLzJvHE/0xDhuMzjtCCCZ7i2 -r08tKZ1KcEzPJCPNlxlzAXPA45XU3LRlbGvju/PBPhm6n1hCEKTNI/KETJ5DEaYg -Cf2LcXVsl/zW20MwDZ+e2w/9a2a6n6DdpW1ekOR550hXAUOIxvmXRBeYeGLFvp1n -rQgZBhRaxP03UB+PQD2oMi/4mfsS96uGCXdzzX8qV46O8m132HUbnA/wagIwboEe -d7Bx237dERDyHw5GFnll7orgA0FOtoEufXdeQxWVvTjO0+PVPgsvAgMBAAGjUzBR -MB0GA1UdDgQWBBRyMtg/yutV8hw8vOq0i8x0eBQi7DAfBgNVHSMEGDAWgBRyMtg/ -yutV8hw8vOq0i8x0eBQi7DAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUA -A4IBAQArN/gdqWMxd5Rvq2eJMTp6I4RepJOT7Go4sMsRsy1caJqqcoS2EvREDZMN -XNEBcyQBB5kYd6TCcZGoLnEtWYXQ4jjEiXG1g7/+rWxyqw0ZYuP7FWzuHg3Uor/x -fApbEKwptP5ywVc+33h4qreGcqXkVCCn+sAcstGgrqubdGZW2T5gazUMyammOOuN -9IWL1PbvXmgEKD+80NUIrk09zanYyrElGdU/zw/kUbZ3Jf6WUBtJGhTzRQ1qZeKa -VnpCbLoG3vObEB8mxDUAlIzwAtfvw4U32BVIZA8xrocz6OOoAnSW1bTlo3EOIo/G -MTV7jmY9TBPtfhRuO/cG650+F+cw ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem deleted file mode 100644 index 68c60613..00000000 --- a/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDbjCCAlagAwIBAgIUbexZ5sZl86Al9dsI2PkOgtqKnkgwDQYJKoZIhvcNAQEL -BQAwSDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQ8wDQYDVQQKDAZHb29nbGUx -GzAZBgNVBAMMEnRlc3QtczJhLW10bHMtcm9vdDAeFw0yMzA4MjIwMDMyMDRaFw00 -MzA4MjIwMDMyMDRaMDkxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEdMBsGA1UE -AwwUdGVzdC1zMmEtbXRscy1zZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw -ggEKAoIBAQCMEzybsGPqfh92GLwy43mt8kQDF3ztr8y06RwU1hVnY7QqYK4obpvh -HkJVnTz9gwNBF3n5nUalqRzactlf2PCydN9oSYNCO8svVmo7vw1CleKAKFAiV5Qn 
-H76QlqD15oJreh7nSM8R4qj5KukIHvt0cN0gD6CJQzIURDtsKJwkW3yQjYyT/FAK -GYtFrB6buDn3Eg3Hsw6z7uj7CzLBsSl7BIGrQILbpbI9nFNT3rUTUhXZKY/3UtJA -Ob66AjTmMbD16RGYZR4JsPx6CstheifJ6YSI79r5KgD37zX0jMXFWimvb2SmZmFe -LoohtC8K7uTyjm/dROx6nHXdDt5TQYXHAgMBAAGjXzBdMBsGA1UdEQQUMBKHEAAA -AAAAAAAAAAAAAAAAAAAwHQYDVR0OBBYEFI3i2+tIk6YYn0MIxC0q93jk1VsUMB8G -A1UdIwQYMBaAFHIy2D/K61XyHDy86rSLzHR4FCLsMA0GCSqGSIb3DQEBCwUAA4IB -AQAUhk+s/lrIAULBbU7E22C8f93AzTxE1mhyHGNlfPPJP3t1Dl+h4X4WkFpkz5gT -EcNXB//Vvoq99HbEK5/92sxsIPexKdJBdcggeHXIgLDkOrEZEb0Nnh9eaAuU2QDn -JW44hMB+aF6mEaJvOHE6DRkQw3hwFYFisFKKHtlQ3TyOhw5CHGzSExPZusdSFNIe -2E7V/0QzGPJEFnEFUNe9N8nTH2P385Paoi+5+Iizlp/nztVXfzv0Cj/i+qGgtDUs -HB+gBU2wxMw8eYyuNzACH70wqGR1Parj8/JoyYhx0S4+Gjzy3JH3CcAMaxyfH/dI -4Wcvfz/isxgmH1UqIt3oc6ad ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem b/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem deleted file mode 100644 index b14ad0f7..00000000 --- a/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCMEzybsGPqfh92 -GLwy43mt8kQDF3ztr8y06RwU1hVnY7QqYK4obpvhHkJVnTz9gwNBF3n5nUalqRza -ctlf2PCydN9oSYNCO8svVmo7vw1CleKAKFAiV5QnH76QlqD15oJreh7nSM8R4qj5 -KukIHvt0cN0gD6CJQzIURDtsKJwkW3yQjYyT/FAKGYtFrB6buDn3Eg3Hsw6z7uj7 -CzLBsSl7BIGrQILbpbI9nFNT3rUTUhXZKY/3UtJAOb66AjTmMbD16RGYZR4JsPx6 -CstheifJ6YSI79r5KgD37zX0jMXFWimvb2SmZmFeLoohtC8K7uTyjm/dROx6nHXd -Dt5TQYXHAgMBAAECggEAIB5zGdIG/yh/Z1GBqfuOFaxFGx5iJ5BVlLAVH9P9IXFz -yPnVRXEjbinFlSMSbqEBeIX9EpcVMXxHIPIP1RIGEy2IYr3kiqXyT771ahDDZh6/ -Spqz0UQatSPqyvW3H9uE0Uc12dvQm23JSCUmPRX5m7gbhDQBIChXzdzdcU4Yi59V -4xmJUvbsAcLw5CBM6kwV+1NGVH9+3mUdhrr9M6B6+sVB/xnaqMGEDfQGiwL8U7EY -QOuc46KXu3Pd/qCdVLn60IrdjSzDJKeC5UZZ+ejNAo+DfbtOovBj3qu3OCUg4XVy -0CDBJ1sTdLvUfF4Gb+crjPsd+qBbXcjVfqdadwhsoQKBgQDBF1Pys/NitW8okJwp -2fiDIASP3TiI+MthWHGyuoZGPvmXQ3H6iuLSm8c/iYI2WPTf53Xff1VcFm1GmQms 
-GCsYM8Ax94zCeO6Ei1sYYxwcBloEZfOeV37MPA4pjJF4Lt+n5nveNxP+lrsjksJz -wToSEgWPDT1b/xcdt4/5j9J85wKBgQC5tiLx+33mwH4DoaFRmSl0+VuSNYFw6DTQ -SQ+kWqWGH4NENc9wf4Dj2VUZQhpXNhXVSxj+aP2d/ck1NrTJAWqYEXCDtFQOGSa2 -cGPRr+Fhy5NIEaEvR7IXcMBZzx3koYmWVBHricyrXs5FvHrT3N14mGDUG8n24U3f -R799bau0IQKBgQC97UM+lHCPJCWNggiJRgSifcje9VtZp1btjoBvq/bNe74nYkjn -htsrC91Fiu1Qpdlfr50K1IXSyaB886VG6JLjAGxI+dUzqJ38M9LLvxj0G+9JKjsi -AbAQFfZcOg8QZxLJZPVsE0MQhZTXndC06VhEVAOxvPUg214Sde8hK61/+wKBgCRw -O10VhnePT2pw/VEgZ0T/ZFtEylgYB7zSiRIrgwzVBBGPKVueePC8BPmGwdpYz2Hh -cU8B1Ll6QU+Co2hJMdwSl+wPpup5PuJPHRbYlrV0lzpt0x2OyL/WrLcyb2Ab3f40 -EqwPhqwdVwXR3JvTW1U9OMqFhVQ+kuP7lPQMX8NhAoGBAJOgZ7Tokipc4Mi68Olw -SCaOPvjjy4sW2rTRuKyjc1wTAzy7SJ3vXHfGkkN99nTLJFwAyJhWUpnRdwAXGi+x -gyOa95ImsEfRSwEjbluWfF8/P0IU8GR+ZTqT4NnNCOsi8T/xst4Szd1ECJNnnZDe -1ChfPP1AH+/75MJCvu6wQBQv ------END PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem b/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem deleted file mode 100644 index ad1bad59..00000000 --- a/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDITCCAgkCFBS8mLoytMpMWBwpAtnRaq3eIKnsMA0GCSqGSIb3DQEBCwUAME0x -CzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UECgwEVGVzdDEiMCAGA1UE -AwwZdGVzdC1zMmEtbXRscy1zZWxmLXNpZ25lZDAeFw0yMzA4MjIyMTE2MDFaFw00 -MzA4MjIyMTE2MDFaME0xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UE -CgwEVGVzdDEiMCAGA1UEAwwZdGVzdC1zMmEtbXRscy1zZWxmLXNpZ25lZDCCASIw -DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKFFPsYasKZeCFLEXl3RpE/ZOXFe -2lhutIalSpZvCmso+mQGoZ4cHK7At+kDjBi5CrnXkYcw7quQAhHgU0frhWdj7tsW -HUUtq7T8eaGWKBnVD9fl+MjtAl1BmhXwV9qRBbj4EesSKGDSGpKf66dOtzw83JbB -cU7XlPAH1c1zo2GXC1himcZ+SVGHVrOjn4NmeFs8g94/Dke8dWkHwv5YTMVugFK4 -5KxKgSOKkr4ka7PCBzgxCnW4wYSZNRHcxrqkiArO2HAQq0ACr7u+fVDYH//9mP2Z -ADo/zch7O5yhkiNbjXJIRrptDWEuVYMRloYDhT773h7bV/Q0Wo0NQGtasJ8CAwEA -ATANBgkqhkiG9w0BAQsFAAOCAQEAPjbH0TMyegF/MDvglkc0sXr6DqlmTxDCZZmG 
-lYPZ5Xy062+rxIHghMARbvO4BxepiG37KsP2agvOldm4TtU8nQ8LyswmSIFm4BQ+ -XQWwdsWyYyd8l0d5sXAdaN6AXwy50fvqCepmEqyreMY6dtLzlwo9gVCBFB7QuAPt -Nc14phpEUZt/KPNuY6cUlB7bz3tmnFbwxUrWj1p0KBEYsr7+KEVZxR+z0wtlU7S9 -ZBrmUvx0fq5Ef7JWtHW0w4ofg1op742sdYl+53C26GZ76ts4MmqVz2/94DScgRaU -gT0GLVuuCZXRDVeTXqTb4mditRCfzFPe9cCegYhGhSqBs8yh5A== ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem b/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem deleted file mode 100644 index bcf08e4f..00000000 --- a/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQChRT7GGrCmXghS -xF5d0aRP2TlxXtpYbrSGpUqWbwprKPpkBqGeHByuwLfpA4wYuQq515GHMO6rkAIR -4FNH64VnY+7bFh1FLau0/HmhligZ1Q/X5fjI7QJdQZoV8FfakQW4+BHrEihg0hqS -n+unTrc8PNyWwXFO15TwB9XNc6NhlwtYYpnGfklRh1azo5+DZnhbPIPePw5HvHVp -B8L+WEzFboBSuOSsSoEjipK+JGuzwgc4MQp1uMGEmTUR3Ma6pIgKzthwEKtAAq+7 -vn1Q2B///Zj9mQA6P83IezucoZIjW41ySEa6bQ1hLlWDEZaGA4U++94e21f0NFqN -DUBrWrCfAgMBAAECggEAR8e8YwyqJ8KezcgdgIC5M9kp2i4v3UCZFX0or8CI0J2S -pUbWVLuKgLXCpfIwPyjNf15Vpei/spkMcsx4BQDthdFTFSzIpmvni0z9DlD5VFYj -ESOJElV7wepbHPy2/c+izmuL/ic81aturGiFyRgeMq+cN3WuaztFTXkPTrzzsZGF -p/Mx3gqm7Hoc3d2xlv+8L5GjCtEJPlQgZJV+s3ennBjOAd8CC7d9qJetE3Er46pn -r5jedV3bQRZYBzmooYNHjbAs26++wYac/jTE0/U6nKS17eWq4BQZUtlMXUw5N81B -7LKn7C03rj2KCn+Nf5uin9ALmoy888LXCDdvL/NZkQKBgQDduv1Heu+tOZuNYUdQ -Hswmd8sVNAAWGZxdxixHMv58zrgbLFXSX6K89X2l5Sj9XON8TH46MuSFdjSwwWw5 -fBrhVEhA5srcqpvVWIBE05yqPpt0s1NQktMWJKELWlG8jOhVKwM5OYDpdxtwehpz -1g70XJz+nF/LTV8RdTK+OWDDpQKBgQC6MhdbGHUz/56dY3gZpE5TXnN2hkNbZCgk -emr6z85VHhQflZbedhCzB9PUnZnCKWOGQHQdxRTtRfd46LVboZqCdYO1ZNQv6toP -ysS7dTpZZFy7CpQaW0Y6/jS65jW6xIDKR1W40vgltZ3sfpG37JaowpzWdw2WuOnw -Bg0rcJAf8wKBgQCqE+p/z97UwuF8eufWnyj9QNo382E1koOMspv4KTdnyLETtthF -vDH6O1wbykG8xmmASLRyM+NyNA+KnXNETNvZh2q8zctBpGRQK8iIAsGjHM7ln0AD -B/x+ea5GJQuZU4RK/+lDFca6TjBwAFkWDVX/PqL18kDQkxKfM4SuwRhmOQKBgDGh 
-eoJIsa0LnP787Z2AI3Srf4F/ZmLs/ppCm1OBotEjdF+64v0nYWonUvqgi8SqfaHi -elEZIGvis4ViGj1zhRjzNAlc+AZRxpBhDzGcnNIJI4Kj3jhsTfsZmXqcNIQ1LtM8 -Uogyi/yZPaA1WKg7Aym2vlGYaGHdplXZdxc2KOSrAoGABRkD9l2OVcwK7RyNgFxo -mjxx0tfUdDBhHIi2igih1FiHpeP9E+4/kE/K7PnU9DoDrL1jW1MTpXaYV4seOylk -k9z/9QfcRa9ePD2N4FqbHWSYp5n3aLoIcGq/9jyjTwayZbbIhWO+vNuHE9wIvecZ -8x3gNkxJRb4NaLIoNzAhCoo= ------END PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/testdata/server_cert.pem deleted file mode 100644 index 0f98322c..00000000 --- a/vendor/github.com/google/s2a-go/testdata/server_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT -fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ -qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE -xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es -Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 -Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM -ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR -e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X -POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl -AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg 
-odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ -PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN -Dhm6uZM= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/server_key.pem b/vendor/github.com/google/s2a-go/testdata/server_key.pem deleted file mode 100644 index 81afea78..00000000 --- a/vendor/github.com/google/s2a-go/testdata/server_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs -8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO -QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk -XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA -Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc -gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf -LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl -jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 -4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q -Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P -nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 -drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE -duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 -L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG -06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm -eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD -uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 -lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL -a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb -hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ -7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j -r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 -eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD 
-B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz -7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== ------END RSA PRIVATE KEY----- diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s index d2ca5dee..b3c1699b 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s @@ -19,15 +19,14 @@ #define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ MULLD r0, h0, t0; \ - MULLD r0, h1, t4; \ MULHDU r0, h0, t1; \ + MULLD r0, h1, t4; \ MULHDU r0, h1, t5; \ ADDC t4, t1, t1; \ MULLD r0, h2, t2; \ - ADDZE t5; \ MULHDU r1, h0, t4; \ MULLD r1, h0, h0; \ - ADD t5, t2, t2; \ + ADDE t5, t2, t2; \ ADDC h0, t1, t1; \ MULLD h2, r1, t3; \ ADDZE t4, h0; \ @@ -37,13 +36,11 @@ ADDE t5, t3, t3; \ ADDC h0, t2, t2; \ MOVD $-4, t4; \ - MOVD t0, h0; \ - MOVD t1, h1; \ ADDZE t3; \ - ANDCC $3, t2, h2; \ - AND t2, t4, t0; \ + RLDICL $0, t2, $62, h2; \ + AND t2, t4, h0; \ ADDC t0, h0, h0; \ - ADDE t3, h1, h1; \ + ADDE t3, t1, h1; \ SLD $62, t3, t4; \ SRD $2, t2; \ ADDZE h2; \ @@ -75,6 +72,7 @@ TEXT ·update(SB), $0-32 loop: POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + PCALIGN $16 multiply: POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) ADD $-16, R5 diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go index de67f938..3c57880d 100644 --- a/vendor/golang.org/x/net/html/token.go +++ b/vendor/golang.org/x/net/html/token.go @@ -910,9 +910,6 @@ func (z *Tokenizer) readTagAttrKey() { return } switch c { - case ' ', '\n', '\r', '\t', '\f', '/': - z.pendingAttr[0].end = z.raw.end - 1 - return case '=': if z.pendingAttr[0].start+1 == z.raw.end { // WHATWG 13.2.5.32, if we see an equals sign before the attribute name @@ -920,7 +917,9 @@ func (z *Tokenizer) readTagAttrKey() { continue } fallthrough - case '>': + case ' ', '\n', '\r', '\t', '\f', '/', '>': + // 
WHATWG 13.2.5.33 Attribute name state + // We need to reconsume the char in the after attribute name state to support the / character z.raw.end-- z.pendingAttr[0].end = z.raw.end return @@ -939,6 +938,11 @@ func (z *Tokenizer) readTagAttrVal() { if z.err != nil { return } + if c == '/' { + // WHATWG 13.2.5.34 After attribute name state + // U+002F SOLIDUS (/) - Switch to the self-closing start tag state. + return + } if c != '=' { z.raw.end-- return diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index c1f6b90d..e2b298d8 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1510,13 +1510,12 @@ func (mh *MetaHeadersFrame) checkPseudos() error { } func (fr *Framer) maxHeaderStringLen() int { - v := fr.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) + v := int(fr.maxHeaderListSize()) + if v < 0 { + // If maxHeaderListSize overflows an int, use no limit (0). + return 0 } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 + return v } // readMetaFrame returns 0 or more CONTINUATION frames from fr and diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index e7d3df4b..b0e41985 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index c6492020..fdcaa974 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -584,7 +584,7 @@ ccflags="$@" $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEYCTL_/ || $2 ~ /^PERF_/ || - $2 ~ /^SECCOMP_MODE_/ || + $2 ~ /^SECCOMP_/ || $2 ~ /^SEEK_/ || $2 ~ /^SCHED_/ || $2 ~ /^SPLICE_/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 16dc6993..2f0fa76e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin && go1.12 +//go:build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 64d1bb4d..2b57e0f7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -13,6 +13,7 @@ package unix import ( + "errors" "sync" "unsafe" ) @@ -169,25 +170,26 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) - if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { + // Suppress ENOMEM errors to be compatible with the C library __xuname() implementation. 
+ if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_HOSTNAME} n = unsafe.Sizeof(uname.Nodename) - if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_OSRELEASE} n = unsafe.Sizeof(uname.Release) - if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_VERSION} n = unsafe.Sizeof(uname.Version) - if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } @@ -205,7 +207,7 @@ func Uname(uname *Utsname) error { mib = []_C_int{CTL_HW, HW_MACHINE} n = unsafe.Sizeof(uname.Machine) - if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 0f85e29e..5682e262 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1849,6 +1849,105 @@ func Dup2(oldfd, newfd int) error { //sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) //sys Fsopen(fsName string, flags int) (fd int, err error) //sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) + +//sys fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) + +func fsconfigCommon(fd int, cmd uint, key string, value *byte, aux int) (err error) { + var keyp *byte + if keyp, err = BytePtrFromString(key); err != nil { + return + } + return fsconfig(fd, cmd, keyp, value, aux) +} + +// FsconfigSetFlag is 
equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FLAG. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +func FsconfigSetFlag(fd int, key string) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FLAG, key, nil, 0) +} + +// FsconfigSetString is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_STRING. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is the parameter value to set. +func FsconfigSetString(fd int, key string, value string) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(value); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_STRING, key, valuep, 0) +} + +// FsconfigSetBinary is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_BINARY. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is the parameter value to set. +func FsconfigSetBinary(fd int, key string, value []byte) (err error) { + if len(value) == 0 { + return EINVAL + } + return fsconfigCommon(fd, FSCONFIG_SET_BINARY, key, &value[0], len(value)) +} + +// FsconfigSetPath is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// path is a non-empty path for specified key. +// atfd is a file descriptor at which to start lookup from or AT_FDCWD. +func FsconfigSetPath(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH, key, valuep, atfd) +} + +// FsconfigSetPathEmpty is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH_EMPTY. The same as +// FconfigSetPath but with AT_PATH_EMPTY implied. 
+func FsconfigSetPathEmpty(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH_EMPTY, key, valuep, atfd) +} + +// FsconfigSetFd is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FD. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is a file descriptor to be assigned to specified key. +func FsconfigSetFd(fd int, key string, value int) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FD, key, nil, value) +} + +// FsconfigCreate is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_CREATE. +// +// fd is the filesystem context to act upon. +func FsconfigCreate(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_CREATE, nil, nil, 0) +} + +// FsconfigReconfigure is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_RECONFIGURE. +// +// fd is the filesystem context to act upon. 
+func FsconfigReconfigure(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_RECONFIGURE, nil, nil, 0) +} + //sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index a5d3ff8d..36bf8399 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1785,6 +1785,8 @@ const ( LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 + LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 + LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef @@ -2465,6 +2467,7 @@ const ( PR_MCE_KILL_GET = 0x22 PR_MCE_KILL_LATE = 0x0 PR_MCE_KILL_SET = 0x1 + PR_MDWE_NO_INHERIT = 0x2 PR_MDWE_REFUSE_EXEC_GAIN = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b @@ -2669,8 +2672,9 @@ const ( RTAX_FEATURES = 0xc RTAX_FEATURE_ALLFRAG = 0x8 RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_MASK = 0x1f RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TCP_USEC_TS = 0x10 RTAX_FEATURE_TIMESTAMP = 0x4 RTAX_HOPLIMIT = 0xa RTAX_INITCWND = 0xb @@ -2913,9 +2917,38 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 + SECCOMP_ADDFD_FLAG_SEND = 0x2 + SECCOMP_ADDFD_FLAG_SETFD = 0x1 + SECCOMP_FILTER_FLAG_LOG = 0x2 + SECCOMP_FILTER_FLAG_NEW_LISTENER = 0x8 + SECCOMP_FILTER_FLAG_SPEC_ALLOW = 0x4 + SECCOMP_FILTER_FLAG_TSYNC = 0x1 + SECCOMP_FILTER_FLAG_TSYNC_ESRCH = 0x10 + SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV = 0x20 + SECCOMP_GET_ACTION_AVAIL = 0x2 + SECCOMP_GET_NOTIF_SIZES = 0x3 + SECCOMP_IOCTL_NOTIF_RECV = 0xc0502100 + SECCOMP_IOCTL_NOTIF_SEND = 0xc0182101 + SECCOMP_IOC_MAGIC = '!' 
SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECCOMP_RET_ACTION = 0x7fff0000 + SECCOMP_RET_ACTION_FULL = 0xffff0000 + SECCOMP_RET_ALLOW = 0x7fff0000 + SECCOMP_RET_DATA = 0xffff + SECCOMP_RET_ERRNO = 0x50000 + SECCOMP_RET_KILL = 0x0 + SECCOMP_RET_KILL_PROCESS = 0x80000000 + SECCOMP_RET_KILL_THREAD = 0x0 + SECCOMP_RET_LOG = 0x7ffc0000 + SECCOMP_RET_TRACE = 0x7ff00000 + SECCOMP_RET_TRAP = 0x30000 + SECCOMP_RET_USER_NOTIF = 0x7fc00000 + SECCOMP_SET_MODE_FILTER = 0x1 + SECCOMP_SET_MODE_STRICT = 0x0 + SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP = 0x1 + SECCOMP_USER_NOTIF_FLAG_CONTINUE = 0x1 SECRETMEM_MAGIC = 0x5345434d SECURITYFS_MAGIC = 0x73636673 SEEK_CUR = 0x1 @@ -3075,6 +3108,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_UDP = 0x11 + SOL_VSOCK = 0x11f SOL_X25 = 0x106 SOL_XDP = 0x11b SOMAXCONN = 0x1000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 4920821c..42ff8c3c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index a0c1e411..dca43600 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -282,6 +282,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index c6398556..5cca668a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -288,6 +288,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 47cc62e2..d8cae6d1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -278,6 +278,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 27ac4a09..28e39afd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -275,6 +275,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 54694642..cd66e92c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 
+ SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 3adb81d7..c1595eba 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 2dfe98f0..ee9456b0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index f5398f84..8cfca81e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go 
index c54f152d..60b0deb3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -336,6 +336,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 76057dc7..f90aa728 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e0c3725e..ba9e0150 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 18f2813e..07cdfd6e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -272,6 +272,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + 
SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 11619d4e..2f1dd214 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -344,6 +344,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 396d994d..f40519d9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -335,6 +335,9 @@ const ( SCM_TIMESTAMPNS = 0x21 SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x400000 SFD_NONBLOCK = 0x4000 SF_FP = 0x38 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1488d271..87d8612a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -906,6 +906,16 @@ func Fspick(dirfd int, pathName string, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) { + _, _, e1 := Syscall6(SYS_FSCONFIG, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(value)), uintptr(aux), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) 
(n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index fcf3ecbd..0cc3ce49 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -448,4 +448,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f56dc250..856d92d6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -371,4 +371,7 @@ const ( SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 974bf246..8d467094 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -412,4 +412,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 39a2739e..edc17324 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -315,4 +315,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index cf9c9d77..445eba20 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -309,4 +309,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 10b7362e..adba01bc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index cd4d8b4f..014c4e9c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 2c0efca8..ccc97d74 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 
a72e31d3..ec2b64a9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index c7d1e374..21a839e3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -439,4 +439,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index f4d4838c..c11121ec 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index b64f0e59..909b631f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 95711195..e49bed16 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -316,4 +316,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index f94e943b..66017d2d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -377,4 +377,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index ba0c2bc5..47bab18d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -390,4 +390,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index bbf8399f..eff6bcde 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -174,7 +174,8 @@ type FscryptPolicyV2 struct { Contents_encryption_mode uint8 Filenames_encryption_mode uint8 Flags uint8 - _ [4]uint8 + Log2_data_unit_size uint8 + _ [3]uint8 Master_key_identifier [16]uint8 } @@ -455,60 +456,63 @@ type Ucred struct { } type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - 
Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 - Pacing_rate uint64 - Max_pacing_rate uint64 - Bytes_acked uint64 - Bytes_received uint64 - Segs_out uint32 - Segs_in uint32 - Notsent_bytes uint32 - Min_rtt uint32 - Data_segs_in uint32 - Data_segs_out uint32 - Delivery_rate uint64 - Busy_time uint64 - Rwnd_limited uint64 - Sndbuf_limited uint64 - Delivered uint32 - Delivered_ce uint32 - Bytes_sent uint64 - Bytes_retrans uint64 - Dsack_dups uint32 - Reord_seen uint32 - Rcv_ooopack uint32 - Snd_wnd uint32 - Rcv_wnd uint32 - Rehash uint32 + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 + Bytes_received uint64 + Segs_out uint32 + Segs_in uint32 + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 + Data_segs_out uint32 + Delivery_rate uint64 + Busy_time uint64 + Rwnd_limited uint64 + Sndbuf_limited uint64 + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 + Bytes_retrans uint64 + Dsack_dups uint32 + Reord_seen uint32 + Rcv_ooopack uint32 + Snd_wnd uint32 + Rcv_wnd uint32 + Rehash uint32 + Total_rto uint16 + Total_rto_recoveries uint16 + Total_rto_time uint32 } type CanFilter struct { @@ -551,7 +555,7 @@ const ( SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc - 
SizeofTCPInfo = 0xf0 + SizeofTCPInfo = 0xf8 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -832,6 +836,15 @@ const ( FSPICK_EMPTY_PATH = 0x8 FSMOUNT_CLOEXEC = 0x1 + + FSCONFIG_SET_FLAG = 0x0 + FSCONFIG_SET_STRING = 0x1 + FSCONFIG_SET_BINARY = 0x2 + FSCONFIG_SET_PATH = 0x3 + FSCONFIG_SET_PATH_EMPTY = 0x4 + FSCONFIG_SET_FD = 0x5 + FSCONFIG_CMD_CREATE = 0x6 + FSCONFIG_CMD_RECONFIGURE = 0x7 ) type OpenHow struct { @@ -1546,6 +1559,7 @@ const ( IFLA_DEVLINK_PORT = 0x3e IFLA_GSO_IPV4_MAX_SIZE = 0x3f IFLA_GRO_IPV4_MAX_SIZE = 0x40 + IFLA_DPLL_PIN = 0x41 IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -1561,6 +1575,7 @@ const ( IFLA_INET6_ICMP6STATS = 0x6 IFLA_INET6_TOKEN = 0x7 IFLA_INET6_ADDR_GEN_MODE = 0x8 + IFLA_INET6_RA_MTU = 0x9 IFLA_BR_UNSPEC = 0x0 IFLA_BR_FORWARD_DELAY = 0x1 IFLA_BR_HELLO_TIME = 0x2 @@ -1608,6 +1623,9 @@ const ( IFLA_BR_MCAST_MLD_VERSION = 0x2c IFLA_BR_VLAN_STATS_PER_PORT = 0x2d IFLA_BR_MULTI_BOOLOPT = 0x2e + IFLA_BR_MCAST_QUERIER_STATE = 0x2f + IFLA_BR_FDB_N_LEARNED = 0x30 + IFLA_BR_FDB_MAX_LEARNED = 0x31 IFLA_BRPORT_UNSPEC = 0x0 IFLA_BRPORT_STATE = 0x1 IFLA_BRPORT_PRIORITY = 0x2 @@ -1645,6 +1663,14 @@ const ( IFLA_BRPORT_BACKUP_PORT = 0x22 IFLA_BRPORT_MRP_RING_OPEN = 0x23 IFLA_BRPORT_MRP_IN_OPEN = 0x24 + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 0x25 + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 0x26 + IFLA_BRPORT_LOCKED = 0x27 + IFLA_BRPORT_MAB = 0x28 + IFLA_BRPORT_MCAST_N_GROUPS = 0x29 + IFLA_BRPORT_MCAST_MAX_GROUPS = 0x2a + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 0x2b + IFLA_BRPORT_BACKUP_NHID = 0x2c IFLA_INFO_UNSPEC = 0x0 IFLA_INFO_KIND = 0x1 IFLA_INFO_DATA = 0x2 @@ -1666,6 +1692,9 @@ const ( IFLA_MACVLAN_MACADDR = 0x4 IFLA_MACVLAN_MACADDR_DATA = 0x5 IFLA_MACVLAN_MACADDR_COUNT = 0x6 + IFLA_MACVLAN_BC_QUEUE_LEN = 0x7 + IFLA_MACVLAN_BC_QUEUE_LEN_USED = 0x8 + IFLA_MACVLAN_BC_CUTOFF = 0x9 IFLA_VRF_UNSPEC = 0x0 IFLA_VRF_TABLE = 0x1 IFLA_VRF_PORT_UNSPEC = 0x0 @@ -1689,9 +1718,22 @@ const ( IFLA_XFRM_UNSPEC = 
0x0 IFLA_XFRM_LINK = 0x1 IFLA_XFRM_IF_ID = 0x2 + IFLA_XFRM_COLLECT_METADATA = 0x3 IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 + IFLA_NETKIT_UNSPEC = 0x0 + IFLA_NETKIT_PEER_INFO = 0x1 + IFLA_NETKIT_PRIMARY = 0x2 + IFLA_NETKIT_POLICY = 0x3 + IFLA_NETKIT_PEER_POLICY = 0x4 + IFLA_NETKIT_MODE = 0x5 IFLA_VXLAN_UNSPEC = 0x0 IFLA_VXLAN_ID = 0x1 IFLA_VXLAN_GROUP = 0x2 @@ -1722,6 +1764,8 @@ const ( IFLA_VXLAN_GPE = 0x1b IFLA_VXLAN_TTL_INHERIT = 0x1c IFLA_VXLAN_DF = 0x1d + IFLA_VXLAN_VNIFILTER = 0x1e + IFLA_VXLAN_LOCALBYPASS = 0x1f IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1736,6 +1780,7 @@ const ( IFLA_GENEVE_LABEL = 0xb IFLA_GENEVE_TTL_INHERIT = 0xc IFLA_GENEVE_DF = 0xd + IFLA_GENEVE_INNER_PROTO_INHERIT = 0xe IFLA_BAREUDP_UNSPEC = 0x0 IFLA_BAREUDP_PORT = 0x1 IFLA_BAREUDP_ETHERTYPE = 0x2 @@ -1748,6 +1793,8 @@ const ( IFLA_GTP_FD1 = 0x2 IFLA_GTP_PDP_HASHSIZE = 0x3 IFLA_GTP_ROLE = 0x4 + IFLA_GTP_CREATE_SOCKETS = 0x5 + IFLA_GTP_RESTART_COUNT = 0x6 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1777,6 +1824,9 @@ const ( IFLA_BOND_AD_ACTOR_SYSTEM = 0x1a IFLA_BOND_TLB_DYNAMIC_LB = 0x1b IFLA_BOND_PEER_NOTIF_DELAY = 0x1c + IFLA_BOND_AD_LACP_ACTIVE = 0x1d + IFLA_BOND_MISSED_MAX = 0x1e + IFLA_BOND_NS_IP6_TARGET = 0x1f IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1792,6 +1842,7 @@ const ( IFLA_BOND_SLAVE_AD_AGGREGATOR_ID = 0x6 IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE = 0x7 IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE = 0x8 + IFLA_BOND_SLAVE_PRIO = 0x9 IFLA_VF_INFO_UNSPEC = 0x0 IFLA_VF_INFO = 0x1 IFLA_VF_UNSPEC = 0x0 @@ -1850,8 +1901,16 @@ const ( IFLA_STATS_LINK_XSTATS_SLAVE = 0x3 IFLA_STATS_LINK_OFFLOAD_XSTATS = 0x4 IFLA_STATS_AF_SPEC = 0x5 + IFLA_STATS_GETSET_UNSPEC = 0x0 + IFLA_STATS_GET_FILTERS = 0x1 + 
IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 0x2 IFLA_OFFLOAD_XSTATS_UNSPEC = 0x0 IFLA_OFFLOAD_XSTATS_CPU_HIT = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO = 0x2 + IFLA_OFFLOAD_XSTATS_L3_STATS = 0x3 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0x0 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 0x2 IFLA_XDP_UNSPEC = 0x0 IFLA_XDP_FD = 0x1 IFLA_XDP_ATTACHED = 0x2 @@ -1881,6 +1940,11 @@ const ( IFLA_RMNET_UNSPEC = 0x0 IFLA_RMNET_MUX_ID = 0x1 IFLA_RMNET_FLAGS = 0x2 + IFLA_MCTP_UNSPEC = 0x0 + IFLA_MCTP_NET = 0x1 + IFLA_DSA_UNSPEC = 0x0 + IFLA_DSA_CONDUIT = 0x1 + IFLA_DSA_MASTER = 0x1 ) const ( @@ -3399,7 +3463,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 ) type FsverityDigest struct { @@ -4183,7 +4247,8 @@ const ( ) type LandlockRulesetAttr struct { - Access_fs uint64 + Access_fs uint64 + Access_net uint64 } type LandlockPathBeneathAttr struct { @@ -5134,7 +5199,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1b + NL80211_FREQUENCY_ATTR_MAX = 0x1c NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5547,7 +5612,7 @@ const ( NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2 NL80211_REGDOM_TYPE_INTERSECTION = 0x3 NL80211_REGDOM_TYPE_WORLD = 0x1 - NL80211_REG_RULE_ATTR_MAX = 0x7 + NL80211_REG_RULE_ATTR_MAX = 0x8 NL80211_REKEY_DATA_AKM = 0x4 NL80211_REKEY_DATA_KCK = 0x2 NL80211_REKEY_DATA_KEK = 0x1 diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go index b8ad1925..d4577a42 100644 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -37,14 +37,17 @@ func (token Token) Environ(inheritExisting bool) (env []string, err 
error) { return nil, err } defer DestroyEnvironmentBlock(block) - blockp := unsafe.Pointer(block) - for { - entry := UTF16PtrToString((*uint16)(blockp)) - if len(entry) == 0 { - break + size := unsafe.Sizeof(*block) + for *block != 0 { + // find NUL terminator + end := unsafe.Pointer(block) + for *(*uint16)(end) != 0 { + end = unsafe.Add(end, size) } - env = append(env, entry) - blockp = unsafe.Add(blockp, 2*(len(entry)+1)) + + entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size) + env = append(env, UTF16ToString(entry)) + block = (*uint16)(unsafe.Add(end, size)) } return env, nil } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index ffb8708c..6395a031 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -125,8 +125,7 @@ func UTF16PtrToString(p *uint16) string { for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) } - - return string(utf16.Decode(unsafe.Slice(p, n))) + return UTF16ToString(unsafe.Slice(p, n)) } func Getpagesize() int { return 4096 } diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index 25329b76..4b177c82 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -322,6 +322,10 @@ func (d decoder) skipJSONValue() error { if open > d.opts.RecursionLimit { return errors.New("exceeded max recursion depth") } + case json.EOF: + // This can only happen if there's a bug in Decoder.Read. + // Avoid an infinite loop if this does happen. 
+ return errors.New("unexpected EOF") } if open == 0 { return nil diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go new file mode 100644 index 00000000..14656b65 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go @@ -0,0 +1,12 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package editiondefaults contains the binary representation of the editions +// defaults. +package editiondefaults + +import _ "embed" + +//go:embed editions_defaults.binpb +var Defaults []byte diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb similarity index 69% rename from vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb rename to vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index 1a8610a8..18f07568 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb +++ b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb @@ -1,4 +1,4 @@ -  (0æ +  (0æ   (0ç   (0è æ(è \ No newline at end of file diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go index d043a6eb..d2b3ac03 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -121,7 +121,7 @@ func (d *Decoder) Read() (Token, error) { case ObjectClose: if len(d.openStack) == 0 || - d.lastToken.kind == comma || + d.lastToken.kind&(Name|comma) != 0 || d.openStack[len(d.openStack)-1] != ObjectOpen { return Token{}, d.newSyntaxError(tok.pos, 
unexpectedFmt, tok.RawString()) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 193c68e8..8826bcf4 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -68,7 +68,7 @@ type ( Extensions Extensions Services Services - EditionFeatures FileEditionFeatures + EditionFeatures EditionFeatures } FileL2 struct { Options func() protoreflect.ProtoMessage @@ -76,10 +76,13 @@ type ( Locations SourceLocations } - FileEditionFeatures struct { + EditionFeatures struct { // IsFieldPresence is true if field_presence is EXPLICIT // https://protobuf.dev/editions/features/#field_presence IsFieldPresence bool + // IsFieldPresence is true if field_presence is LEGACY_REQUIRED + // https://protobuf.dev/editions/features/#field_presence + IsLegacyRequired bool // IsOpenEnum is true if enum_type is OPEN // https://protobuf.dev/editions/features/#enum_type IsOpenEnum bool @@ -95,6 +98,9 @@ type ( // IsJSONCompliant is true if json_format is ALLOW // https://protobuf.dev/editions/features/#json_format IsJSONCompliant bool + // GenerateLegacyUnmarshalJSON determines if the plugin generates the + // UnmarshalJSON([]byte) error method for enums. + GenerateLegacyUnmarshalJSON bool } ) @@ -156,6 +162,8 @@ type ( } EnumL1 struct { eagerValues bool // controls whether EnumL2.Values is already populated + + EditionFeatures EditionFeatures } EnumL2 struct { Options func() protoreflect.ProtoMessage @@ -217,6 +225,8 @@ type ( Extensions Extensions IsMapEntry bool // promoted from google.protobuf.MessageOptions IsMessageSet bool // promoted from google.protobuf.MessageOptions + + EditionFeatures EditionFeatures } MessageL2 struct { Options func() protoreflect.ProtoMessage @@ -250,8 +260,7 @@ type ( Enum protoreflect.EnumDescriptor Message protoreflect.MessageDescriptor - // Edition features. 
- Presence bool + EditionFeatures EditionFeatures } Oneof struct { @@ -261,6 +270,8 @@ type ( OneofL1 struct { Options func() protoreflect.ProtoMessage Fields OneofFields // must be consistent with Message.Fields.ContainingOneof + + EditionFeatures EditionFeatures } ) @@ -310,26 +321,36 @@ func (fd *Field) Options() protoreflect.ProtoMessage { } func (fd *Field) Number() protoreflect.FieldNumber { return fd.L1.Number } func (fd *Field) Cardinality() protoreflect.Cardinality { return fd.L1.Cardinality } -func (fd *Field) Kind() protoreflect.Kind { return fd.L1.Kind } -func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } -func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } -func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } +func (fd *Field) Kind() protoreflect.Kind { + return fd.L1.Kind +} +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { - if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { - return fd.L1.Presence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil + if fd.L1.Cardinality == protoreflect.Repeated { + return false } - return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) + explicitFieldPresence := fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsFieldPresence + return fd.Syntax() == protoreflect.Proto2 || explicitFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil } func (fd *Field) HasOptionalKeyword() bool { return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional } func (fd *Field) IsPacked() bool { - if !fd.L1.HasPacked && 
fd.L0.ParentFile.L1.Syntax != protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Repeated { - switch fd.L1.Kind { - case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: - default: - return true - } + if fd.L1.Cardinality != protoreflect.Repeated { + return false + } + switch fd.L1.Kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { + return fd.L1.EditionFeatures.IsPacked + } + if fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 { + // proto3 repeated fields are packed by default. + return !fd.L1.HasPacked || fd.L1.IsPacked } return fd.L1.IsPacked } @@ -378,6 +399,9 @@ func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} // WARNING: This method is exempt from the compatibility promise and may be // removed in the future without warning. func (fd *Field) EnforceUTF8() bool { + if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { + return fd.L1.EditionFeatures.IsUTF8Validated + } if fd.L1.HasEnforceUTF8 { return fd.L1.EnforceUTF8 } @@ -404,10 +428,11 @@ type ( L2 *ExtensionL2 // protected by fileDesc.once } ExtensionL1 struct { - Number protoreflect.FieldNumber - Extendee protoreflect.MessageDescriptor - Cardinality protoreflect.Cardinality - Kind protoreflect.Kind + Number protoreflect.FieldNumber + Extendee protoreflect.MessageDescriptor + Cardinality protoreflect.Cardinality + Kind protoreflect.Kind + EditionFeatures EditionFeatures } ExtensionL2 struct { Options func() protoreflect.ProtoMessage diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 4a1584c9..237e64fd 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -5,6 +5,7 @@ package filedesc import ( + "fmt" 
"sync" "google.golang.org/protobuf/encoding/protowire" @@ -98,6 +99,7 @@ func (fd *File) unmarshalSeed(b []byte) { var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions, numServices int var posEnums, posMessages, posExtensions, posServices int + var options []byte b0 := b for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -113,6 +115,8 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Syntax = protoreflect.Proto2 case "proto3": fd.L1.Syntax = protoreflect.Proto3 + case "editions": + fd.L1.Syntax = protoreflect.Editions default: panic("invalid syntax") } @@ -120,6 +124,8 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Path = sb.MakeString(v) case genid.FileDescriptorProto_Package_field_number: fd.L1.Package = protoreflect.FullName(sb.MakeString(v)) + case genid.FileDescriptorProto_Options_field_number: + options = v case genid.FileDescriptorProto_EnumType_field_number: if prevField != genid.FileDescriptorProto_EnumType_field_number { if numEnums > 0 { @@ -154,6 +160,13 @@ func (fd *File) unmarshalSeed(b []byte) { numServices++ } prevField = num + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_Edition_field_number: + fd.L1.Edition = Edition(v) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -166,6 +179,15 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Syntax = protoreflect.Proto2 } + if fd.L1.Syntax == protoreflect.Editions { + fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition) + } + + // Parse editions features from options if any + if options != nil { + fd.unmarshalSeedOptions(options) + } + // Must allocate all declarations before parsing each descriptor type // to ensure we handled all descriptors in "flattened ordering". 
if numEnums > 0 { @@ -219,6 +241,28 @@ func (fd *File) unmarshalSeed(b []byte) { } } +func (fd *File) unmarshalSeedOptions(b []byte) { + for b := b; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FileOptions_Features_field_number: + if fd.Syntax() != protoreflect.Editions { + panic(fmt.Sprintf("invalid descriptor: using edition features in a proto with syntax %s", fd.Syntax())) + } + fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { ed.L0.ParentFile = pf ed.L0.Parent = pd @@ -275,6 +319,7 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor md.L0.ParentFile = pf md.L0.Parent = pd md.L0.Index = i + md.L1.EditionFeatures = featuresFromParentDesc(md.Parent()) var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions int @@ -380,6 +425,13 @@ func (md *Message) unmarshalSeedOptions(b []byte) { case genid.MessageOptions_MessageSetWireFormat_field_number: md.L1.IsMessageSet = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.MessageOptions_Features_field_number: + md.L1.EditionFeatures = unmarshalFeatureSet(v, md.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 736a19a7..482a61cc 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -414,6 +414,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf 
*File, pd protoref fd.L0.ParentFile = pf fd.L0.Parent = pd fd.L0.Index = i + fd.L1.EditionFeatures = featuresFromParentDesc(fd.Parent()) var rawTypeName []byte var rawOptions []byte @@ -465,6 +466,12 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref b = b[m:] } } + if fd.Syntax() == protoreflect.Editions && fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded { + fd.L1.Kind = protoreflect.GroupKind + } + if fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsLegacyRequired { + fd.L1.Cardinality = protoreflect.Required + } if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch fd.L1.Kind { @@ -497,6 +504,13 @@ func (fd *Field) unmarshalOptions(b []byte) { fd.L1.HasEnforceUTF8 = true fd.L1.EnforceUTF8 = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -534,6 +548,7 @@ func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { var rawTypeName []byte var rawOptions []byte + xd.L1.EditionFeatures = featuresFromParentDesc(xd.L1.Extendee) xd.L2 = new(ExtensionL2) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -565,6 +580,12 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { b = b[m:] } } + if xd.Syntax() == protoreflect.Editions && xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded { + xd.L1.Kind = protoreflect.GroupKind + } + if xd.Syntax() == protoreflect.Editions && xd.L1.EditionFeatures.IsLegacyRequired { + xd.L1.Cardinality = protoreflect.Required + } if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch xd.L1.Kind { @@ -589,6 +610,13 @@ func (xd *Extension) 
unmarshalOptions(b []byte) { case genid.FieldOptions_Packed_field_number: xd.L2.IsPacked = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go new file mode 100644 index 00000000..0375a49d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -0,0 +1,142 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "fmt" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/editiondefaults" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var defaultsCache = make(map[Edition]EditionFeatures) + +func init() { + unmarshalEditionDefaults(editiondefaults.Defaults) +} + +func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { + for len(b) > 0 { + num, _, n := protowire.ConsumeTag(b) + b = b[n:] + switch num { + case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v) + default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num)) + } + } + return parent +} + +func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case 
genid.FeatureSet_FieldPresence_field_number: + parent.IsFieldPresence = v == genid.FeatureSet_EXPLICIT_enum_value || v == genid.FeatureSet_LEGACY_REQUIRED_enum_value + parent.IsLegacyRequired = v == genid.FeatureSet_LEGACY_REQUIRED_enum_value + case genid.FeatureSet_EnumType_field_number: + parent.IsOpenEnum = v == genid.FeatureSet_OPEN_enum_value + case genid.FeatureSet_RepeatedFieldEncoding_field_number: + parent.IsPacked = v == genid.FeatureSet_PACKED_enum_value + case genid.FeatureSet_Utf8Validation_field_number: + parent.IsUTF8Validated = v == genid.FeatureSet_VERIFY_enum_value + case genid.FeatureSet_MessageEncoding_field_number: + parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value + case genid.FeatureSet_JsonFormat_field_number: + parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value + default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num)) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + parent = unmarshalGoFeature(v, parent) + } + } + } + + return parent +} + +func featuresFromParentDesc(parentDesc protoreflect.Descriptor) EditionFeatures { + var parentFS EditionFeatures + switch p := parentDesc.(type) { + case *File: + parentFS = p.L1.EditionFeatures + case *Message: + parentFS = p.L1.EditionFeatures + default: + panic(fmt.Sprintf("unknown parent type %T", parentDesc)) + } + return parentFS +} + +func unmarshalEditionDefault(b []byte) { + var ed Edition + var fs EditionFeatures + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number: + ed = Edition(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case 
genid.FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number: + fs = unmarshalFeatureSet(v, fs) + } + } + } + defaultsCache[ed] = fs +} + +func unmarshalEditionDefaults(b []byte) { + for len(b) > 0 { + num, _, n := protowire.ConsumeTag(b) + b = b[n:] + switch num { + case genid.FeatureSetDefaults_Defaults_field_number: + def, m := protowire.ConsumeBytes(b) + b = b[m:] + unmarshalEditionDefault(def) + case genid.FeatureSetDefaults_MinimumEdition_field_number, + genid.FeatureSetDefaults_MaximumEdition_field_number: + // We don't care about the minimum and maximum editions. If the + // edition we are looking for later on is not in the cache we know + // it is outside of the range between minimum and maximum edition. + _, m := protowire.ConsumeVarint(b) + b = b[m:] + default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling EditionDefault", num)) + } + } +} + +func getFeaturesFor(ed Edition) EditionFeatures { + if def, ok := defaultsCache[ed]; ok { + return def + } + panic(fmt.Sprintf("unsupported edition: %v", ed)) +} diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 8f94230e..40272c89 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -18,6 +18,21 @@ const ( Edition_enum_name = "Edition" ) +// Enum values for google.protobuf.Edition. 
+const ( + Edition_EDITION_UNKNOWN_enum_value = 0 + Edition_EDITION_PROTO2_enum_value = 998 + Edition_EDITION_PROTO3_enum_value = 999 + Edition_EDITION_2023_enum_value = 1000 + Edition_EDITION_2024_enum_value = 1001 + Edition_EDITION_1_TEST_ONLY_enum_value = 1 + Edition_EDITION_2_TEST_ONLY_enum_value = 2 + Edition_EDITION_99997_TEST_ONLY_enum_value = 99997 + Edition_EDITION_99998_TEST_ONLY_enum_value = 99998 + Edition_EDITION_99999_TEST_ONLY_enum_value = 99999 + Edition_EDITION_MAX_enum_value = 2147483647 +) + // Names for google.protobuf.FileDescriptorSet. const ( FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" @@ -213,6 +228,12 @@ const ( ExtensionRangeOptions_VerificationState_enum_name = "VerificationState" ) +// Enum values for google.protobuf.ExtensionRangeOptions.VerificationState. +const ( + ExtensionRangeOptions_DECLARATION_enum_value = 0 + ExtensionRangeOptions_UNVERIFIED_enum_value = 1 +) + // Names for google.protobuf.ExtensionRangeOptions.Declaration. const ( ExtensionRangeOptions_Declaration_message_name protoreflect.Name = "Declaration" @@ -297,12 +318,41 @@ const ( FieldDescriptorProto_Type_enum_name = "Type" ) +// Enum values for google.protobuf.FieldDescriptorProto.Type. 
+const ( + FieldDescriptorProto_TYPE_DOUBLE_enum_value = 1 + FieldDescriptorProto_TYPE_FLOAT_enum_value = 2 + FieldDescriptorProto_TYPE_INT64_enum_value = 3 + FieldDescriptorProto_TYPE_UINT64_enum_value = 4 + FieldDescriptorProto_TYPE_INT32_enum_value = 5 + FieldDescriptorProto_TYPE_FIXED64_enum_value = 6 + FieldDescriptorProto_TYPE_FIXED32_enum_value = 7 + FieldDescriptorProto_TYPE_BOOL_enum_value = 8 + FieldDescriptorProto_TYPE_STRING_enum_value = 9 + FieldDescriptorProto_TYPE_GROUP_enum_value = 10 + FieldDescriptorProto_TYPE_MESSAGE_enum_value = 11 + FieldDescriptorProto_TYPE_BYTES_enum_value = 12 + FieldDescriptorProto_TYPE_UINT32_enum_value = 13 + FieldDescriptorProto_TYPE_ENUM_enum_value = 14 + FieldDescriptorProto_TYPE_SFIXED32_enum_value = 15 + FieldDescriptorProto_TYPE_SFIXED64_enum_value = 16 + FieldDescriptorProto_TYPE_SINT32_enum_value = 17 + FieldDescriptorProto_TYPE_SINT64_enum_value = 18 +) + // Full and short names for google.protobuf.FieldDescriptorProto.Label. const ( FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label" FieldDescriptorProto_Label_enum_name = "Label" ) +// Enum values for google.protobuf.FieldDescriptorProto.Label. +const ( + FieldDescriptorProto_LABEL_OPTIONAL_enum_value = 1 + FieldDescriptorProto_LABEL_REPEATED_enum_value = 3 + FieldDescriptorProto_LABEL_REQUIRED_enum_value = 2 +) + // Names for google.protobuf.OneofDescriptorProto. 
const ( OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto" @@ -474,7 +524,6 @@ const ( FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services" FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services" FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services" - FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services" FileOptions_Deprecated_field_name protoreflect.Name = "deprecated" FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas" FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix" @@ -497,7 +546,6 @@ const ( FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services" FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services" FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services" - FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services" FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated" FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas" FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix" @@ -523,7 +571,6 @@ const ( FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16 FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17 FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18 - FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42 FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23 FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31 
FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36 @@ -543,6 +590,13 @@ const ( FileOptions_OptimizeMode_enum_name = "OptimizeMode" ) +// Enum values for google.protobuf.FileOptions.OptimizeMode. +const ( + FileOptions_SPEED_enum_value = 1 + FileOptions_CODE_SIZE_enum_value = 2 + FileOptions_LITE_RUNTIME_enum_value = 3 +) + // Names for google.protobuf.MessageOptions. const ( MessageOptions_message_name protoreflect.Name = "MessageOptions" @@ -639,24 +693,59 @@ const ( FieldOptions_CType_enum_name = "CType" ) +// Enum values for google.protobuf.FieldOptions.CType. +const ( + FieldOptions_STRING_enum_value = 0 + FieldOptions_CORD_enum_value = 1 + FieldOptions_STRING_PIECE_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.JSType. const ( FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType" FieldOptions_JSType_enum_name = "JSType" ) +// Enum values for google.protobuf.FieldOptions.JSType. +const ( + FieldOptions_JS_NORMAL_enum_value = 0 + FieldOptions_JS_STRING_enum_value = 1 + FieldOptions_JS_NUMBER_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.OptionRetention. const ( FieldOptions_OptionRetention_enum_fullname = "google.protobuf.FieldOptions.OptionRetention" FieldOptions_OptionRetention_enum_name = "OptionRetention" ) +// Enum values for google.protobuf.FieldOptions.OptionRetention. +const ( + FieldOptions_RETENTION_UNKNOWN_enum_value = 0 + FieldOptions_RETENTION_RUNTIME_enum_value = 1 + FieldOptions_RETENTION_SOURCE_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.OptionTargetType. const ( FieldOptions_OptionTargetType_enum_fullname = "google.protobuf.FieldOptions.OptionTargetType" FieldOptions_OptionTargetType_enum_name = "OptionTargetType" ) +// Enum values for google.protobuf.FieldOptions.OptionTargetType. 
+const ( + FieldOptions_TARGET_TYPE_UNKNOWN_enum_value = 0 + FieldOptions_TARGET_TYPE_FILE_enum_value = 1 + FieldOptions_TARGET_TYPE_EXTENSION_RANGE_enum_value = 2 + FieldOptions_TARGET_TYPE_MESSAGE_enum_value = 3 + FieldOptions_TARGET_TYPE_FIELD_enum_value = 4 + FieldOptions_TARGET_TYPE_ONEOF_enum_value = 5 + FieldOptions_TARGET_TYPE_ENUM_enum_value = 6 + FieldOptions_TARGET_TYPE_ENUM_ENTRY_enum_value = 7 + FieldOptions_TARGET_TYPE_SERVICE_enum_value = 8 + FieldOptions_TARGET_TYPE_METHOD_enum_value = 9 +) + // Names for google.protobuf.FieldOptions.EditionDefault. const ( FieldOptions_EditionDefault_message_name protoreflect.Name = "EditionDefault" @@ -813,6 +902,13 @@ const ( MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel" ) +// Enum values for google.protobuf.MethodOptions.IdempotencyLevel. +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN_enum_value = 0 + MethodOptions_NO_SIDE_EFFECTS_enum_value = 1 + MethodOptions_IDEMPOTENT_enum_value = 2 +) + // Names for google.protobuf.UninterpretedOption. const ( UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption" @@ -909,36 +1005,79 @@ const ( FeatureSet_FieldPresence_enum_name = "FieldPresence" ) +// Enum values for google.protobuf.FeatureSet.FieldPresence. +const ( + FeatureSet_FIELD_PRESENCE_UNKNOWN_enum_value = 0 + FeatureSet_EXPLICIT_enum_value = 1 + FeatureSet_IMPLICIT_enum_value = 2 + FeatureSet_LEGACY_REQUIRED_enum_value = 3 +) + // Full and short names for google.protobuf.FeatureSet.EnumType. const ( FeatureSet_EnumType_enum_fullname = "google.protobuf.FeatureSet.EnumType" FeatureSet_EnumType_enum_name = "EnumType" ) +// Enum values for google.protobuf.FeatureSet.EnumType. +const ( + FeatureSet_ENUM_TYPE_UNKNOWN_enum_value = 0 + FeatureSet_OPEN_enum_value = 1 + FeatureSet_CLOSED_enum_value = 2 +) + // Full and short names for google.protobuf.FeatureSet.RepeatedFieldEncoding. 
const ( FeatureSet_RepeatedFieldEncoding_enum_fullname = "google.protobuf.FeatureSet.RepeatedFieldEncoding" FeatureSet_RepeatedFieldEncoding_enum_name = "RepeatedFieldEncoding" ) +// Enum values for google.protobuf.FeatureSet.RepeatedFieldEncoding. +const ( + FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN_enum_value = 0 + FeatureSet_PACKED_enum_value = 1 + FeatureSet_EXPANDED_enum_value = 2 +) + // Full and short names for google.protobuf.FeatureSet.Utf8Validation. const ( FeatureSet_Utf8Validation_enum_fullname = "google.protobuf.FeatureSet.Utf8Validation" FeatureSet_Utf8Validation_enum_name = "Utf8Validation" ) +// Enum values for google.protobuf.FeatureSet.Utf8Validation. +const ( + FeatureSet_UTF8_VALIDATION_UNKNOWN_enum_value = 0 + FeatureSet_VERIFY_enum_value = 2 + FeatureSet_NONE_enum_value = 3 +) + // Full and short names for google.protobuf.FeatureSet.MessageEncoding. const ( FeatureSet_MessageEncoding_enum_fullname = "google.protobuf.FeatureSet.MessageEncoding" FeatureSet_MessageEncoding_enum_name = "MessageEncoding" ) +// Enum values for google.protobuf.FeatureSet.MessageEncoding. +const ( + FeatureSet_MESSAGE_ENCODING_UNKNOWN_enum_value = 0 + FeatureSet_LENGTH_PREFIXED_enum_value = 1 + FeatureSet_DELIMITED_enum_value = 2 +) + // Full and short names for google.protobuf.FeatureSet.JsonFormat. const ( FeatureSet_JsonFormat_enum_fullname = "google.protobuf.FeatureSet.JsonFormat" FeatureSet_JsonFormat_enum_name = "JsonFormat" ) +// Enum values for google.protobuf.FeatureSet.JsonFormat. +const ( + FeatureSet_JSON_FORMAT_UNKNOWN_enum_value = 0 + FeatureSet_ALLOW_enum_value = 1 + FeatureSet_LEGACY_BEST_EFFORT_enum_value = 2 +) + // Names for google.protobuf.FeatureSetDefaults. 
const ( FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" @@ -1085,3 +1224,10 @@ const ( GeneratedCodeInfo_Annotation_Semantic_enum_fullname = "google.protobuf.GeneratedCodeInfo.Annotation.Semantic" GeneratedCodeInfo_Annotation_Semantic_enum_name = "Semantic" ) + +// Enum values for google.protobuf.GeneratedCodeInfo.Annotation.Semantic. +const ( + GeneratedCodeInfo_Annotation_NONE_enum_value = 0 + GeneratedCodeInfo_Annotation_SET_enum_value = 1 + GeneratedCodeInfo_Annotation_ALIAS_enum_value = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go new file mode 100644 index 00000000..fd9015e8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_reflect_protodesc_proto_go_features_proto = "reflect/protodesc/proto/go_features.proto" + +// Names for google.protobuf.GoFeatures. +const ( + GoFeatures_message_name protoreflect.Name = "GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" +) + +// Field names for google.protobuf.GoFeatures. +const ( + GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" + + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" +) + +// Field numbers for google.protobuf.GoFeatures. 
+const ( + GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go index 1a38944b..ad6f80c4 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go @@ -18,6 +18,11 @@ const ( NullValue_enum_name = "NullValue" ) +// Enum values for google.protobuf.NullValue. +const ( + NullValue_NULL_VALUE_enum_value = 0 +) + // Names for google.protobuf.Struct. const ( Struct_message_name protoreflect.Name = "Struct" diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go index e0f75fea..49bc73e2 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -18,6 +18,13 @@ const ( Syntax_enum_name = "Syntax" ) +// Enum values for google.protobuf.Syntax. +const ( + Syntax_SYNTAX_PROTO2_enum_value = 0 + Syntax_SYNTAX_PROTO3_enum_value = 1 + Syntax_SYNTAX_EDITIONS_enum_value = 2 +) + // Names for google.protobuf.Type. const ( Type_message_name protoreflect.Name = "Type" @@ -105,12 +112,43 @@ const ( Field_Kind_enum_name = "Kind" ) +// Enum values for google.protobuf.Field.Kind. 
+const ( + Field_TYPE_UNKNOWN_enum_value = 0 + Field_TYPE_DOUBLE_enum_value = 1 + Field_TYPE_FLOAT_enum_value = 2 + Field_TYPE_INT64_enum_value = 3 + Field_TYPE_UINT64_enum_value = 4 + Field_TYPE_INT32_enum_value = 5 + Field_TYPE_FIXED64_enum_value = 6 + Field_TYPE_FIXED32_enum_value = 7 + Field_TYPE_BOOL_enum_value = 8 + Field_TYPE_STRING_enum_value = 9 + Field_TYPE_GROUP_enum_value = 10 + Field_TYPE_MESSAGE_enum_value = 11 + Field_TYPE_BYTES_enum_value = 12 + Field_TYPE_UINT32_enum_value = 13 + Field_TYPE_ENUM_enum_value = 14 + Field_TYPE_SFIXED32_enum_value = 15 + Field_TYPE_SFIXED64_enum_value = 16 + Field_TYPE_SINT32_enum_value = 17 + Field_TYPE_SINT64_enum_value = 18 +) + // Full and short names for google.protobuf.Field.Cardinality. const ( Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality" Field_Cardinality_enum_name = "Cardinality" ) +// Enum values for google.protobuf.Field.Cardinality. +const ( + Field_CARDINALITY_UNKNOWN_enum_value = 0 + Field_CARDINALITY_OPTIONAL_enum_value = 1 + Field_CARDINALITY_REQUIRED_enum_value = 2 + Field_CARDINALITY_REPEATED_enum_value = 3 +) + // Names for google.protobuf.Enum. const ( Enum_message_name protoreflect.Name = "Enum" diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index e74cefdc..2b8f122c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -21,26 +21,18 @@ type extensionFieldInfo struct { validation validationInfo } -var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo - func getExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { if xi, ok := xt.(*ExtensionInfo); ok { xi.lazyInit() return xi.info } - return legacyLoadExtensionFieldInfo(xt) -} - -// legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. 
-func legacyLoadExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { - if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { - return xi.(*extensionFieldInfo) - } - e := makeExtensionFieldInfo(xt.TypeDescriptor()) - if e, ok := legacyMessageTypeCache.LoadOrStore(xt, e); ok { - return e.(*extensionFieldInfo) - } - return e + // Ideally we'd cache the resulting *extensionFieldInfo so we don't have to + // recompute this metadata repeatedly. But without support for something like + // weak references, such a cache would pin temporary values (like dynamic + // extension types, constructed for the duration of a user request) to the + // heap forever, causing memory usage of the cache to grow unbounded. + // See discussion in https://github.com/golang/protobuf/issues/1521. + return makeExtensionFieldInfo(xt.TypeDescriptor()) } func makeExtensionFieldInfo(xd protoreflect.ExtensionDescriptor) *extensionFieldInfo { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go index 576dcf3a..13077751 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -197,7 +197,7 @@ func fieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) case fd.Kind() == protoreflect.GroupKind: return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) - case fd.Syntax() == protoreflect.Proto3 && fd.ContainingOneof() == nil: + case !fd.HasPresence() && fd.ContainingOneof() == nil: // Populated oneof fields always encode even if set to the zero value, // which normally are not encoded in proto3. 
switch fd.Kind() { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 5e736c60..986322b1 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -538,6 +538,6 @@ func isZero(v reflect.Value) bool { } return true default: - panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()}) + panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()}) } } diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings.go b/vendor/google.golang.org/protobuf/internal/strs/strings.go index 0b74e765..a6e7df24 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings.go @@ -17,7 +17,7 @@ import ( // EnforceUTF8 reports whether to enforce strict UTF-8 validation. func EnforceUTF8(fd protoreflect.FieldDescriptor) bool { - if flags.ProtoLegacy { + if flags.ProtoLegacy || fd.Syntax() == protoreflect.Editions { if fd, ok := fd.(interface{ EnforceUTF8() bool }); ok { return fd.EnforceUTF8() } diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index d8f48faf..a50fcfb4 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,7 +51,7 @@ import ( // 10. Send out the CL for review and submit it. 
const ( Major = 1 - Minor = 32 + Minor = 33 Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index aff6fd49..b3278163 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -28,6 +28,7 @@ func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProt opts = proto.Clone(opts).(*descriptorpb.EnumOptions) e.L2.Options = func() protoreflect.ProtoMessage { return opts } } + e.L1.EditionFeatures = mergeEditionFeatures(parent, ed.GetOptions().GetFeatures()) for _, s := range ed.GetReservedName() { e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) } @@ -68,6 +69,9 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { return nil, err } + if m.Base.L0.ParentFile.Syntax() == protoreflect.Editions { + m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) + } if opts := md.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.MessageOptions) m.L2.Options = func() protoreflect.ProtoMessage { return opts } @@ -114,6 +118,27 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt return ms, nil } +// canBePacked returns whether the field can use packed encoding: +// https://protobuf.dev/programming-guides/encoding/#packed +func canBePacked(fd *descriptorpb.FieldDescriptorProto) bool { + if fd.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + return false // not a repeated field + } + + switch protoreflect.Kind(fd.GetType()) { + case protoreflect.MessageKind, protoreflect.GroupKind: + return false // not a scalar type field + + case protoreflect.StringKind, protoreflect.BytesKind: + // string and bytes can explicitly not be 
declared as packed, + // see https://protobuf.dev/programming-guides/encoding/#packed + return false + + default: + return true + } +} + func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers for i, fd := range fds { @@ -139,12 +164,16 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc } if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions { - f.L1.Presence = resolveFeatureHasFieldPresence(f.Base.L0.ParentFile, fd) + f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures()) + + if f.L1.EditionFeatures.IsLegacyRequired { + f.L1.Cardinality = protoreflect.Required + } // We reuse the existing field because the old option `[packed = // true]` is mutually exclusive with the editions feature. - if fd.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + if canBePacked(fd) { f.L1.HasPacked = true - f.L1.IsPacked = resolveFeatureRepeatedFieldEncodingPacked(f.Base.L0.ParentFile, fd) + f.L1.IsPacked = f.L1.EditionFeatures.IsPacked } // We pretend this option is always explicitly set because the only @@ -155,9 +184,9 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc // requested from the descriptor). // In proto2/proto3 syntax HasEnforceUTF8 might be false. 
f.L1.HasEnforceUTF8 = true - f.L1.EnforceUTF8 = resolveFeatureEnforceUTF8(f.Base.L0.ParentFile, fd) + f.L1.EnforceUTF8 = f.L1.EditionFeatures.IsUTF8Validated - if f.L1.Kind == protoreflect.MessageKind && resolveFeatureDelimitedEncoding(f.Base.L0.ParentFile, fd) { + if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded { f.L1.Kind = protoreflect.GroupKind } } @@ -175,6 +204,9 @@ func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDesc if opts := od.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.OneofOptions) o.L1.Options = func() protoreflect.ProtoMessage { return opts } + if parent.Syntax() == protoreflect.Editions { + o.L1.EditionFeatures = mergeEditionFeatures(parent, opts.GetFeatures()) + } } } return os, nil diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index 27d7e350..254ca585 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -276,8 +276,8 @@ func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvab } else if err != nil { return v, ev, err } - if fd.Syntax() == protoreflect.Proto3 { - return v, ev, errors.New("cannot be specified under proto3 semantics") + if !fd.HasPresence() { + return v, ev, errors.New("cannot be specified with implicit field presence") } if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { return v, ev, errors.New("cannot be specified on composite types") diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 9af1d564..e4dcaf87 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ 
b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -107,7 +107,7 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if isMessageSet && !flags.ProtoLegacy { return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) } - if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + if isMessageSet && (m.Syntax() == protoreflect.Proto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) } if m.Syntax() == protoreflect.Proto3 { @@ -314,8 +314,8 @@ func checkValidGroup(fd protoreflect.FieldDescriptor) error { switch { case fd.Kind() != protoreflect.GroupKind: return nil - case fd.Syntax() != protoreflect.Proto2: - return errors.New("invalid under proto2 semantics") + case fd.Syntax() == protoreflect.Proto3: + return errors.New("invalid under proto3 semantics") case md == nil || md.IsPlaceholder(): return errors.New("message must be resolvable") case fd.FullName().Parent() != md.FullName().Parent(): diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 7352926c..2a6b29d1 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -5,14 +5,16 @@ package protodesc import ( - _ "embed" "fmt" "os" "sync" + "google.golang.org/protobuf/internal/editiondefaults" "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" + gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" ) const ( @@ -20,14 +22,12 @@ const ( SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023 ) -//go:embed editions_defaults.binpb 
-var binaryEditionDefaults []byte var defaults = &descriptorpb.FeatureSetDefaults{} var defaultsCacheMu sync.Mutex var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet) func init() { - err := proto.Unmarshal(binaryEditionDefaults, defaults) + err := proto.Unmarshal(editiondefaults.Defaults, defaults) if err != nil { fmt.Fprintf(os.Stderr, "unmarshal editions defaults: %v\n", err) os.Exit(1) @@ -83,37 +83,56 @@ func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet { return fs } -func resolveFeatureHasFieldPresence(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { - fs := fieldDesc.GetOptions().GetFeatures() - if fs == nil || fs.FieldPresence == nil { - return fileDesc.L1.EditionFeatures.IsFieldPresence +// mergeEditionFeatures merges the parent and child feature sets. This function +// should be used when initializing Go descriptors from descriptor protos which +// is why the parent is a filedesc.EditionsFeatures (Go representation) while +// the child is a descriptorproto.FeatureSet (protoc representation). +// Any feature set by the child overwrites what is set by the parent. 
+func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorpb.FeatureSet) filedesc.EditionFeatures { + var parentFS filedesc.EditionFeatures + switch p := parentDesc.(type) { + case *filedesc.File: + parentFS = p.L1.EditionFeatures + case *filedesc.Message: + parentFS = p.L1.EditionFeatures + default: + panic(fmt.Sprintf("unknown parent type %T", parentDesc)) + } + if child == nil { + return parentFS + } + if fp := child.FieldPresence; fp != nil { + parentFS.IsFieldPresence = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED || + *fp == descriptorpb.FeatureSet_EXPLICIT + parentFS.IsLegacyRequired = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED + } + if et := child.EnumType; et != nil { + parentFS.IsOpenEnum = *et == descriptorpb.FeatureSet_OPEN } - return fs.GetFieldPresence() == descriptorpb.FeatureSet_LEGACY_REQUIRED || - fs.GetFieldPresence() == descriptorpb.FeatureSet_EXPLICIT -} -func resolveFeatureRepeatedFieldEncodingPacked(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { - fs := fieldDesc.GetOptions().GetFeatures() - if fs == nil || fs.RepeatedFieldEncoding == nil { - return fileDesc.L1.EditionFeatures.IsPacked + if rfe := child.RepeatedFieldEncoding; rfe != nil { + parentFS.IsPacked = *rfe == descriptorpb.FeatureSet_PACKED } - return fs.GetRepeatedFieldEncoding() == descriptorpb.FeatureSet_PACKED -} -func resolveFeatureEnforceUTF8(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { - fs := fieldDesc.GetOptions().GetFeatures() - if fs == nil || fs.Utf8Validation == nil { - return fileDesc.L1.EditionFeatures.IsUTF8Validated + if utf8val := child.Utf8Validation; utf8val != nil { + parentFS.IsUTF8Validated = *utf8val == descriptorpb.FeatureSet_VERIFY } - return fs.GetUtf8Validation() == descriptorpb.FeatureSet_VERIFY -} -func resolveFeatureDelimitedEncoding(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { - fs := fieldDesc.GetOptions().GetFeatures() - if fs 
== nil || fs.MessageEncoding == nil { - return fileDesc.L1.EditionFeatures.IsDelimitedEncoded + if me := child.MessageEncoding; me != nil { + parentFS.IsDelimitedEncoded = *me == descriptorpb.FeatureSet_DELIMITED } - return fs.GetMessageEncoding() == descriptorpb.FeatureSet_DELIMITED + + if jf := child.JsonFormat; jf != nil { + parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW + } + + if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil { + if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil { + parentFS.GenerateLegacyUnmarshalJSON = *luje + } + } + + return parentFS } // initFileDescFromFeatureSet initializes editions related fields in fd based @@ -122,56 +141,8 @@ func resolveFeatureDelimitedEncoding(fileDesc *filedesc.File, fieldDesc *descrip // before calling this function. func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) { dfs := getFeatureSetFor(fd.L1.Edition) - if fs == nil { - fs = &descriptorpb.FeatureSet{} - } - - var fieldPresence descriptorpb.FeatureSet_FieldPresence - if fp := fs.FieldPresence; fp != nil { - fieldPresence = *fp - } else { - fieldPresence = *dfs.FieldPresence - } - fd.L1.EditionFeatures.IsFieldPresence = fieldPresence == descriptorpb.FeatureSet_LEGACY_REQUIRED || - fieldPresence == descriptorpb.FeatureSet_EXPLICIT - - var enumType descriptorpb.FeatureSet_EnumType - if et := fs.EnumType; et != nil { - enumType = *et - } else { - enumType = *dfs.EnumType - } - fd.L1.EditionFeatures.IsOpenEnum = enumType == descriptorpb.FeatureSet_OPEN - - var respeatedFieldEncoding descriptorpb.FeatureSet_RepeatedFieldEncoding - if rfe := fs.RepeatedFieldEncoding; rfe != nil { - respeatedFieldEncoding = *rfe - } else { - respeatedFieldEncoding = *dfs.RepeatedFieldEncoding - } - fd.L1.EditionFeatures.IsPacked = respeatedFieldEncoding == descriptorpb.FeatureSet_PACKED - - var isUTF8Validated descriptorpb.FeatureSet_Utf8Validation - if utf8val 
:= fs.Utf8Validation; utf8val != nil { - isUTF8Validated = *utf8val - } else { - isUTF8Validated = *dfs.Utf8Validation - } - fd.L1.EditionFeatures.IsUTF8Validated = isUTF8Validated == descriptorpb.FeatureSet_VERIFY - - var messageEncoding descriptorpb.FeatureSet_MessageEncoding - if me := fs.MessageEncoding; me != nil { - messageEncoding = *me - } else { - messageEncoding = *dfs.MessageEncoding - } - fd.L1.EditionFeatures.IsDelimitedEncoded = messageEncoding == descriptorpb.FeatureSet_DELIMITED - - var jsonFormat descriptorpb.FeatureSet_JsonFormat - if jf := fs.JsonFormat; jf != nil { - jsonFormat = *jf - } else { - jsonFormat = *dfs.JsonFormat - } - fd.L1.EditionFeatures.IsJSONCompliant = jsonFormat == descriptorpb.FeatureSet_ALLOW + // initialize the featureset with the defaults + fd.L1.EditionFeatures = mergeEditionFeatures(fd, dfs) + // overwrite any options explicitly specified + fd.L1.EditionFeatures = mergeEditionFeatures(fd, fs) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index ec6572df..00b01fbd 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -175,6 +175,8 @@ func (s Syntax) String() string { return "proto2" case Proto3: return "proto3" + case Editions: + return "editions" default: return fmt.Sprintf("", s) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 0c045db6..7dcc2ff0 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -160,8 +160,6 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte { b = p.appendSingularField(b, "java_generic_services", nil) case 18: b = p.appendSingularField(b, "py_generic_services", nil) - case 42: - b = 
p.appendSingularField(b, "php_generic_services", nil) case 23: b = p.appendSingularField(b, "deprecated", nil) case 31: diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 38daa858..78624cf6 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -64,6 +64,7 @@ const ( // should not be depended on, but they will always be time-ordered for easy // comparison. Edition_EDITION_2023 Edition = 1000 + Edition_EDITION_2024 Edition = 1001 // Placeholder editions for testing feature resolution. These should not be // used or relyed on outside of tests. Edition_EDITION_1_TEST_ONLY Edition = 1 @@ -71,31 +72,39 @@ const ( Edition_EDITION_99997_TEST_ONLY Edition = 99997 Edition_EDITION_99998_TEST_ONLY Edition = 99998 Edition_EDITION_99999_TEST_ONLY Edition = 99999 + // Placeholder for specifying unbounded edition support. This should only + // ever be used by plugins that can expect to never require any changes to + // support a new edition. + Edition_EDITION_MAX Edition = 2147483647 ) // Enum value maps for Edition. 
var ( Edition_name = map[int32]string{ - 0: "EDITION_UNKNOWN", - 998: "EDITION_PROTO2", - 999: "EDITION_PROTO3", - 1000: "EDITION_2023", - 1: "EDITION_1_TEST_ONLY", - 2: "EDITION_2_TEST_ONLY", - 99997: "EDITION_99997_TEST_ONLY", - 99998: "EDITION_99998_TEST_ONLY", - 99999: "EDITION_99999_TEST_ONLY", + 0: "EDITION_UNKNOWN", + 998: "EDITION_PROTO2", + 999: "EDITION_PROTO3", + 1000: "EDITION_2023", + 1001: "EDITION_2024", + 1: "EDITION_1_TEST_ONLY", + 2: "EDITION_2_TEST_ONLY", + 99997: "EDITION_99997_TEST_ONLY", + 99998: "EDITION_99998_TEST_ONLY", + 99999: "EDITION_99999_TEST_ONLY", + 2147483647: "EDITION_MAX", } Edition_value = map[string]int32{ "EDITION_UNKNOWN": 0, "EDITION_PROTO2": 998, "EDITION_PROTO3": 999, "EDITION_2023": 1000, + "EDITION_2024": 1001, "EDITION_1_TEST_ONLY": 1, "EDITION_2_TEST_ONLY": 2, "EDITION_99997_TEST_ONLY": 99997, "EDITION_99998_TEST_ONLY": 99998, "EDITION_99999_TEST_ONLY": 99999, + "EDITION_MAX": 2147483647, } ) @@ -954,21 +963,21 @@ type FeatureSet_Utf8Validation int32 const ( FeatureSet_UTF8_VALIDATION_UNKNOWN FeatureSet_Utf8Validation = 0 - FeatureSet_NONE FeatureSet_Utf8Validation = 1 FeatureSet_VERIFY FeatureSet_Utf8Validation = 2 + FeatureSet_NONE FeatureSet_Utf8Validation = 3 ) // Enum value maps for FeatureSet_Utf8Validation. var ( FeatureSet_Utf8Validation_name = map[int32]string{ 0: "UTF8_VALIDATION_UNKNOWN", - 1: "NONE", 2: "VERIFY", + 3: "NONE", } FeatureSet_Utf8Validation_value = map[string]int32{ "UTF8_VALIDATION_UNKNOWN": 0, - "NONE": 1, "VERIFY": 2, + "NONE": 3, } ) @@ -1643,12 +1652,12 @@ type FieldDescriptorProto struct { // If true, this is a proto3 "optional". When a proto3 field is optional, it // tracks presence regardless of field type. // - // When proto3_optional is true, this field must be belong to a oneof to - // signal to old proto3 clients that presence is tracked for this field. 
This - // oneof is known as a "synthetic" oneof, and this field must be its sole - // member (each proto3 optional field gets its own synthetic oneof). Synthetic - // oneofs exist in the descriptor only, and do not generate any API. Synthetic - // oneofs must be ordered after all "real" oneofs. + // When proto3_optional is true, this field must belong to a oneof to signal + // to old proto3 clients that presence is tracked for this field. This oneof + // is known as a "synthetic" oneof, and this field must be its sole member + // (each proto3 optional field gets its own synthetic oneof). Synthetic oneofs + // exist in the descriptor only, and do not generate any API. Synthetic oneofs + // must be ordered after all "real" oneofs. // // For message fields, proto3_optional doesn't create any semantic change, // since non-repeated message fields always track presence. However it still @@ -2195,7 +2204,6 @@ type FileOptions struct { CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` // Is this file deprecated? 
// Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very @@ -2244,7 +2252,6 @@ const ( Default_FileOptions_CcGenericServices = bool(false) Default_FileOptions_JavaGenericServices = bool(false) Default_FileOptions_PyGenericServices = bool(false) - Default_FileOptions_PhpGenericServices = bool(false) Default_FileOptions_Deprecated = bool(false) Default_FileOptions_CcEnableArenas = bool(true) ) @@ -2352,13 +2359,6 @@ func (x *FileOptions) GetPyGenericServices() bool { return Default_FileOptions_PyGenericServices } -func (x *FileOptions) GetPhpGenericServices() bool { - if x != nil && x.PhpGenericServices != nil { - return *x.PhpGenericServices - } - return Default_FileOptions_PhpGenericServices -} - func (x *FileOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2472,10 +2472,6 @@ type MessageOptions struct { // for the message, or it will be completely ignored; in the very least, // this is a formalization for deprecating messages. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - // // Whether the message is an automatically generated map entry type for the // maps field. // @@ -2496,6 +2492,10 @@ type MessageOptions struct { // use a native map in the target language to hold the keys and values. // The reflection APIs in such implementations still need to work as // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. 
MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` // Enable the legacy handling of JSON field name conflicts. This lowercases // and strips underscored from the fields before comparison in proto3 only. @@ -2655,19 +2655,11 @@ type FieldOptions struct { // call from multiple threads concurrently, while non-const methods continue // to require exclusive access. // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - // - // As of May 2022, lazy verifies the contents of the byte stream during - // parsing. An invalid byte stream will cause the overall parsing to fail. + // Note that lazy message fields are still eagerly verified to check + // ill-formed wireformat or missing required fields. Calling IsInitialized() + // on the outer message would fail if the inner message has missing required + // fields. Failed verification would result in parsing failure (except when + // uninitialized messages are acceptable). Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` // unverified_lazy does no correctness checks on the byte stream. This should // only be used where lazy with verification is prohibitive for performance @@ -4104,7 +4096,7 @@ type SourceCodeInfo_Location struct { // location. // // Each element is a field number or an index. 
They form a path from - // the root FileDescriptorProto to the place where the definition occurs. + // the root FileDescriptorProto to the place where the definition appears. // For example, this path: // // [ 4, 3, 2, 7, 1 ] @@ -4451,7 +4443,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xc7, 0x04, 0x0a, 0x15, 0x45, 0x78, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, @@ -4468,337 +4460,355 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x12, 0x68, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x73, 0x12, 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, - 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94, 0x01, 0x0a, 0x0b, - 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, - 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, - 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, - 
0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, - 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, - 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, - 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, - 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, - 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, - 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, - 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, - 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, - 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, - 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, - 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 
0x36, 0x34, 0x10, 0x10, - 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, - 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, - 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, - 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, - 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, - 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, - 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, - 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 
0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, - 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, - 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, - 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 
0x70, 0x65, - 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0xca, - 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, - 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, - 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, - 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, - 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x67, 
0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, - 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, - 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, - 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, - 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, - 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 
0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, - 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, - 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, - 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 
0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, - 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, - 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, - 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 
0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, - 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, - 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, - 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, - 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, 0x0a, 0x0e, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, - 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, - 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, - 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, - 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, - 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, - 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 
0x5f, 0x6c, 0x65, 0x67, - 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, - 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, - 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, - 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, - 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, - 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, - 0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 
0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, - 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, - 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, - 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, - 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, - 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, - 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, - 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, - 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, - 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, - 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, - 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, + 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, + 0x01, 0x02, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x94, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, + 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, + 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, + 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 
0x69, 0x65, + 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, + 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, - 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x2f, 0x0a, - 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, - 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, - 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, - 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, - 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, - 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, - 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, - 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 
0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, - 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, - 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, - 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, - 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, - 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, - 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, - 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, - 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, - 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, - 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, - 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, - 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, + 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 
0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, + 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, + 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, + 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, + 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, + 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, + 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 
0x45, 0x5f, 0x53, 0x49, + 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, + 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, + 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, + 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, + 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, + 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, + 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 
0x6e, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x22, 0x97, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, + 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, + 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, + 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, + 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, + 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x5f, 0x65, 0x71, 0x75, 
0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, + 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, + 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, + 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, + 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, + 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, + 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, + 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 
0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, + 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, + 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, + 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, + 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, + 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, + 
0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, + 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, + 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, + 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, + 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, + 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, + 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, + 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 
0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, + 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, + 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, + 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, + 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, + 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 
0x65, 0x6c, 0x64, 0x43, + 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, + 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, + 0x08, 0x09, 0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, + 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, + 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, + 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x24, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, + 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, + 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, + 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, + 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, + 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, + 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, + 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, + 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 
0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, + 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, + 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, + 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 
0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, + 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, + 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, + 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, + 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, + 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, + 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, + 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, + 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, + 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, + 0x10, 0x01, 0x12, 0x1f, 
0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, + 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, + 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, + 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, + 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, + 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, + 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, + 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, + 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 
0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, + 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, + 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, + 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, + 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, @@ 
-4807,276 +4817,258 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, - 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, - 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, - 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, - 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 
0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, - 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, - 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, - 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 
0x09, - 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, - 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, - 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, - 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, - 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, - 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, - 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, + 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 
0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, - 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, - 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, - 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, - 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, - 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, - 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, - 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x10, 0x70, 0x6f, 
0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, - 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, - 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, - 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, - 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, - 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, - 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, - 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, - 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfc, 0x09, 0x0a, 0x0a, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 
0x65, 0x6e, 0x63, 0x65, 0x42, 0x39, 0x88, - 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, - 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, - 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, - 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x50, - 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, - 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, 0x12, 0x06, - 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4f, 0x50, - 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, - 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, - 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, - 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52, 0x15, 0x72, - 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 
0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, + 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, + 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, - 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, 0x07, - 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0x52, 0x0e, - 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, - 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, - 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, - 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, - 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, - 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, - 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, - 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, - 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, - 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, - 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44, - 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, - 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x02, 0x12, - 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, - 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 
0x50, 0x45, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, - 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, 0x56, 0x0a, - 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, - 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, - 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, - 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x50, - 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, - 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, 0x38, 0x5f, - 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, - 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x01, 0x12, 0x0a, - 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, - 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, - 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, - 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, - 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, - 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, - 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, - 0x01, 
0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, - 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, - 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, - 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, - 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, - 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, - 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, - 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 
0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, - 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, + 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, + 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, + 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 
0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, + 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, + 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, + 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, + 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, + 0x65, 0x6e, 0x74, 
0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, + 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, + 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x0a, 0x0a, + 0x0a, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 
0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, + 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, + 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, + 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75, + 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, + 0x70, 0x65, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, + 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, + 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, - 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, - 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, - 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, - 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, - 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, - 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, - 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, - 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, - 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, - 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, - 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, - 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, - 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, - 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xea, 0x01, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 
0x4f, 0x4e, 0x5f, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, - 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, - 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, - 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, - 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, - 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, - 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, - 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, - 0x10, 0x9f, 0x8d, 0x06, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, - 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, - 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x50, 
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, + 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, + 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52, + 0x15, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, + 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01, + 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, + 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, + 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x78, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, + 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, + 0x45, 0x46, 0x49, 0x58, 
0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, + 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, + 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, + 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, + 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73, + 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, + 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, + 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, + 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, + 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, + 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 
0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, + 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, + 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, + 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, + 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, + 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, + 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x53, 0x0a, 0x0f, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, + 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, + 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, + 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, + 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, + 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, + 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, + 0x10, 0xe9, 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0xea, 0x07, + 0x10, 0xeb, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, + 
0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, + 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, + 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, + 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, + 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 
0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, + 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, + 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, + 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, + 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, + 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, + 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 
0x65, 0x64, 0x5f, 0x63, + 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, + 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, + 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, + 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, + 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, + 0x61, 0x6e, 
0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, + 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, + 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, + 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0x92, 0x02, 0x0a, 0x07, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, + 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, + 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, + 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, + 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, + 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, + 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, + 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 
0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, + 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, } var ( diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go new file mode 100644 index 00000000..25de5ae0 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -0,0 +1,177 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2023 Google Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: reflect/protodesc/proto/go_features.proto + +package proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +type GoFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether or not to generate the deprecated UnmarshalJSON method for enums. 
+ LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` +} + +func (x *GoFeatures) Reset() { + *x = GoFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GoFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GoFeatures) ProtoMessage() {} + +func (x *GoFeatures) ProtoReflect() protoreflect.Message { + mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GoFeatures.ProtoReflect.Descriptor instead. +func (*GoFeatures) Descriptor() ([]byte, []int) { + return file_reflect_protodesc_proto_go_features_proto_rawDescGZIP(), []int{0} +} + +func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { + if x != nil && x.LegacyUnmarshalJsonEnum != nil { + return *x.LegacyUnmarshalJsonEnum + } + return false +} + +var file_reflect_protodesc_proto_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FeatureSet)(nil), + ExtensionType: (*GoFeatures)(nil), + Field: 1002, + Name: "google.protobuf.go", + Tag: "bytes,1002,opt,name=go", + Filename: "reflect/protodesc/proto/go_features.proto", + }, +} + +// Extension fields to descriptorpb.FeatureSet. 
+var ( + // optional google.protobuf.GoFeatures go = 1002; + E_Go = &file_reflect_protodesc_proto_go_features_proto_extTypes[0] +) + +var File_reflect_protodesc_proto_go_features_proto protoreflect.FileDescriptor + +var file_reflect_protodesc_proto_go_features_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x64, + 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x1a, 0x20, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, + 0x0a, 0x0a, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x1a, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, + 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x1f, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75, + 0x65, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7, + 0x07, 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, + 0x61, 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x49, 0x0a, 0x02, 0x67, 0x6f, + 0x12, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x34, 
0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x64, 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, +} + +var ( + file_reflect_protodesc_proto_go_features_proto_rawDescOnce sync.Once + file_reflect_protodesc_proto_go_features_proto_rawDescData = file_reflect_protodesc_proto_go_features_proto_rawDesc +) + +func file_reflect_protodesc_proto_go_features_proto_rawDescGZIP() []byte { + file_reflect_protodesc_proto_go_features_proto_rawDescOnce.Do(func() { + file_reflect_protodesc_proto_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_reflect_protodesc_proto_go_features_proto_rawDescData) + }) + return file_reflect_protodesc_proto_go_features_proto_rawDescData +} + +var file_reflect_protodesc_proto_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_reflect_protodesc_proto_go_features_proto_goTypes = []interface{}{ + (*GoFeatures)(nil), // 0: google.protobuf.GoFeatures + (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet +} +var file_reflect_protodesc_proto_go_features_proto_depIdxs = []int32{ + 1, // 0: google.protobuf.go:extendee -> google.protobuf.FeatureSet + 0, // 1: google.protobuf.go:type_name -> google.protobuf.GoFeatures + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_reflect_protodesc_proto_go_features_proto_init() } +func file_reflect_protodesc_proto_go_features_proto_init() { + if File_reflect_protodesc_proto_go_features_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_reflect_protodesc_proto_go_features_proto_msgTypes[0].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*GoFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_reflect_protodesc_proto_go_features_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_reflect_protodesc_proto_go_features_proto_goTypes, + DependencyIndexes: file_reflect_protodesc_proto_go_features_proto_depIdxs, + MessageInfos: file_reflect_protodesc_proto_go_features_proto_msgTypes, + ExtensionInfos: file_reflect_protodesc_proto_go_features_proto_extTypes, + }.Build() + File_reflect_protodesc_proto_go_features_proto = out.File + file_reflect_protodesc_proto_go_features_proto_rawDesc = nil + file_reflect_protodesc_proto_go_features_proto_goTypes = nil + file_reflect_protodesc_proto_go_features_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto new file mode 100644 index 00000000..d2465712 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto @@ -0,0 +1,28 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2023 Google Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +syntax = "proto2"; + +package google.protobuf; + +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/protobuf/types/gofeaturespb"; + +extend google.protobuf.FeatureSet { + optional GoFeatures go = 1002; +} + +message GoFeatures { + // Whether or not to generate the deprecated UnmarshalJSON method for enums. 
+ optional bool legacy_unmarshal_json_enum = 1 [ + retention = RETENTION_RUNTIME, + targets = TARGET_TYPE_ENUM, + edition_defaults = { edition: EDITION_PROTO2, value: "true" }, + edition_defaults = { edition: EDITION_PROTO3, value: "false" } + ]; +} diff --git a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_32bit.go b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_32bit.go index c44610db..d2ab60ec 100644 --- a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_32bit.go +++ b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_32bit.go @@ -77,9 +77,6 @@ func (i *Int32) Store(v int32) { // // It may be helpful to document why a racy operation is permitted. // -// Don't add fields to this struct. It is important that it remain the same -// size as its builtin analogue. -// //go:nosplit func (i *Int32) RacyStore(v int32) { i.value = v @@ -124,6 +121,9 @@ func (i *Int32) ptr() *int32 { // Uint32 is an atomic uint32. // +// Don't add fields to this struct. It is important that it remain the same +// size as its builtin analogue. +// // See aligned_unsafe.go in this directory for justification. // // +stateify savable @@ -210,4 +210,80 @@ func (u *Uint32) ptr() *uint32 { return &u.value } +// Bool is an atomic Boolean. +// +// It is implemented by a Uint32, with value 0 indicating false, and 1 +// indicating true. +// +// +stateify savable +type Bool struct { + Uint32 +} + +// b32 returns a uint32 0 or 1 representing b. +func b32(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// FromBool returns a Bool initialized to value val. +// +//go:nosplit +func FromBool(val bool) Bool { + return Bool{ + Uint32: FromUint32(b32(val)), + } +} + +// Load is analogous to atomic.LoadBool, if such a thing existed. +// +//go:nosplit +func (b *Bool) Load() bool { + return b.Uint32.Load() != 0 +} + +// RacyLoad is analogous to reading an atomic value without using +// synchronization. +// +// It may be helpful to document why a racy operation is permitted. 
+// +//go:nosplit +func (b *Bool) RacyLoad() bool { + return b.Uint32.RacyLoad() != 0 +} + +// Store is analogous to atomic.StoreBool, if such a thing existed. +// +//go:nosplit +func (b *Bool) Store(val bool) { + b.Uint32.Store(b32(val)) +} + +// RacyStore is analogous to setting an atomic value without using +// synchronization. +// +// It may be helpful to document why a racy operation is permitted. +// +//go:nosplit +func (b *Bool) RacyStore(val bool) { + b.Uint32.RacyStore(b32(val)) +} + +// Swap is analogous to atomic.SwapBool, if such a thing existed. +// +//go:nosplit +func (b *Bool) Swap(val bool) bool { + return b.Uint32.Swap(b32(val)) != 0 +} + +// CompareAndSwap is analogous to atomic.CompareAndSwapBool, if such a thing +// existed. +// +//go:nosplit +func (b *Bool) CompareAndSwap(oldVal, newVal bool) bool { + return b.Uint32.CompareAndSwap(b32(oldVal), b32(newVal)) +} + // LINT.ThenChange(32b_64bit.go) diff --git a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_64bit.go b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_64bit.go index 18aa9630..af926eb4 100644 --- a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_64bit.go +++ b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/32b_64bit.go @@ -77,9 +77,6 @@ func (i *Int32) Store(v int32) { // // It may be helpful to document why a racy operation is permitted. // -// Don't add fields to this struct. It is important that it remain the same -// size as its builtin analogue. -// //go:nosplit func (i *Int32) RacyStore(v int32) { i.value = v @@ -124,6 +121,9 @@ func (i *Int32) ptr() *int32 { // Uint32 is an atomic uint32. // +// Don't add fields to this struct. It is important that it remain the same +// size as its builtin analogue. +// // See aligned_unsafe.go in this directory for justification. // // +stateify savable @@ -210,4 +210,80 @@ func (u *Uint32) ptr() *uint32 { return &u.value } +// Bool is an atomic Boolean. +// +// It is implemented by a Uint32, with value 0 indicating false, and 1 +// indicating true. 
+// +// +stateify savable +type Bool struct { + Uint32 +} + +// b32 returns a uint32 0 or 1 representing b. +func b32(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// FromBool returns a Bool initialized to value val. +// +//go:nosplit +func FromBool(val bool) Bool { + return Bool{ + Uint32: FromUint32(b32(val)), + } +} + +// Load is analogous to atomic.LoadBool, if such a thing existed. +// +//go:nosplit +func (b *Bool) Load() bool { + return b.Uint32.Load() != 0 +} + +// RacyLoad is analogous to reading an atomic value without using +// synchronization. +// +// It may be helpful to document why a racy operation is permitted. +// +//go:nosplit +func (b *Bool) RacyLoad() bool { + return b.Uint32.RacyLoad() != 0 +} + +// Store is analogous to atomic.StoreBool, if such a thing existed. +// +//go:nosplit +func (b *Bool) Store(val bool) { + b.Uint32.Store(b32(val)) +} + +// RacyStore is analogous to setting an atomic value without using +// synchronization. +// +// It may be helpful to document why a racy operation is permitted. +// +//go:nosplit +func (b *Bool) RacyStore(val bool) { + b.Uint32.RacyStore(b32(val)) +} + +// Swap is analogous to atomic.SwapBool, if such a thing existed. +// +//go:nosplit +func (b *Bool) Swap(val bool) bool { + return b.Uint32.Swap(b32(val)) != 0 +} + +// CompareAndSwap is analogous to atomic.CompareAndSwapBool, if such a thing +// existed. 
+// +//go:nosplit +func (b *Bool) CompareAndSwap(oldVal, newVal bool) bool { + return b.Uint32.CompareAndSwap(b32(oldVal), b32(newVal)) +} + // LINT.ThenChange(32b_32bit.go) diff --git a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_state_autogen.go index 4d3cad36..78e501aa 100644 --- a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_state_autogen.go @@ -6,6 +6,8 @@ package atomicbitops import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -27,10 +29,10 @@ func (i *Int32) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &i.value) } -func (i *Int32) afterLoad() {} +func (i *Int32) afterLoad(context.Context) {} // +checklocksignore -func (i *Int32) StateLoad(stateSourceObject state.Source) { +func (i *Int32) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &i.value) } @@ -52,14 +54,40 @@ func (u *Uint32) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &u.value) } -func (u *Uint32) afterLoad() {} +func (u *Uint32) afterLoad(context.Context) {} // +checklocksignore -func (u *Uint32) StateLoad(stateSourceObject state.Source) { +func (u *Uint32) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &u.value) } +func (b *Bool) StateTypeName() string { + return "pkg/atomicbitops.Bool" +} + +func (b *Bool) StateFields() []string { + return []string{ + "Uint32", + } +} + +func (b *Bool) beforeSave() {} + +// +checklocksignore +func (b *Bool) StateSave(stateSinkObject state.Sink) { + b.beforeSave() + stateSinkObject.Save(0, &b.Uint32) +} + +func (b *Bool) afterLoad(context.Context) {} + +// +checklocksignore +func (b *Bool) StateLoad(ctx context.Context, stateSourceObject state.Source) { + stateSourceObject.Load(0, &b.Uint32) +} + func init() { state.Register((*Int32)(nil)) 
state.Register((*Uint32)(nil)) + state.Register((*Bool)(nil)) } diff --git a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_unsafe_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_unsafe_state_autogen.go index b58524da..606a6d02 100644 --- a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_unsafe_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_32bit_unsafe_state_autogen.go @@ -6,6 +6,8 @@ package atomicbitops import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -29,10 +31,10 @@ func (i *Int64) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &i.value32) } -func (i *Int64) afterLoad() {} +func (i *Int64) afterLoad(context.Context) {} // +checklocksignore -func (i *Int64) StateLoad(stateSourceObject state.Source) { +func (i *Int64) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &i.value) stateSourceObject.Load(1, &i.value32) } @@ -57,10 +59,10 @@ func (u *Uint64) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &u.value32) } -func (u *Uint64) afterLoad() {} +func (u *Uint64) afterLoad(context.Context) {} // +checklocksignore -func (u *Uint64) StateLoad(stateSourceObject state.Source) { +func (u *Uint64) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &u.value) stateSourceObject.Load(1, &u.value32) } diff --git a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_64bit_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_64bit_state_autogen.go index 894dde88..8e6cd37c 100644 --- a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_64bit_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_64bit_state_autogen.go @@ -6,6 +6,8 @@ package atomicbitops import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -27,10 +29,10 @@ func (i *Int32) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &i.value) 
} -func (i *Int32) afterLoad() {} +func (i *Int32) afterLoad(context.Context) {} // +checklocksignore -func (i *Int32) StateLoad(stateSourceObject state.Source) { +func (i *Int32) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &i.value) } @@ -52,13 +54,38 @@ func (u *Uint32) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &u.value) } -func (u *Uint32) afterLoad() {} +func (u *Uint32) afterLoad(context.Context) {} // +checklocksignore -func (u *Uint32) StateLoad(stateSourceObject state.Source) { +func (u *Uint32) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &u.value) } +func (b *Bool) StateTypeName() string { + return "pkg/atomicbitops.Bool" +} + +func (b *Bool) StateFields() []string { + return []string{ + "Uint32", + } +} + +func (b *Bool) beforeSave() {} + +// +checklocksignore +func (b *Bool) StateSave(stateSinkObject state.Sink) { + b.beforeSave() + stateSinkObject.Save(0, &b.Uint32) +} + +func (b *Bool) afterLoad(context.Context) {} + +// +checklocksignore +func (b *Bool) StateLoad(ctx context.Context, stateSourceObject state.Source) { + stateSourceObject.Load(0, &b.Uint32) +} + func (i *Int64) StateTypeName() string { return "pkg/atomicbitops.Int64" } @@ -77,10 +104,10 @@ func (i *Int64) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &i.value) } -func (i *Int64) afterLoad() {} +func (i *Int64) afterLoad(context.Context) {} // +checklocksignore -func (i *Int64) StateLoad(stateSourceObject state.Source) { +func (i *Int64) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &i.value) } @@ -102,16 +129,17 @@ func (u *Uint64) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &u.value) } -func (u *Uint64) afterLoad() {} +func (u *Uint64) afterLoad(context.Context) {} // +checklocksignore -func (u *Uint64) StateLoad(stateSourceObject state.Source) { +func (u *Uint64) StateLoad(ctx 
context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &u.value) } func init() { state.Register((*Int32)(nil)) state.Register((*Uint32)(nil)) + state.Register((*Bool)(nil)) state.Register((*Int64)(nil)) state.Register((*Uint64)(nil)) } diff --git a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_state_autogen.go index bc8fc35d..ca763da6 100644 --- a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_state_autogen.go @@ -8,6 +8,8 @@ package atomicbitops import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -29,39 +31,13 @@ func (f *Float64) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &f.bits) } -func (f *Float64) afterLoad() {} +func (f *Float64) afterLoad(context.Context) {} // +checklocksignore -func (f *Float64) StateLoad(stateSourceObject state.Source) { +func (f *Float64) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &f.bits) } -func (b *Bool) StateTypeName() string { - return "pkg/atomicbitops.Bool" -} - -func (b *Bool) StateFields() []string { - return []string{ - "Uint32", - } -} - -func (b *Bool) beforeSave() {} - -// +checklocksignore -func (b *Bool) StateSave(stateSinkObject state.Sink) { - b.beforeSave() - stateSinkObject.Save(0, &b.Uint32) -} - -func (b *Bool) afterLoad() {} - -// +checklocksignore -func (b *Bool) StateLoad(stateSourceObject state.Source) { - stateSourceObject.Load(0, &b.Uint32) -} - func init() { state.Register((*Float64)(nil)) - state.Register((*Bool)(nil)) } diff --git a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/bool.go b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/bool.go deleted file mode 100644 index 60e646e8..00000000 --- a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/bool.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2022 The gVisor Authors. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package atomicbitops - -import "sync/atomic" - -// Bool is an atomic Boolean. -// -// It is implemented by a Uint32, with value 0 indicating false, and 1 -// indicating true. -// -// +stateify savable -type Bool struct { - Uint32 -} - -// FromBool returns an Bool initialized to value val. -// -//go:nosplit -func FromBool(val bool) Bool { - var u uint32 - if val { - u = 1 - } - return Bool{ - Uint32{ - value: u, - }, - } -} - -// Load is analogous to atomic.LoadBool, if such a thing existed. -// -//go:nosplit -func (b *Bool) Load() bool { - return atomic.LoadUint32(&b.value) == 1 -} - -// Store is analogous to atomic.StoreBool, if such a thing existed. -// -//go:nosplit -func (b *Bool) Store(val bool) { - var u uint32 - if val { - u = 1 - } - atomic.StoreUint32(&b.value, u) -} - -// Swap is analogous to atomic.SwapBool, if such a thing existed. 
-// -//go:nosplit -func (b *Bool) Swap(val bool) bool { - var u uint32 - if val { - u = 1 - } - return atomic.SwapUint32(&b.value, u) == 1 -} diff --git a/vendor/gvisor.dev/gvisor/pkg/buffer/buffer.go b/vendor/gvisor.dev/gvisor/pkg/buffer/buffer.go index cc663ae7..3e6bc6dd 100644 --- a/vendor/gvisor.dev/gvisor/pkg/buffer/buffer.go +++ b/vendor/gvisor.dev/gvisor/pkg/buffer/buffer.go @@ -28,7 +28,7 @@ import ( // // +stateify savable type Buffer struct { - data viewList `state:".([]byte)"` + data ViewList `state:".([]byte)"` size int64 } @@ -189,12 +189,9 @@ func (b *Buffer) GrowTo(length int64, zero bool) { sz = int(length - b.size) } - // Zero the written section; note that this pattern is - // specifically recognized and optimized by the compiler. + // Zero the written section. if zero { - for i := v.write; i < v.write+sz; i++ { - v.chunk.data[i] = 0 - } + clear(v.chunk.data[v.write : v.write+sz]) } // Advance the index. @@ -401,6 +398,12 @@ func (b *Buffer) Size() int64 { return b.size } +// AsViewList returns the ViewList backing b. Users may not save or modify the +// ViewList returned. +func (b *Buffer) AsViewList() ViewList { + return b.data +} + // Clone creates a copy-on-write clone of b. The underlying chunks are shared // until they are written to. func (b *Buffer) Clone() Buffer { @@ -479,7 +482,7 @@ func (b *Buffer) Checksum(offset int) uint16 { // operation completes. func (b *Buffer) Merge(other *Buffer) { b.data.PushBackList(&other.data) - other.data = viewList{} + other.data = ViewList{} // Adjust sizes. b.size += other.size @@ -489,6 +492,18 @@ func (b *Buffer) Merge(other *Buffer) { // WriteFromReader writes to the buffer from an io.Reader. A maximum read size // of MaxChunkSize is enforced to prevent allocating views from the heap. 
func (b *Buffer) WriteFromReader(r io.Reader, count int64) (int64, error) { + return b.WriteFromReaderAndLimitedReader(r, count, nil) +} + +// WriteFromReaderAndLimitedReader is the same as WriteFromReader, but +// optimized to avoid allocations if a LimitedReader is passed in. +// +// This function clobbers the values of lr. +func (b *Buffer) WriteFromReaderAndLimitedReader(r io.Reader, count int64, lr *io.LimitedReader) (int64, error) { + if lr == nil { + lr = &io.LimitedReader{} + } + var done int64 for done < count { vsize := count - done @@ -496,8 +511,9 @@ func (b *Buffer) WriteFromReader(r io.Reader, count int64) (int64, error) { vsize = MaxChunkSize } v := NewView(int(vsize)) - lr := io.LimitedReader{R: r, N: vsize} - n, err := io.Copy(v, &lr) + lr.R = r + lr.N = vsize + n, err := io.Copy(v, lr) b.Append(v) done += n if err == io.EOF { @@ -572,7 +588,7 @@ func (b *Buffer) readByte() (byte, error) { return bt, nil } -// AsBufferReader returns the Buffer as a BufferReader capabable of io methods. +// AsBufferReader returns the Buffer as a BufferReader capable of io methods. // The new BufferReader takes ownership of b. func (b *Buffer) AsBufferReader() BufferReader { return BufferReader{b} diff --git a/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state.go b/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state.go index 8b8e15ea..d57dfa02 100644 --- a/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state.go +++ b/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state.go @@ -14,12 +14,16 @@ package buffer +import ( + "context" +) + // saveData is invoked by stateify. func (b *Buffer) saveData() []byte { return b.Flatten() } // loadData is invoked by stateify. 
-func (b *Buffer) loadData(data []byte) { +func (b *Buffer) loadData(_ context.Context, data []byte) { *b = MakeWithData(data) } diff --git a/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state_autogen.go index 7587787c..3e32338f 100644 --- a/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state_autogen.go @@ -3,6 +3,8 @@ package buffer import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -28,12 +30,12 @@ func (b *Buffer) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &b.size) } -func (b *Buffer) afterLoad() {} +func (b *Buffer) afterLoad(context.Context) {} // +checklocksignore -func (b *Buffer) StateLoad(stateSourceObject state.Source) { +func (b *Buffer) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(1, &b.size) - stateSourceObject.LoadValue(0, new([]byte), func(y any) { b.loadData(y.([]byte)) }) + stateSourceObject.LoadValue(0, new([]byte), func(y any) { b.loadData(ctx, y.([]byte)) }) } func (c *chunk) StateTypeName() string { @@ -56,10 +58,10 @@ func (c *chunk) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &c.data) } -func (c *chunk) afterLoad() {} +func (c *chunk) afterLoad(context.Context) {} // +checklocksignore -func (c *chunk) StateLoad(stateSourceObject state.Source) { +func (c *chunk) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &c.chunkRefs) stateSourceObject.Load(1, &c.data) } @@ -83,9 +85,9 @@ func (r *chunkRefs) StateSave(stateSinkObject state.Sink) { } // +checklocksignore -func (r *chunkRefs) StateLoad(stateSourceObject state.Source) { +func (r *chunkRefs) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &r.refCount) - stateSourceObject.AfterLoad(r.afterLoad) + stateSourceObject.AfterLoad(func() { r.afterLoad(ctx) }) } func (v *View) StateTypeName() string { @@ 
-110,67 +112,67 @@ func (v *View) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(2, &v.chunk) } -func (v *View) afterLoad() {} +func (v *View) afterLoad(context.Context) {} // +checklocksignore -func (v *View) StateLoad(stateSourceObject state.Source) { +func (v *View) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &v.read) stateSourceObject.Load(1, &v.write) stateSourceObject.Load(2, &v.chunk) } -func (l *viewList) StateTypeName() string { - return "pkg/buffer.viewList" +func (l *ViewList) StateTypeName() string { + return "pkg/buffer.ViewList" } -func (l *viewList) StateFields() []string { +func (l *ViewList) StateFields() []string { return []string{ "head", "tail", } } -func (l *viewList) beforeSave() {} +func (l *ViewList) beforeSave() {} // +checklocksignore -func (l *viewList) StateSave(stateSinkObject state.Sink) { +func (l *ViewList) StateSave(stateSinkObject state.Sink) { l.beforeSave() stateSinkObject.Save(0, &l.head) stateSinkObject.Save(1, &l.tail) } -func (l *viewList) afterLoad() {} +func (l *ViewList) afterLoad(context.Context) {} // +checklocksignore -func (l *viewList) StateLoad(stateSourceObject state.Source) { +func (l *ViewList) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &l.head) stateSourceObject.Load(1, &l.tail) } -func (e *viewEntry) StateTypeName() string { - return "pkg/buffer.viewEntry" +func (e *ViewEntry) StateTypeName() string { + return "pkg/buffer.ViewEntry" } -func (e *viewEntry) StateFields() []string { +func (e *ViewEntry) StateFields() []string { return []string{ "next", "prev", } } -func (e *viewEntry) beforeSave() {} +func (e *ViewEntry) beforeSave() {} // +checklocksignore -func (e *viewEntry) StateSave(stateSinkObject state.Sink) { +func (e *ViewEntry) StateSave(stateSinkObject state.Sink) { e.beforeSave() stateSinkObject.Save(0, &e.next) stateSinkObject.Save(1, &e.prev) } -func (e *viewEntry) afterLoad() {} +func (e 
*ViewEntry) afterLoad(context.Context) {} // +checklocksignore -func (e *viewEntry) StateLoad(stateSourceObject state.Source) { +func (e *ViewEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &e.next) stateSourceObject.Load(1, &e.prev) } @@ -180,6 +182,6 @@ func init() { state.Register((*chunk)(nil)) state.Register((*chunkRefs)(nil)) state.Register((*View)(nil)) - state.Register((*viewList)(nil)) - state.Register((*viewEntry)(nil)) + state.Register((*ViewList)(nil)) + state.Register((*ViewEntry)(nil)) } diff --git a/vendor/gvisor.dev/gvisor/pkg/buffer/chunk.go b/vendor/gvisor.dev/gvisor/pkg/buffer/chunk.go index 551f06db..a58eed02 100644 --- a/vendor/gvisor.dev/gvisor/pkg/buffer/chunk.go +++ b/vendor/gvisor.dev/gvisor/pkg/buffer/chunk.go @@ -27,7 +27,7 @@ const ( // number and passing the result to MostSignificantOne64. baseChunkSizeLog2 = 6 - // This is the size of the buffers in the first pool. Each subsquent pool + // This is the size of the buffers in the first pool. Each subsequent pool // creates payloads 2^(pool index) times larger than the first pool's // payloads. 
baseChunkSize = 1 << baseChunkSizeLog2 // 64 @@ -87,9 +87,7 @@ func newChunk(size int) *chunk { } else { pool := getChunkPool(size) c = pool.Get().(*chunk) - for i := range c.data { - c.data[i] = 0 - } + clear(c.data) } c.InitRefs() return c diff --git a/vendor/gvisor.dev/gvisor/pkg/buffer/chunk_refs.go b/vendor/gvisor.dev/gvisor/pkg/buffer/chunk_refs.go index 4d2d3c3d..fa0606db 100644 --- a/vendor/gvisor.dev/gvisor/pkg/buffer/chunk_refs.go +++ b/vendor/gvisor.dev/gvisor/pkg/buffer/chunk_refs.go @@ -1,6 +1,7 @@ package buffer import ( + "context" "fmt" "gvisor.dev/gvisor/pkg/atomicbitops" @@ -134,7 +135,7 @@ func (r *chunkRefs) DecRef(destroy func()) { } } -func (r *chunkRefs) afterLoad() { +func (r *chunkRefs) afterLoad(context.Context) { if r.ReadRefs() > 0 { refs.Register(r) } diff --git a/vendor/gvisor.dev/gvisor/pkg/buffer/view.go b/vendor/gvisor.dev/gvisor/pkg/buffer/view.go index d7eb2f11..6c8d17ef 100644 --- a/vendor/gvisor.dev/gvisor/pkg/buffer/view.go +++ b/vendor/gvisor.dev/gvisor/pkg/buffer/view.go @@ -48,7 +48,7 @@ var viewPool = sync.Pool{ // // +stateify savable type View struct { - viewEntry `state:"nosave"` + ViewEntry `state:"nosave"` read int write int chunk *chunk diff --git a/vendor/gvisor.dev/gvisor/pkg/buffer/view_list.go b/vendor/gvisor.dev/gvisor/pkg/buffer/view_list.go index eb9aa9a7..db855dfd 100644 --- a/vendor/gvisor.dev/gvisor/pkg/buffer/view_list.go +++ b/vendor/gvisor.dev/gvisor/pkg/buffer/view_list.go @@ -6,14 +6,14 @@ package buffer // objects, if they are not the same. An ElementMapper is not typically // required if: Linker is left as is, Element is left as is, or Linker and // Element are the same type. -type viewElementMapper struct{} +type ViewElementMapper struct{} // linkerFor maps an Element to a Linker. // // This default implementation should be inlined. 
// //go:nosplit -func (viewElementMapper) linkerFor(elem *View) *View { return elem } +func (ViewElementMapper) linkerFor(elem *View) *View { return elem } // List is an intrusive list. Entries can be added to or removed from the list // in O(1) time and with no additional memory allocations. @@ -27,13 +27,13 @@ func (viewElementMapper) linkerFor(elem *View) *View { return elem } // } // // +stateify savable -type viewList struct { +type ViewList struct { head *View tail *View } // Reset resets list l to the empty state. -func (l *viewList) Reset() { +func (l *ViewList) Reset() { l.head = nil l.tail = nil } @@ -41,21 +41,21 @@ func (l *viewList) Reset() { // Empty returns true iff the list is empty. // //go:nosplit -func (l *viewList) Empty() bool { +func (l *ViewList) Empty() bool { return l.head == nil } // Front returns the first element of list l or nil. // //go:nosplit -func (l *viewList) Front() *View { +func (l *ViewList) Front() *View { return l.head } // Back returns the last element of list l or nil. // //go:nosplit -func (l *viewList) Back() *View { +func (l *ViewList) Back() *View { return l.tail } @@ -64,8 +64,8 @@ func (l *viewList) Back() *View { // NOTE: This is an O(n) operation. // //go:nosplit -func (l *viewList) Len() (count int) { - for e := l.Front(); e != nil; e = (viewElementMapper{}.linkerFor(e)).Next() { +func (l *ViewList) Len() (count int) { + for e := l.Front(); e != nil; e = (ViewElementMapper{}.linkerFor(e)).Next() { count++ } return count @@ -74,12 +74,12 @@ func (l *viewList) Len() (count int) { // PushFront inserts the element e at the front of list l. 
// //go:nosplit -func (l *viewList) PushFront(e *View) { - linker := viewElementMapper{}.linkerFor(e) +func (l *ViewList) PushFront(e *View) { + linker := ViewElementMapper{}.linkerFor(e) linker.SetNext(l.head) linker.SetPrev(nil) if l.head != nil { - viewElementMapper{}.linkerFor(l.head).SetPrev(e) + ViewElementMapper{}.linkerFor(l.head).SetPrev(e) } else { l.tail = e } @@ -90,13 +90,13 @@ func (l *viewList) PushFront(e *View) { // PushFrontList inserts list m at the start of list l, emptying m. // //go:nosplit -func (l *viewList) PushFrontList(m *viewList) { +func (l *ViewList) PushFrontList(m *ViewList) { if l.head == nil { l.head = m.head l.tail = m.tail } else if m.head != nil { - viewElementMapper{}.linkerFor(l.head).SetPrev(m.tail) - viewElementMapper{}.linkerFor(m.tail).SetNext(l.head) + ViewElementMapper{}.linkerFor(l.head).SetPrev(m.tail) + ViewElementMapper{}.linkerFor(m.tail).SetNext(l.head) l.head = m.head } @@ -107,12 +107,12 @@ func (l *viewList) PushFrontList(m *viewList) { // PushBack inserts the element e at the back of list l. // //go:nosplit -func (l *viewList) PushBack(e *View) { - linker := viewElementMapper{}.linkerFor(e) +func (l *ViewList) PushBack(e *View) { + linker := ViewElementMapper{}.linkerFor(e) linker.SetNext(nil) linker.SetPrev(l.tail) if l.tail != nil { - viewElementMapper{}.linkerFor(l.tail).SetNext(e) + ViewElementMapper{}.linkerFor(l.tail).SetNext(e) } else { l.head = e } @@ -123,13 +123,13 @@ func (l *viewList) PushBack(e *View) { // PushBackList inserts list m at the end of list l, emptying m. 
// //go:nosplit -func (l *viewList) PushBackList(m *viewList) { +func (l *ViewList) PushBackList(m *ViewList) { if l.head == nil { l.head = m.head l.tail = m.tail } else if m.head != nil { - viewElementMapper{}.linkerFor(l.tail).SetNext(m.head) - viewElementMapper{}.linkerFor(m.head).SetPrev(l.tail) + ViewElementMapper{}.linkerFor(l.tail).SetNext(m.head) + ViewElementMapper{}.linkerFor(m.head).SetPrev(l.tail) l.tail = m.tail } @@ -140,9 +140,9 @@ func (l *viewList) PushBackList(m *viewList) { // InsertAfter inserts e after b. // //go:nosplit -func (l *viewList) InsertAfter(b, e *View) { - bLinker := viewElementMapper{}.linkerFor(b) - eLinker := viewElementMapper{}.linkerFor(e) +func (l *ViewList) InsertAfter(b, e *View) { + bLinker := ViewElementMapper{}.linkerFor(b) + eLinker := ViewElementMapper{}.linkerFor(e) a := bLinker.Next() @@ -151,7 +151,7 @@ func (l *viewList) InsertAfter(b, e *View) { bLinker.SetNext(e) if a != nil { - viewElementMapper{}.linkerFor(a).SetPrev(e) + ViewElementMapper{}.linkerFor(a).SetPrev(e) } else { l.tail = e } @@ -160,9 +160,9 @@ func (l *viewList) InsertAfter(b, e *View) { // InsertBefore inserts e before a. // //go:nosplit -func (l *viewList) InsertBefore(a, e *View) { - aLinker := viewElementMapper{}.linkerFor(a) - eLinker := viewElementMapper{}.linkerFor(e) +func (l *ViewList) InsertBefore(a, e *View) { + aLinker := ViewElementMapper{}.linkerFor(a) + eLinker := ViewElementMapper{}.linkerFor(e) b := aLinker.Prev() eLinker.SetNext(a) @@ -170,7 +170,7 @@ func (l *viewList) InsertBefore(a, e *View) { aLinker.SetPrev(e) if b != nil { - viewElementMapper{}.linkerFor(b).SetNext(e) + ViewElementMapper{}.linkerFor(b).SetNext(e) } else { l.head = e } @@ -179,19 +179,19 @@ func (l *viewList) InsertBefore(a, e *View) { // Remove removes e from l. 
// //go:nosplit -func (l *viewList) Remove(e *View) { - linker := viewElementMapper{}.linkerFor(e) +func (l *ViewList) Remove(e *View) { + linker := ViewElementMapper{}.linkerFor(e) prev := linker.Prev() next := linker.Next() if prev != nil { - viewElementMapper{}.linkerFor(prev).SetNext(next) + ViewElementMapper{}.linkerFor(prev).SetNext(next) } else if l.head == e { l.head = next } if next != nil { - viewElementMapper{}.linkerFor(next).SetPrev(prev) + ViewElementMapper{}.linkerFor(next).SetPrev(prev) } else if l.tail == e { l.tail = prev } @@ -205,7 +205,7 @@ func (l *viewList) Remove(e *View) { // methods needed by List. // // +stateify savable -type viewEntry struct { +type ViewEntry struct { next *View prev *View } @@ -213,27 +213,27 @@ type viewEntry struct { // Next returns the entry that follows e in the list. // //go:nosplit -func (e *viewEntry) Next() *View { +func (e *ViewEntry) Next() *View { return e.next } // Prev returns the entry that precedes e in the list. // //go:nosplit -func (e *viewEntry) Prev() *View { +func (e *ViewEntry) Prev() *View { return e.prev } // SetNext assigns 'entry' as the entry that follows e in the list. // //go:nosplit -func (e *viewEntry) SetNext(elem *View) { +func (e *ViewEntry) SetNext(elem *View) { e.next = elem } // SetPrev assigns 'entry' as the entry that precedes e in the list. // //go:nosplit -func (e *viewEntry) SetPrev(elem *View) { +func (e *ViewEntry) SetPrev(elem *View) { e.prev = elem } diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid.go b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid.go index 413b8493..df5acf67 100644 --- a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid.go +++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid.go @@ -38,7 +38,7 @@ import ( "gvisor.dev/gvisor/pkg/sync" ) -// contextID is the package for context.Context.Value keys. +// contextID is the package for anyContext.Context.Value keys. type contextID int const ( @@ -51,13 +51,13 @@ const ( _AT_HWCAP2 = 26 ) -// context represents context.Context. 
-type context interface { +// anyContext represents context.Context. +type anyContext interface { Value(key any) any } // FromContext returns the FeatureSet from the context, if available. -func FromContext(ctx context) FeatureSet { +func FromContext(ctx anyContext) FeatureSet { v := ctx.Value(CtxFeatureSet) if v == nil { return FeatureSet{} // Panics if used. diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64.go b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64.go index 044eed07..178428d4 100644 --- a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64.go +++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64.go @@ -18,6 +18,7 @@ package cpuid import ( + "context" "fmt" "io" ) @@ -56,7 +57,7 @@ func (fs *FeatureSet) saveFunction() Static { } // loadFunction saves the function as a static query. -func (fs *FeatureSet) loadFunction(s Static) { +func (fs *FeatureSet) loadFunction(_ context.Context, s Static) { fs.Function = s } @@ -309,7 +310,7 @@ func (fs FeatureSet) HasFeature(feature Feature) bool { // a minimal /proc/cpuinfo, it is missing some fields like "microcode" that are // not always printed in Linux. The bogomips field is simply made up. func (fs FeatureSet) WriteCPUInfoTo(cpu uint, w io.Writer) { - // Avoid many redunant calls here, since this can occasionally appear + // Avoid many redundant calls here, since this can occasionally appear // in the hot path. Read all basic information up front, see above. ax, _, _, _ := fs.query(featureInfo) ef, em, _, f, m, _ := signatureSplit(ax) @@ -361,8 +362,22 @@ func (fs FeatureSet) Intel() bool { // If xSaveInfo isn't supported, cpuid will not fault but will // return bogus values. 
var ( - xsaveSize = native(In{Eax: uint32(xSaveInfo)}).Ebx - maxXsaveSize = native(In{Eax: uint32(xSaveInfo)}).Ecx + xsaveSize = native(In{Eax: uint32(xSaveInfo)}).Ebx + maxXsaveSize = native(In{Eax: uint32(xSaveInfo)}).Ecx + amxTileCfgSize = native(In{Eax: uint32(xSaveInfo), Ecx: 17}).Eax + amxTileDataSize = native(In{Eax: uint32(xSaveInfo), Ecx: 18}).Eax +) + +const ( + // XCR0AMXMask are the bits that enable xsave to operate on AMX TILECFG + // and TILEDATA. + // + // Note: TILECFG and TILEDATA are always either both enabled or both + // disabled. + // + // See Intel® 64 and IA-32 Architectures Software Developer’s Manual Vol.1 + // section 13.3 for details. + XCR0AMXMask = uint64((1 << 17) | (1 << 18)) ) // ExtendedStateSize returns the number of bytes needed to save the "extended @@ -384,15 +399,30 @@ func (fs FeatureSet) ExtendedStateSize() (size, align uint) { return 512, 16 } +// AMXExtendedStateSize returns the number of bytes within the "extended state" +// area that is used for AMX. +func (fs FeatureSet) AMXExtendedStateSize() uint { + if fs.UseXsave() { + xcr0 := xgetbv(0) + if (xcr0 & XCR0AMXMask) != 0 { + return uint(amxTileCfgSize + amxTileDataSize) + } + } + return 0 +} + // ValidXCR0Mask returns the valid bits in control register XCR0. // +// Always exclude AMX bits, because we do not support it. +// TODO(gvisor.dev/issues/9896): Implement AMX Support. +// //go:nosplit func (fs FeatureSet) ValidXCR0Mask() uint64 { if !fs.HasFeature(X86FeatureXSAVE) { return 0 } ax, _, _, dx := fs.query(xSaveInfo) - return uint64(dx)<<32 | uint64(ax) + return (uint64(dx)<<32 | uint64(ax)) &^ XCR0AMXMask } // UseXsave returns the choice of fp state saving instruction. 
diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64_state_autogen.go index 8b7c0e1b..bb416970 100644 --- a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64_state_autogen.go @@ -6,6 +6,8 @@ package cpuid import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -31,12 +33,12 @@ func (fs *FeatureSet) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &fs.hwCap) } -func (fs *FeatureSet) afterLoad() {} +func (fs *FeatureSet) afterLoad(context.Context) {} // +checklocksignore -func (fs *FeatureSet) StateLoad(stateSourceObject state.Source) { +func (fs *FeatureSet) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(1, &fs.hwCap) - stateSourceObject.LoadValue(0, new(Static), func(y any) { fs.loadFunction(y.(Static)) }) + stateSourceObject.LoadValue(0, new(Static), func(y any) { fs.loadFunction(ctx, y.(Static)) }) } func (i *In) StateTypeName() string { @@ -59,10 +61,10 @@ func (i *In) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &i.Ecx) } -func (i *In) afterLoad() {} +func (i *In) afterLoad(context.Context) {} // +checklocksignore -func (i *In) StateLoad(stateSourceObject state.Source) { +func (i *In) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &i.Eax) stateSourceObject.Load(1, &i.Ecx) } @@ -91,10 +93,10 @@ func (o *Out) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(3, &o.Edx) } -func (o *Out) afterLoad() {} +func (o *Out) afterLoad(context.Context) {} // +checklocksignore -func (o *Out) StateLoad(stateSourceObject state.Source) { +func (o *Out) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &o.Eax) stateSourceObject.Load(1, &o.Ebx) stateSourceObject.Load(2, &o.Ecx) diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_arm64_state_autogen.go 
b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_arm64_state_autogen.go index 48699f70..1d7f9334 100644 --- a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_arm64_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_arm64_state_autogen.go @@ -6,6 +6,8 @@ package cpuid import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -39,10 +41,10 @@ func (fs *FeatureSet) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(6, &fs.cpuRevDec) } -func (fs *FeatureSet) afterLoad() {} +func (fs *FeatureSet) afterLoad(context.Context) {} // +checklocksignore -func (fs *FeatureSet) StateLoad(stateSourceObject state.Source) { +func (fs *FeatureSet) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &fs.hwCap) stateSourceObject.Load(1, &fs.cpuFreqMHz) stateSourceObject.Load(2, &fs.cpuImplHex) diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_state_autogen.go index b2fcd970..d873d007 100644 --- a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_state_autogen.go @@ -3,6 +3,8 @@ package cpuid import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -26,10 +28,10 @@ func (h *hwCap) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &h.hwCap2) } -func (h *hwCap) afterLoad() {} +func (h *hwCap) afterLoad(context.Context) {} // +checklocksignore -func (h *hwCap) StateLoad(stateSourceObject state.Source) { +func (h *hwCap) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &h.hwCap1) stateSourceObject.Load(1, &h.hwCap2) } diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/features_amd64.go b/vendor/gvisor.dev/gvisor/pkg/cpuid/features_amd64.go index f14b0baf..4831fda3 100644 --- a/vendor/gvisor.dev/gvisor/pkg/cpuid/features_amd64.go +++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/features_amd64.go @@ -127,6 +127,14 @@ func (f Feature) set(s ChangeableSet, on bool) { } } 
s.Set(In{Eax: uint32(extendedFeatures)}, out) + case 7: + out := s.Query(In{Eax: uint32(extendedFeatureInfo)}) + if on { + out.Edx |= f.bit() + } else { + out.Edx &^= f.bit() + } + s.Set(In{Eax: uint32(extendedFeatureInfo)}, out) } } @@ -170,6 +178,9 @@ func (f Feature) check(fs FeatureSet) bool { return ((dx &^ block6DuplicateMask) & f.bit()) != 0 } return false + case 7: + _, _, _, dx := fs.query(extendedFeatureInfo) + return (dx & f.bit()) != 0 default: return false } @@ -389,6 +400,43 @@ const ( X86Feature3DNOW Feature = 6*32 + 31 ) +// Block 7 constants are the extended features bits in +// CPUID.(EAX=07H,ECX=0):EDX. +const ( + _ Feature = 7*32 + iota // edx bit 0 is reserved. + _ // edx bit 1 is reserved. + X86FeatureAVX512_4VNNIW + X86FeatureAVX512_4FMAPS + X86FeatureFSRM + _ // edx bit 5 is not used in Linux. + _ // edx bit 6 is reserved. + _ // edx bit 7 is reserved. + X86FeatureAVX512_VP2INTERSECT + X86FeatureSRBDS_CTRL + X86FeatureMD_CLEAR + X86FeatureRTM_ALWAYS_ABORT + _ // edx bit 12 is reserved. + X86FeatureTSX_FORCE_ABORT + X86FeatureSERIALIZE + X86FeatureHYBRID_CPU + X86FeatureTSXLDTRK + _ // edx bit 17 is reserved. + X86FeaturePCONFIG + X86FeatureARCH_LBR + X86FeatureIBT + _ // edx bit 21 is reserved. + X86FeatureAMX_BF16 + X86FeatureAVX512_FP16 + X86FeatureAMX_TILE + X86FeatureAMX_INT8 + X86FeatureSPEC_CTRL + X86FeatureINTEL_STIBP + X86FeatureFLUSH_L1D + X86FeatureARCH_CAPABILITIES + X86FeatureCORE_CAPABILITIES + X86FeatureSPEC_CTRL_SSBD +) + // These are the extended floating point state features. They are used to // enumerate floating point features in XCR0, XSTATE_BV, etc. const ( @@ -569,6 +617,32 @@ var allFeatures = map[Feature]allFeatureInfo{ X86FeatureLM: {"lm", true}, X86Feature3DNOWEXT: {"3dnowext", true}, X86Feature3DNOW: {"3dnow", true}, + + // Block 7. 
+ X86FeatureAVX512_4VNNIW: {"avx512_4vnniw", true}, + X86FeatureAVX512_4FMAPS: {"avx512_4fmaps", true}, + X86FeatureFSRM: {"fsrm", true}, + X86FeatureAVX512_VP2INTERSECT: {"avx512_vp2intersect", true}, + X86FeatureSRBDS_CTRL: {"srbds_ctrl", false}, + X86FeatureMD_CLEAR: {"md_clear", true}, + X86FeatureRTM_ALWAYS_ABORT: {"rtm_always_abort", false}, + X86FeatureTSX_FORCE_ABORT: {"tsx_force_abort", false}, + X86FeatureSERIALIZE: {"serialize", true}, + X86FeatureHYBRID_CPU: {"hybrid_cpu", false}, + X86FeatureTSXLDTRK: {"tsxldtrk", true}, + X86FeaturePCONFIG: {"pconfig", true}, + X86FeatureARCH_LBR: {"arch_lbr", true}, + X86FeatureIBT: {"ibt", true}, + X86FeatureAMX_BF16: {"amx_bf16", true}, + X86FeatureAVX512_FP16: {"avx512_fp16", true}, + X86FeatureAMX_TILE: {"amx_tile", true}, + X86FeatureAMX_INT8: {"amx_int8", true}, + X86FeatureSPEC_CTRL: {"spec_ctrl", false}, + X86FeatureINTEL_STIBP: {"intel_stibp", false}, + X86FeatureFLUSH_L1D: {"flush_l1d", true}, + X86FeatureARCH_CAPABILITIES: {"arch_capabilities", true}, + X86FeatureCORE_CAPABILITIES: {"core_capabilities", false}, + X86FeatureSPEC_CTRL_SSBD: {"spec_ctrl_ssbd", false}, } // linuxBlockOrder defines the order in which linux organizes the feature @@ -576,7 +650,7 @@ var allFeatures = map[Feature]allFeatureInfo{ // which doesn't match well here, so for the /proc/cpuinfo generation we simply // re-map the blocks to Linux's ordering and then go through the bits in each // block. -var linuxBlockOrder = []block{1, 6, 0, 5, 2, 4, 3} +var linuxBlockOrder = []block{1, 6, 0, 5, 2, 4, 3, 7} func archFlagOrder(fn func(Feature)) { for _, b := range linuxBlockOrder { diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/native_amd64.go b/vendor/gvisor.dev/gvisor/pkg/cpuid/native_amd64.go index eaf77511..ac2fcbbc 100644 --- a/vendor/gvisor.dev/gvisor/pkg/cpuid/native_amd64.go +++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/native_amd64.go @@ -215,6 +215,9 @@ func readMaxCPUFreq() { } +// xgetbv reads an extended control register. 
+func xgetbv(reg uintptr) uint64 + // archInitialize initializes hostFeatureSet. func archInitialize() { hostFeatureSet = FeatureSet{ diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/native_amd64.s b/vendor/gvisor.dev/gvisor/pkg/cpuid/native_amd64.s index dd21b4bd..04a1433a 100644 --- a/vendor/gvisor.dev/gvisor/pkg/cpuid/native_amd64.s +++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/native_amd64.s @@ -23,3 +23,16 @@ TEXT ·native(SB),NOSPLIT|NOFRAME,$0-24 MOVL CX, ret_Ecx+16(FP) MOVL DX, ret_Edx+20(FP) RET + +// xgetbv reads an extended control register. +// +// The code corresponds to: +// +// xgetbv +// +TEXT ·xgetbv(SB),NOSPLIT|NOFRAME,$0-16 + MOVQ reg+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0; + MOVL AX, ret+8(FP) + MOVL DX, ret+12(FP) + RET diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/static_amd64.go b/vendor/gvisor.dev/gvisor/pkg/cpuid/static_amd64.go index 09bcf16b..f21f2e4f 100644 --- a/vendor/gvisor.dev/gvisor/pkg/cpuid/static_amd64.go +++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/static_amd64.go @@ -17,6 +17,8 @@ package cpuid +import "context" + // Static is a static CPUID function. // // +stateify savable @@ -90,7 +92,7 @@ func (s Static) ToFeatureSet() FeatureSet { } // afterLoad calls normalize. -func (s Static) afterLoad() { +func (s Static) afterLoad(context.Context) { s.normalize() } diff --git a/vendor/gvisor.dev/gvisor/pkg/log/json.go b/vendor/gvisor.dev/gvisor/pkg/log/json.go index a7f55a9f..a57bc101 100644 --- a/vendor/gvisor.dev/gvisor/pkg/log/json.go +++ b/vendor/gvisor.dev/gvisor/pkg/log/json.go @@ -17,6 +17,8 @@ package log import ( "encoding/json" "fmt" + "runtime" + "strings" "time" ) @@ -62,9 +64,16 @@ type JSONEmitter struct { } // Emit implements Emitter.Emit. -func (e JSONEmitter) Emit(_ int, level Level, timestamp time.Time, format string, v ...any) { +func (e JSONEmitter) Emit(depth int, level Level, timestamp time.Time, format string, v ...any) { + logLine := fmt.Sprintf(format, v...) 
+ if _, file, line, ok := runtime.Caller(depth + 1); ok { + if slash := strings.LastIndexByte(file, byte('/')); slash >= 0 { + file = file[slash+1:] // Trim any directory path from the file. + } + logLine = fmt.Sprintf("%s:%d] %s", file, line, logLine) + } j := jsonLog{ - Msg: fmt.Sprintf(format, v...), + Msg: logLine, Level: level, Time: timestamp, } diff --git a/vendor/gvisor.dev/gvisor/pkg/log/json_k8s.go b/vendor/gvisor.dev/gvisor/pkg/log/json_k8s.go index 0105c068..8f5aab5a 100644 --- a/vendor/gvisor.dev/gvisor/pkg/log/json_k8s.go +++ b/vendor/gvisor.dev/gvisor/pkg/log/json_k8s.go @@ -17,6 +17,8 @@ package log import ( "encoding/json" "fmt" + "runtime" + "strings" "time" ) @@ -33,9 +35,16 @@ type K8sJSONEmitter struct { } // Emit implements Emitter.Emit. -func (e K8sJSONEmitter) Emit(_ int, level Level, timestamp time.Time, format string, v ...any) { +func (e K8sJSONEmitter) Emit(depth int, level Level, timestamp time.Time, format string, v ...any) { + logLine := fmt.Sprintf(format, v...) + if _, file, line, ok := runtime.Caller(depth + 1); ok { + if slash := strings.LastIndexByte(file, byte('/')); slash >= 0 { + file = file[slash+1:] // Trim any directory path from the file. + } + logLine = fmt.Sprintf("%s:%d] %s", file, line, logLine) + } j := k8sJSONLog{ - Log: fmt.Sprintf(format, v...), + Log: logLine, Level: level, Time: timestamp, } diff --git a/vendor/gvisor.dev/gvisor/pkg/log/log.go b/vendor/gvisor.dev/gvisor/pkg/log/log.go index af95fb32..581aa77c 100644 --- a/vendor/gvisor.dev/gvisor/pkg/log/log.go +++ b/vendor/gvisor.dev/gvisor/pkg/log/log.go @@ -250,11 +250,11 @@ func (l *BasicLogger) SetLevel(level Level) { var logMu sync.Mutex // log is the default logger. -var log atomic.Value +var log atomic.Pointer[BasicLogger] // Log retrieves the global logger. func Log() *BasicLogger { - return log.Load().(*BasicLogger) + return log.Load() } // SetTarget sets the log target. 
diff --git a/vendor/gvisor.dev/gvisor/pkg/rand/rand.go b/vendor/gvisor.dev/gvisor/pkg/rand/rand.go index be0e85fd..94d2764d 100644 --- a/vendor/gvisor.dev/gvisor/pkg/rand/rand.go +++ b/vendor/gvisor.dev/gvisor/pkg/rand/rand.go @@ -15,8 +15,6 @@ //go:build !linux // +build !linux -// Package rand implements a cryptographically secure pseudorandom number -// generator. package rand import "crypto/rand" diff --git a/vendor/gvisor.dev/gvisor/pkg/rand/rand_linux.go b/vendor/gvisor.dev/gvisor/pkg/rand/rand_linux.go index fa6a2102..0913e8b0 100644 --- a/vendor/gvisor.dev/gvisor/pkg/rand/rand_linux.go +++ b/vendor/gvisor.dev/gvisor/pkg/rand/rand_linux.go @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package rand implements a cryptographically secure pseudorandom number -// generator. package rand import ( @@ -54,10 +52,17 @@ type bufferedReader struct { // Read implements io.Reader.Read. func (b *bufferedReader) Read(p []byte) (int, error) { + // In Linux, reads of up to page size bytes will always complete fully. + // See drivers/char/random.c:get_random_bytes_user(). + // NOTE(gvisor.dev/issue/9445): Some applications rely on this behavior. + const pageSize = 4096 + min := len(p) + if min > pageSize { + min = pageSize + } b.mu.Lock() - n, err := b.r.Read(p) - b.mu.Unlock() - return n, err + defer b.mu.Unlock() + return io.ReadAtLeast(b.r, p, min) } // Reader is the default reader. diff --git a/vendor/gvisor.dev/gvisor/pkg/rand/rng.go b/vendor/gvisor.dev/gvisor/pkg/rand/rng.go new file mode 100644 index 00000000..5159c202 --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/rand/rng.go @@ -0,0 +1,131 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package rand implements a cryptographically secure pseudorandom number +// generator. +package rand + +import ( + "encoding/binary" + "fmt" + "io" +) + +// RNG exposes convenience functions based on a cryptographically secure +// io.Reader. +type RNG struct { + Reader io.Reader +} + +// RNGFrom returns a new RNG. r must be a cryptographically secure io.Reader. +func RNGFrom(r io.Reader) RNG { + return RNG{Reader: r} +} + +// Uint16 is analogous to the standard library's math/rand.Uint16. +func (rg *RNG) Uint16() uint16 { + var data [2]byte + if _, err := rg.Reader.Read(data[:]); err != nil { + panic(fmt.Sprintf("Read() failed: %v", err)) + } + return binary.NativeEndian.Uint16(data[:]) +} + +// Uint32 is analogous to the standard library's math/rand.Uint32. +func (rg *RNG) Uint32() uint32 { + var data [4]byte + if _, err := rg.Reader.Read(data[:]); err != nil { + panic(fmt.Sprintf("Read() failed: %v", err)) + } + return binary.NativeEndian.Uint32(data[:]) +} + +// Int63n is analogous to the standard library's math/rand.Int63n. +func (rg *RNG) Int63n(n int64) int64 { + // Based on Go's rand package implementation, but using + // cryptographically secure random numbers. + if n <= 0 { + panic(fmt.Sprintf("n must be positive, but got %d", n)) + } + + // This can be done quickly when n is a power of 2. + if n&(n-1) == 0 { + return int64(rg.Uint64()) & (n - 1) + } + + // The naive approach would be to return rg.Int63()%n, but we need the + // random number to be fair. 
It shouldn't be biased towards certain + // results, but simple modular math can be very biased. For example, if + // n is 40% of the maximum int64, then the output values of rg.Int63 + // map to return values as follows: + // + // - The first 40% of values map to themselves. + // - The second 40% map to themselves - maximum int64. + // - The remaining 20% map to the themselves - 2 * (maximum int64), + // i.e. the first half of possible output values. + // + // And thus 60% of results map the the first half of possible output + // values, and 40% map the second half. Oops! + // + // We use the same trick as Go to deal with this: shave off the last + // segment (the 20% in our example) to make the RNG more fair. + // + // In the worst case, n is just over half of maximum int64, meaning + // that the upper half of rg.Int63 return values are bad. So each call + // to rg.Int63 has, at worst, a 50% chance of needing a retry. + maximum := int64((1 << 63) - 1 - (1<<63)%uint64(n)) + ret := rg.Int63() + for ret > maximum { + ret = rg.Int63() + } + return ret % n +} + +// Int63 is analogous to the standard library's math/rand.Int63. +func (rg *RNG) Int63() int64 { + return ((1 << 63) - 1) & int64(rg.Uint64()) +} + +// Uint64 is analogous to the standard library's math/rand.Uint64. +func (rg *RNG) Uint64() uint64 { + var data [8]byte + if _, err := rg.Reader.Read(data[:]); err != nil { + panic(fmt.Sprintf("Read() failed: %v", err)) + } + return binary.NativeEndian.Uint64(data[:]) +} + +// Uint32 is analogous to the standard library's math/rand.Uint32. +func Uint32() uint32 { + rng := RNG{Reader: Reader} + return rng.Uint32() +} + +// Int63n is analogous to the standard library's math/rand.Int63n. +func Int63n(n int64) int64 { + rng := RNG{Reader: Reader} + return rng.Int63n(n) +} + +// Int63 is analogous to the standard library's math/rand.Int63. 
+func Int63() int64 { + rng := RNG{Reader: Reader} + return rng.Int63() +} + +// Uint64 is analogous to the standard library's math/rand.Uint64. +func Uint64() uint64 { + rng := RNG{Reader: Reader} + return rng.Uint64() +} diff --git a/vendor/gvisor.dev/gvisor/pkg/sleep/sleep_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/sleep/sleep_unsafe.go index 822c0f42..eab682dc 100644 --- a/vendor/gvisor.dev/gvisor/pkg/sleep/sleep_unsafe.go +++ b/vendor/gvisor.dev/gvisor/pkg/sleep/sleep_unsafe.go @@ -68,6 +68,7 @@ package sleep import ( + "context" "sync/atomic" "unsafe" @@ -129,7 +130,7 @@ func (s *Sleeper) saveSharedList() *Waker { } // loadSharedList is invoked by stateify. -func (s *Sleeper) loadSharedList(w *Waker) { +func (s *Sleeper) loadSharedList(_ context.Context, w *Waker) { atomic.StorePointer(&s.sharedList, unsafe.Pointer(w)) } @@ -206,7 +207,7 @@ func (s *Sleeper) nextWaker(block, wakepOrSleep bool) *Waker { // See:runtime2.go in the go runtime package for // the values to pass as the waitReason here. const waitReasonSelect = 9 - sync.Gopark(commitSleep, unsafe.Pointer(&s.waitingG), sync.WaitReasonSelect, sync.TraceEvGoBlockSelect, 0) + sync.Gopark(commitSleep, unsafe.Pointer(&s.waitingG), sync.WaitReasonSelect, sync.TraceBlockSelect, 0) } // Pull the shared list out and reverse it in the local @@ -408,7 +409,7 @@ func (w *Waker) saveS() wakerState { } // loadS is invoked by stateify. 
-func (w *Waker) loadS(ws wakerState) { +func (w *Waker) loadS(_ context.Context, ws wakerState) { if ws.asserted { atomic.StorePointer(&w.s, unsafe.Pointer(&assertedSleeper)) } else { diff --git a/vendor/gvisor.dev/gvisor/pkg/sleep/sleep_unsafe_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/sleep/sleep_unsafe_state_autogen.go index d91ace02..b346c34a 100644 --- a/vendor/gvisor.dev/gvisor/pkg/sleep/sleep_unsafe_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/sleep/sleep_unsafe_state_autogen.go @@ -3,6 +3,8 @@ package sleep import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -30,13 +32,13 @@ func (s *Sleeper) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(2, &s.allWakers) } -func (s *Sleeper) afterLoad() {} +func (s *Sleeper) afterLoad(context.Context) {} // +checklocksignore -func (s *Sleeper) StateLoad(stateSourceObject state.Source) { +func (s *Sleeper) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(1, &s.localList) stateSourceObject.Load(2, &s.allWakers) - stateSourceObject.LoadValue(0, new(*Waker), func(y any) { s.loadSharedList(y.(*Waker)) }) + stateSourceObject.LoadValue(0, new(*Waker), func(y any) { s.loadSharedList(ctx, y.(*Waker)) }) } func (w *Waker) StateTypeName() string { @@ -63,13 +65,13 @@ func (w *Waker) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(2, &w.allWakersNext) } -func (w *Waker) afterLoad() {} +func (w *Waker) afterLoad(context.Context) {} // +checklocksignore -func (w *Waker) StateLoad(stateSourceObject state.Source) { +func (w *Waker) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(1, &w.next) stateSourceObject.Load(2, &w.allWakersNext) - stateSourceObject.LoadValue(0, new(wakerState), func(y any) { w.loadS(y.(wakerState)) }) + stateSourceObject.LoadValue(0, new(wakerState), func(y any) { w.loadS(ctx, y.(wakerState)) }) } func init() { diff --git a/vendor/gvisor.dev/gvisor/pkg/state/addr_set.go 
b/vendor/gvisor.dev/gvisor/pkg/state/addr_set.go index 10fe0ea5..49b8bd5e 100644 --- a/vendor/gvisor.dev/gvisor/pkg/state/addr_set.go +++ b/vendor/gvisor.dev/gvisor/pkg/state/addr_set.go @@ -2,6 +2,7 @@ package state import ( "bytes" + "context" "fmt" ) @@ -56,7 +57,7 @@ const ( // // +stateify savable type addrSet struct { - root addrnode `state:".(*addrSegmentDataSlices)"` + root addrnode `state:".([]addrFlatSegment)"` } // IsEmpty returns true if the set contains no segments. @@ -228,42 +229,68 @@ func (s *addrSet) UpperBoundGap(max uintptr) addrGapIterator { return seg.PrevGap() } -// Add inserts the given segment into the set and returns true. If the new -// segment can be merged with adjacent segments, Add will do so. If the new -// segment would overlap an existing segment, Add returns false. If Add -// succeeds, all existing iterators are invalidated. -func (s *addrSet) Add(r addrRange, val *objectEncodeState) bool { - if r.Length() <= 0 { - panic(fmt.Sprintf("invalid segment range %v", r)) +// FirstLargeEnoughGap returns the first gap in the set with at least the given +// length. If no such gap exists, FirstLargeEnoughGap returns a terminal +// iterator. +// +// Precondition: trackGaps must be 1. +func (s *addrSet) FirstLargeEnoughGap(minSize uintptr) addrGapIterator { + if addrtrackGaps != 1 { + panic("set is not tracking gaps") } - gap := s.FindGap(r.Start) - if !gap.Ok() { - return false + gap := s.FirstGap() + if gap.Range().Length() >= minSize { + return gap } - if r.End > gap.End() { - return false - } - s.Insert(gap, r, val) - return true + return gap.NextLargeEnoughGap(minSize) } -// AddWithoutMerging inserts the given segment into the set and returns true. -// If it would overlap an existing segment, AddWithoutMerging does nothing and -// returns false. If AddWithoutMerging succeeds, all existing iterators are -// invalidated. 
-func (s *addrSet) AddWithoutMerging(r addrRange, val *objectEncodeState) bool { - if r.Length() <= 0 { - panic(fmt.Sprintf("invalid segment range %v", r)) +// LastLargeEnoughGap returns the last gap in the set with at least the given +// length. If no such gap exists, LastLargeEnoughGap returns a terminal +// iterator. +// +// Precondition: trackGaps must be 1. +func (s *addrSet) LastLargeEnoughGap(minSize uintptr) addrGapIterator { + if addrtrackGaps != 1 { + panic("set is not tracking gaps") } - gap := s.FindGap(r.Start) - if !gap.Ok() { - return false + gap := s.LastGap() + if gap.Range().Length() >= minSize { + return gap } - if r.End > gap.End() { - return false + return gap.PrevLargeEnoughGap(minSize) +} + +// LowerBoundLargeEnoughGap returns the first gap in the set with at least the +// given length and whose range contains a key greater than or equal to min. If +// no such gap exists, LowerBoundLargeEnoughGap returns a terminal iterator. +// +// Precondition: trackGaps must be 1. +func (s *addrSet) LowerBoundLargeEnoughGap(min, minSize uintptr) addrGapIterator { + if addrtrackGaps != 1 { + panic("set is not tracking gaps") } - s.InsertWithoutMergingUnchecked(gap, r, val) - return true + gap := s.LowerBoundGap(min) + if gap.Range().Length() >= minSize { + return gap + } + return gap.NextLargeEnoughGap(minSize) +} + +// UpperBoundLargeEnoughGap returns the last gap in the set with at least the +// given length and whose range contains a key less than or equal to max. If no +// such gap exists, UpperBoundLargeEnoughGap returns a terminal iterator. +// +// Precondition: trackGaps must be 1. +func (s *addrSet) UpperBoundLargeEnoughGap(max, minSize uintptr) addrGapIterator { + if addrtrackGaps != 1 { + panic("set is not tracking gaps") + } + gap := s.UpperBoundGap(max) + if gap.Range().Length() >= minSize { + return gap + } + return gap.PrevLargeEnoughGap(minSize) } // Insert inserts the given segment into the given gap. 
If the new segment can @@ -360,6 +387,107 @@ func (s *addrSet) InsertWithoutMergingUnchecked(gap addrGapIterator, r addrRange return addrIterator{gap.node, gap.index} } +// InsertRange inserts the given segment into the set. If the new segment can +// be merged with adjacent segments, InsertRange will do so. InsertRange +// returns an iterator to the segment containing the inserted value (which may +// have been merged with other values). All existing iterators (excluding the +// returned iterator) are invalidated. +// +// If the new segment would overlap an existing segment, or if r is invalid, +// InsertRange panics. +// +// InsertRange searches the set to find the gap to insert into. If the caller +// already has the appropriate GapIterator, or if the caller needs to do +// additional work between finding the gap and insertion, use Insert instead. +func (s *addrSet) InsertRange(r addrRange, val *objectEncodeState) addrIterator { + if r.Length() <= 0 { + panic(fmt.Sprintf("invalid segment range %v", r)) + } + seg, gap := s.Find(r.Start) + if seg.Ok() { + panic(fmt.Sprintf("new segment %v overlaps existing segment %v", r, seg.Range())) + } + if gap.End() < r.End { + panic(fmt.Sprintf("new segment %v overlaps existing segment %v", r, gap.NextSegment().Range())) + } + return s.Insert(gap, r, val) +} + +// InsertWithoutMergingRange inserts the given segment into the set and returns +// an iterator to the inserted segment. All existing iterators (excluding the +// returned iterator) are invalidated. +// +// If the new segment would overlap an existing segment, or if r is invalid, +// InsertWithoutMergingRange panics. +// +// InsertWithoutMergingRange searches the set to find the gap to insert into. +// If the caller already has the appropriate GapIterator, or if the caller +// needs to do additional work between finding the gap and insertion, use +// InsertWithoutMerging instead. 
+func (s *addrSet) InsertWithoutMergingRange(r addrRange, val *objectEncodeState) addrIterator { + if r.Length() <= 0 { + panic(fmt.Sprintf("invalid segment range %v", r)) + } + seg, gap := s.Find(r.Start) + if seg.Ok() { + panic(fmt.Sprintf("new segment %v overlaps existing segment %v", r, seg.Range())) + } + if gap.End() < r.End { + panic(fmt.Sprintf("new segment %v overlaps existing segment %v", r, gap.NextSegment().Range())) + } + return s.InsertWithoutMerging(gap, r, val) +} + +// TryInsertRange attempts to insert the given segment into the set. If the new +// segment can be merged with adjacent segments, TryInsertRange will do so. +// TryInsertRange returns an iterator to the segment containing the inserted +// value (which may have been merged with other values). All existing iterators +// (excluding the returned iterator) are invalidated. +// +// If the new segment would overlap an existing segment, TryInsertRange does +// nothing and returns a terminal iterator. +// +// TryInsertRange searches the set to find the gap to insert into. If the +// caller already has the appropriate GapIterator, or if the caller needs to do +// additional work between finding the gap and insertion, use Insert instead. +func (s *addrSet) TryInsertRange(r addrRange, val *objectEncodeState) addrIterator { + if r.Length() <= 0 { + panic(fmt.Sprintf("invalid segment range %v", r)) + } + seg, gap := s.Find(r.Start) + if seg.Ok() { + return addrIterator{} + } + if gap.End() < r.End { + return addrIterator{} + } + return s.Insert(gap, r, val) +} + +// TryInsertWithoutMergingRange attempts to insert the given segment into the +// set. If successful, it returns an iterator to the inserted segment; all +// existing iterators (excluding the returned iterator) are invalidated. If the +// new segment would overlap an existing segment, TryInsertWithoutMergingRange +// does nothing and returns a terminal iterator. 
+// +// TryInsertWithoutMergingRange searches the set to find the gap to insert +// into. If the caller already has the appropriate GapIterator, or if the +// caller needs to do additional work between finding the gap and insertion, +// use InsertWithoutMerging instead. +func (s *addrSet) TryInsertWithoutMergingRange(r addrRange, val *objectEncodeState) addrIterator { + if r.Length() <= 0 { + panic(fmt.Sprintf("invalid segment range %v", r)) + } + seg, gap := s.Find(r.Start) + if seg.Ok() { + return addrIterator{} + } + if gap.End() < r.End { + return addrIterator{} + } + return s.InsertWithoutMerging(gap, r, val) +} + // Remove removes the given segment and returns an iterator to the vacated gap. // All existing iterators (including seg, but not including the returned // iterator) are invalidated. @@ -396,6 +524,11 @@ func (s *addrSet) RemoveAll() { // RemoveRange removes all segments in the given range. An iterator to the // newly formed gap is returned, and all existing iterators are invalidated. +// +// RemoveRange searches the set to find segments to remove. If the caller +// already has an iterator to either end of the range of segments to remove, or +// if the caller needs to do additional work before removing each segment, +// iterate segments and call Remove in a loop instead. func (s *addrSet) RemoveRange(r addrRange) addrGapIterator { seg, gap := s.Find(r.Start) if seg.Ok() { @@ -403,12 +536,34 @@ func (s *addrSet) RemoveRange(r addrRange) addrGapIterator { gap = s.Remove(seg) } for seg = gap.NextSegment(); seg.Ok() && seg.Start() < r.End; seg = gap.NextSegment() { - seg = s.Isolate(seg, r) + seg = s.SplitAfter(seg, r.End) gap = s.Remove(seg) } return gap } +// RemoveFullRange is equivalent to RemoveRange, except that if any key in the +// given range does not correspond to a segment, RemoveFullRange panics. 
+func (s *addrSet) RemoveFullRange(r addrRange) addrGapIterator { + seg := s.FindSegment(r.Start) + if !seg.Ok() { + panic(fmt.Sprintf("missing segment at %v", r.Start)) + } + seg = s.SplitBefore(seg, r.Start) + for { + seg = s.SplitAfter(seg, r.End) + end := seg.End() + gap := s.Remove(seg) + if r.End <= end { + return gap + } + seg = gap.NextSegment() + if !seg.Ok() || seg.Start() != end { + panic(fmt.Sprintf("missing segment at %v", end)) + } + } +} + // Merge attempts to merge two neighboring segments. If successful, Merge // returns an iterator to the merged segment, and all existing iterators are // invalidated. Otherwise, Merge returns a terminal iterator. @@ -441,7 +596,68 @@ func (s *addrSet) MergeUnchecked(first, second addrIterator) addrIterator { return addrIterator{} } -// MergeAll attempts to merge all adjacent segments in the set. All existing +// MergePrev attempts to merge the given segment with its predecessor if +// possible, and returns an updated iterator to the extended segment. All +// existing iterators (including seg, but not including the returned iterator) +// are invalidated. +// +// MergePrev is usually used when mutating segments while iterating them in +// order of increasing keys, to attempt merging of each mutated segment with +// its previously-mutated predecessor. In such cases, merging a mutated segment +// with its unmutated successor would incorrectly cause the latter to be +// skipped. +func (s *addrSet) MergePrev(seg addrIterator) addrIterator { + if prev := seg.PrevSegment(); prev.Ok() { + if mseg := s.MergeUnchecked(prev, seg); mseg.Ok() { + seg = mseg + } + } + return seg +} + +// MergeNext attempts to merge the given segment with its successor if +// possible, and returns an updated iterator to the extended segment. All +// existing iterators (including seg, but not including the returned iterator) +// are invalidated. 
+// +// MergeNext is usually used when mutating segments while iterating them in +// order of decreasing keys, to attempt merging of each mutated segment with +// its previously-mutated successor. In such cases, merging a mutated segment +// with its unmutated predecessor would incorrectly cause the latter to be +// skipped. +func (s *addrSet) MergeNext(seg addrIterator) addrIterator { + if next := seg.NextSegment(); next.Ok() { + if mseg := s.MergeUnchecked(seg, next); mseg.Ok() { + seg = mseg + } + } + return seg +} + +// Unisolate attempts to merge the given segment with its predecessor and +// successor if possible, and returns an updated iterator to the extended +// segment. All existing iterators (including seg, but not including the +// returned iterator) are invalidated. +// +// Unisolate is usually used in conjunction with Isolate when mutating part of +// a single segment in a way that may affect its mergeability. For the reasons +// described by MergePrev and MergeNext, it is usually incorrect to use the +// return value of Unisolate in a loop variable. +func (s *addrSet) Unisolate(seg addrIterator) addrIterator { + if prev := seg.PrevSegment(); prev.Ok() { + if mseg := s.MergeUnchecked(prev, seg); mseg.Ok() { + seg = mseg + } + } + if next := seg.NextSegment(); next.Ok() { + if mseg := s.MergeUnchecked(seg, next); mseg.Ok() { + seg = mseg + } + } + return seg +} + +// MergeAll merges all mergeable adjacent segments in the set. All existing // iterators are invalidated. func (s *addrSet) MergeAll() { seg := s.FirstSegment() @@ -458,15 +674,20 @@ func (s *addrSet) MergeAll() { } } -// MergeRange attempts to merge all adjacent segments that contain a key in the -// specific range. All existing iterators are invalidated. -func (s *addrSet) MergeRange(r addrRange) { +// MergeInsideRange attempts to merge all adjacent segments that contain a key +// in the specific range. All existing iterators are invalidated. 
+// +// MergeInsideRange only makes sense after mutating the set in a way that may +// change the mergeability of modified segments; callers should prefer to use +// MergePrev or MergeNext during the mutating loop instead (depending on the +// direction of iteration), in order to avoid a redundant search. +func (s *addrSet) MergeInsideRange(r addrRange) { seg := s.LowerBoundSegment(r.Start) if !seg.Ok() { return } next := seg.NextSegment() - for next.Ok() && next.Range().Start < r.End { + for next.Ok() && next.Start() < r.End { if mseg := s.MergeUnchecked(seg, next); mseg.Ok() { seg, next = mseg, mseg.NextSegment() } else { @@ -475,9 +696,14 @@ func (s *addrSet) MergeRange(r addrRange) { } } -// MergeAdjacent attempts to merge the segment containing r.Start with its +// MergeOutsideRange attempts to merge the segment containing r.Start with its // predecessor, and the segment containing r.End-1 with its successor. -func (s *addrSet) MergeAdjacent(r addrRange) { +// +// MergeOutsideRange only makes sense after mutating the set in a way that may +// change the mergeability of modified segments; callers should prefer to use +// MergePrev or MergeNext during the mutating loop instead (depending on the +// direction of iteration), in order to avoid two redundant searches. +func (s *addrSet) MergeOutsideRange(r addrRange) { first := s.FindSegment(r.Start) if first.Ok() { if prev := first.PrevSegment(); prev.Ok() { @@ -522,21 +748,58 @@ func (s *addrSet) SplitUnchecked(seg addrIterator, split uintptr) (addrIterator, return seg2.PrevSegment(), seg2 } -// SplitAt splits the segment straddling split, if one exists. SplitAt returns -// true if a segment was split and false otherwise. If SplitAt splits a -// segment, all existing iterators are invalidated. 
-func (s *addrSet) SplitAt(split uintptr) bool { - if seg := s.FindSegment(split); seg.Ok() && seg.Range().CanSplitAt(split) { - s.SplitUnchecked(seg, split) - return true +// SplitBefore ensures that the given segment's start is at least start by +// splitting at start if necessary, and returns an updated iterator to the +// bounded segment. All existing iterators (including seg, but not including +// the returned iterator) are invalidated. +// +// SplitBefore is usually when mutating segments in a range. In such cases, +// when iterating segments in order of increasing keys, the first segment may +// extend beyond the start of the range to be mutated, and needs to be +// SplitBefore to ensure that only the part of the segment within the range is +// mutated. When iterating segments in order of decreasing keys, SplitBefore +// and SplitAfter; i.e. SplitBefore needs to be invoked on each segment, while +// SplitAfter only needs to be invoked on the first. +// +// Preconditions: start < seg.End(). +func (s *addrSet) SplitBefore(seg addrIterator, start uintptr) addrIterator { + if seg.Range().CanSplitAt(start) { + _, seg = s.SplitUnchecked(seg, start) } - return false + return seg } -// Isolate ensures that the given segment's range does not escape r by -// splitting at r.Start and r.End if necessary, and returns an updated iterator -// to the bounded segment. All existing iterators (including seg, but not -// including the returned iterators) are invalidated. +// SplitAfter ensures that the given segment's end is at most end by splitting +// at end if necessary, and returns an updated iterator to the bounded segment. +// All existing iterators (including seg, but not including the returned +// iterator) are invalidated. +// +// SplitAfter is usually used when mutating segments in a range. 
In such cases, +// when iterating segments in order of increasing keys, each iterated segment +// may extend beyond the end of the range to be mutated, and needs to be +// SplitAfter to ensure that only the part of the segment within the range is +// mutated. When iterating segments in order of decreasing keys, SplitBefore +// and SplitAfter exchange roles; i.e. SplitBefore needs to be invoked on each +// segment, while SplitAfter only needs to be invoked on the first. +// +// Preconditions: seg.Start() < end. +func (s *addrSet) SplitAfter(seg addrIterator, end uintptr) addrIterator { + if seg.Range().CanSplitAt(end) { + seg, _ = s.SplitUnchecked(seg, end) + } + return seg +} + +// Isolate ensures that the given segment's range is a subset of r by splitting +// at r.Start and r.End if necessary, and returns an updated iterator to the +// bounded segment. All existing iterators (including seg, but not including +// the returned iterators) are invalidated. +// +// Isolate is usually used when mutating part of a single segment, or when +// mutating segments in a range where the first segment is not necessarily +// split, making use of SplitBefore/SplitAfter complex. +// +// Preconditions: seg.Range().Overlaps(r). func (s *addrSet) Isolate(seg addrIterator, r addrRange) addrIterator { if seg.Range().CanSplitAt(r.Start) { _, seg = s.SplitUnchecked(seg, r.Start) @@ -547,32 +810,118 @@ func (s *addrSet) Isolate(seg addrIterator, r addrRange) addrIterator { return seg } -// ApplyContiguous applies a function to a contiguous range of segments, -// splitting if necessary. The function is applied until the first gap is -// encountered, at which point the gap is returned. If the function is applied -// across the entire range, a terminal gap is returned. All existing iterators -// are invalidated. +// LowerBoundSegmentSplitBefore combines LowerBoundSegment and SplitBefore. // -// N.B. The Iterator must not be invalidated by the function. 
-func (s *addrSet) ApplyContiguous(r addrRange, fn func(seg addrIterator)) addrGapIterator { - seg, gap := s.Find(r.Start) - if !seg.Ok() { - return gap +// LowerBoundSegmentSplitBefore is usually used when mutating segments in a +// range while iterating them in order of increasing keys. In such cases, +// LowerBoundSegmentSplitBefore provides an iterator to the first segment to be +// mutated, suitable as the initial value for a loop variable. +func (s *addrSet) LowerBoundSegmentSplitBefore(min uintptr) addrIterator { + seg := s.LowerBoundSegment(min) + if seg.Ok() { + seg = s.SplitBefore(seg, min) } - for { - seg = s.Isolate(seg, r) - fn(seg) - if seg.End() >= r.End { - return addrGapIterator{} - } - gap = seg.NextGap() - if !gap.IsEmpty() { - return gap - } - seg = gap.NextSegment() - if !seg.Ok() { + return seg +} - return addrGapIterator{} +// UpperBoundSegmentSplitAfter combines UpperBoundSegment and SplitAfter. +// +// UpperBoundSegmentSplitAfter is usually used when mutating segments in a +// range while iterating them in order of decreasing keys. In such cases, +// UpperBoundSegmentSplitAfter provides an iterator to the first segment to be +// mutated, suitable as the initial value for a loop variable. +func (s *addrSet) UpperBoundSegmentSplitAfter(max uintptr) addrIterator { + seg := s.UpperBoundSegment(max) + if seg.Ok() { + seg = s.SplitAfter(seg, max) + } + return seg +} + +// VisitRange applies the function f to all segments intersecting the range r, +// in order of ascending keys. Segments will not be split, so f may be called +// on segments lying partially outside r. Non-empty gaps between segments are +// skipped. If a call to f returns false, VisitRange stops iteration +// immediately. +// +// N.B. f must not invalidate iterators into s. 
+func (s *addrSet) VisitRange(r addrRange, f func(seg addrIterator) bool) { + for seg := s.LowerBoundSegment(r.Start); seg.Ok() && seg.Start() < r.End; seg = seg.NextSegment() { + if !f(seg) { + return + } + } +} + +// VisitFullRange is equivalent to VisitRange, except that if any key in r that +// is visited before f returns false does not correspond to a segment, +// VisitFullRange panics. +func (s *addrSet) VisitFullRange(r addrRange, f func(seg addrIterator) bool) { + pos := r.Start + seg := s.FindSegment(r.Start) + for { + if !seg.Ok() { + panic(fmt.Sprintf("missing segment at %v", pos)) + } + if !f(seg) { + return + } + pos = seg.End() + if r.End <= pos { + return + } + seg, _ = seg.NextNonEmpty() + } +} + +// MutateRange applies the function f to all segments intersecting the range r, +// in order of ascending keys. Segments that lie partially outside r are split +// before f is called, such that f only observes segments entirely within r. +// Iterated segments are merged again after f is called. Non-empty gaps between +// segments are skipped. If a call to f returns false, MutateRange stops +// iteration immediately. +// +// MutateRange invalidates all existing iterators. +// +// N.B. f must not invalidate iterators into s. +func (s *addrSet) MutateRange(r addrRange, f func(seg addrIterator) bool) { + seg := s.LowerBoundSegmentSplitBefore(r.Start) + for seg.Ok() && seg.Start() < r.End { + seg = s.SplitAfter(seg, r.End) + cont := f(seg) + seg = s.MergePrev(seg) + if !cont { + s.MergeNext(seg) + return + } + seg = seg.NextSegment() + } + if seg.Ok() { + s.MergePrev(seg) + } +} + +// MutateFullRange is equivalent to MutateRange, except that if any key in r +// that is visited before f returns false does not correspond to a segment, +// MutateFullRange panics. 
+func (s *addrSet) MutateFullRange(r addrRange, f func(seg addrIterator) bool) { + seg := s.FindSegment(r.Start) + if !seg.Ok() { + panic(fmt.Sprintf("missing segment at %v", r.Start)) + } + seg = s.SplitBefore(seg, r.Start) + for { + seg = s.SplitAfter(seg, r.End) + cont := f(seg) + end := seg.End() + seg = s.MergePrev(seg) + if !cont || r.End <= end { + s.MergeNext(seg) + return + } + seg = seg.NextSegment() + if !seg.Ok() || seg.Start() != end { + panic(fmt.Sprintf("missing segment at %v", end)) } } } @@ -1243,11 +1592,10 @@ func (seg addrIterator) NextGap() addrGapIterator { // Otherwise, exactly one of the iterators returned by PrevNonEmpty will be // non-terminal. func (seg addrIterator) PrevNonEmpty() (addrIterator, addrGapIterator) { - gap := seg.PrevGap() - if gap.Range().Length() != 0 { - return addrIterator{}, gap + if prev := seg.PrevSegment(); prev.Ok() && prev.End() == seg.Start() { + return prev, addrGapIterator{} } - return gap.PrevSegment(), addrGapIterator{} + return addrIterator{}, seg.PrevGap() } // NextNonEmpty returns the iterated segment's successor if it is adjacent, or @@ -1256,11 +1604,10 @@ func (seg addrIterator) PrevNonEmpty() (addrIterator, addrGapIterator) { // Otherwise, exactly one of the iterators returned by NextNonEmpty will be // non-terminal. func (seg addrIterator) NextNonEmpty() (addrIterator, addrGapIterator) { - gap := seg.NextGap() - if gap.Range().Length() != 0 { - return addrIterator{}, gap + if next := seg.NextSegment(); next.Ok() && next.Start() == seg.End() { + return next, addrGapIterator{} } - return gap.NextSegment(), addrGapIterator{} + return addrIterator{}, seg.NextGap() } // A GapIterator is conceptually one of: @@ -1379,35 +1726,36 @@ func (gap addrGapIterator) NextLargeEnoughGap(minSize uintptr) addrGapIterator { // // Preconditions: gap is NOT the trailing gap of a non-leaf node. 
func (gap addrGapIterator) nextLargeEnoughGapHelper(minSize uintptr) addrGapIterator { + for { - for gap.node != nil && - (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) { - gap.node, gap.index = gap.node.parent, gap.node.parentIndex - } - - if gap.node == nil { - return addrGapIterator{} - } - - gap.index++ - for gap.index <= gap.node.nrSegments { - if gap.node.hasChildren { - if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() { - return largeEnoughGap - } - } else { - if gap.Range().Length() >= minSize { - return gap - } + for gap.node != nil && + (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) { + gap.node, gap.index = gap.node.parent, gap.node.parentIndex } - gap.index++ - } - gap.node, gap.index = gap.node.parent, gap.node.parentIndex - if gap.node != nil && gap.index == gap.node.nrSegments { + if gap.node == nil { + return addrGapIterator{} + } + + gap.index++ + for gap.index <= gap.node.nrSegments { + if gap.node.hasChildren { + if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() { + return largeEnoughGap + } + } else { + if gap.Range().Length() >= minSize { + return gap + } + } + gap.index++ + } gap.node, gap.index = gap.node.parent, gap.node.parentIndex + if gap.node != nil && gap.index == gap.node.nrSegments { + + gap.node, gap.index = gap.node.parent, gap.node.parentIndex + } } - return gap.nextLargeEnoughGapHelper(minSize) } // PrevLargeEnoughGap returns the iterated gap's first prev gap with larger or @@ -1433,35 +1781,36 @@ func (gap addrGapIterator) PrevLargeEnoughGap(minSize uintptr) addrGapIterator { // // Preconditions: gap is NOT the first gap of a non-leaf node. 
func (gap addrGapIterator) prevLargeEnoughGapHelper(minSize uintptr) addrGapIterator { + for { - for gap.node != nil && - (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) { - gap.node, gap.index = gap.node.parent, gap.node.parentIndex - } - - if gap.node == nil { - return addrGapIterator{} - } - - gap.index-- - for gap.index >= 0 { - if gap.node.hasChildren { - if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() { - return largeEnoughGap - } - } else { - if gap.Range().Length() >= minSize { - return gap - } + for gap.node != nil && + (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) { + gap.node, gap.index = gap.node.parent, gap.node.parentIndex } - gap.index-- - } - gap.node, gap.index = gap.node.parent, gap.node.parentIndex - if gap.node != nil && gap.index == 0 { + if gap.node == nil { + return addrGapIterator{} + } + + gap.index-- + for gap.index >= 0 { + if gap.node.hasChildren { + if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() { + return largeEnoughGap + } + } else { + if gap.Range().Length() >= minSize { + return gap + } + } + gap.index-- + } gap.node, gap.index = gap.node.parent, gap.node.parentIndex + if gap.node != nil && gap.index == 0 { + + gap.node, gap.index = gap.node.parent, gap.node.parentIndex + } } - return gap.prevLargeEnoughGapHelper(minSize) } // segmentBeforePosition returns the predecessor segment of the position given @@ -1545,50 +1894,49 @@ func (n *addrnode) writeDebugString(buf *bytes.Buffer, prefix string) { } } -// SegmentDataSlices represents segments from a set as slices of start, end, and -// values. SegmentDataSlices is primarily used as an intermediate representation -// for save/restore and the layout here is optimized for that. +// FlatSegment represents a segment as a single object. 
FlatSegment is used as +// an intermediate representation for save/restore and tests. // // +stateify savable -type addrSegmentDataSlices struct { - Start []uintptr - End []uintptr - Values []*objectEncodeState +type addrFlatSegment struct { + Start uintptr + End uintptr + Value *objectEncodeState } -// ExportSortedSlices returns a copy of all segments in the given set, in -// ascending key order. -func (s *addrSet) ExportSortedSlices() *addrSegmentDataSlices { - var sds addrSegmentDataSlices +// ExportSlice returns a copy of all segments in the given set, in ascending +// key order. +func (s *addrSet) ExportSlice() []addrFlatSegment { + var fs []addrFlatSegment for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { - sds.Start = append(sds.Start, seg.Start()) - sds.End = append(sds.End, seg.End()) - sds.Values = append(sds.Values, seg.Value()) + fs = append(fs, addrFlatSegment{ + Start: seg.Start(), + End: seg.End(), + Value: seg.Value(), + }) } - sds.Start = sds.Start[:len(sds.Start):len(sds.Start)] - sds.End = sds.End[:len(sds.End):len(sds.End)] - sds.Values = sds.Values[:len(sds.Values):len(sds.Values)] - return &sds + return fs } -// ImportSortedSlices initializes the given set from the given slice. +// ImportSlice initializes the given set from the given slice. // // Preconditions: // - s must be empty. -// - sds must represent a valid set (the segments in sds must have valid +// - fs must represent a valid set (the segments in fs must have valid // lengths that do not overlap). -// - The segments in sds must be sorted in ascending key order. -func (s *addrSet) ImportSortedSlices(sds *addrSegmentDataSlices) error { +// - The segments in fs must be sorted in ascending key order. 
+func (s *addrSet) ImportSlice(fs []addrFlatSegment) error { if !s.IsEmpty() { return fmt.Errorf("cannot import into non-empty set %v", s) } gap := s.FirstGap() - for i := range sds.Start { - r := addrRange{sds.Start[i], sds.End[i]} + for i := range fs { + f := &fs[i] + r := addrRange{f.Start, f.End} if !gap.Range().IsSupersetOf(r) { - return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: [%d, %d) => %v", sds.Start[i], sds.End[i], sds.Values[i]) + return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: %v => %v", r, f.Value) } - gap = s.InsertWithoutMerging(gap, r, sds.Values[i]).NextGap() + gap = s.InsertWithoutMerging(gap, r, f.Value).NextGap() } return nil } @@ -1632,12 +1980,15 @@ func (s *addrSet) countSegments() (segments int) { } return segments } -func (s *addrSet) saveRoot() *addrSegmentDataSlices { - return s.ExportSortedSlices() +func (s *addrSet) saveRoot() []addrFlatSegment { + fs := s.ExportSlice() + + fs = fs[:len(fs):len(fs)] + return fs } -func (s *addrSet) loadRoot(sds *addrSegmentDataSlices) { - if err := s.ImportSortedSlices(sds); err != nil { +func (s *addrSet) loadRoot(_ context.Context, fs []addrFlatSegment) { + if err := s.ImportSlice(fs); err != nil { panic(err) } } diff --git a/vendor/gvisor.dev/gvisor/pkg/state/decode.go b/vendor/gvisor.dev/gvisor/pkg/state/decode.go index 777d7768..5fe76ecc 100644 --- a/vendor/gvisor.dev/gvisor/pkg/state/decode.go +++ b/vendor/gvisor.dev/gvisor/pkg/state/decode.go @@ -244,7 +244,7 @@ func (ds *decodeState) waitObject(ods *objectDecodeState, encoded wire.Object, c // See decodeObject; we need to wait for the array (if non-nil). ds.wait(ods, objectID(sv.Ref.Root), callback) } else if iv, ok := encoded.(*wire.Interface); ok { - // It's an interface (wait recurisvely). + // It's an interface (wait recursively). ds.waitObject(ods, iv.Value, callback) } else if callback != nil { // Nothing to wait for: execute the callback immediately. 
@@ -385,7 +385,7 @@ func (ds *decodeState) decodeStruct(ods *objectDecodeState, obj reflect.Value, e if sl, ok := obj.Addr().Interface().(SaverLoader); ok { // Note: may be a registered empty struct which does not // implement the saver/loader interfaces. - sl.StateLoad(Source{internal: od}) + sl.StateLoad(ds.ctx, Source{internal: od}) } } @@ -567,7 +567,7 @@ func (ds *decodeState) decodeObject(ods *objectDecodeState, obj reflect.Value, e case *wire.Interface: ds.decodeInterface(ods, obj, x) default: - // Shoud not happen, not propagated as an error. + // Should not happen, not propagated as an error. Failf("unknown object %#v for %q", encoded, obj.Type().Name()) } } @@ -691,7 +691,7 @@ func (ds *decodeState) Load(obj reflect.Value) { } } }); err != nil { - Failf("error executing callbacks for %#v: %w", ods.obj.Interface(), err) + Failf("error executing callbacks: %w\nfor object %#v", err, ods.obj.Interface()) } // Check if we have any remaining dependency cycles. If there are any diff --git a/vendor/gvisor.dev/gvisor/pkg/state/encode.go b/vendor/gvisor.dev/gvisor/pkg/state/encode.go index 9f15c3c2..79330547 100644 --- a/vendor/gvisor.dev/gvisor/pkg/state/encode.go +++ b/vendor/gvisor.dev/gvisor/pkg/state/encode.go @@ -31,7 +31,7 @@ type objectEncodeState struct { // obj is the object value. Note that this may be replaced if we // encounter an object that contains this object. When this happens (in - // resolve), we will update existing references approprately, below, + // resolve), we will update existing references appropriately, below, // and defer a re-encoding of the object. 
obj reflect.Value @@ -417,7 +417,7 @@ func traverse(rootType, targetType reflect.Type, rootAddr, targetAddr uintptr) [ Failf("no field in root type %v contains target type %v", rootType, targetType) case reflect.Array: - // Since arrays have homogenous types, all elements have the + // Since arrays have homogeneous types, all elements have the // same size and we can compute where the target lives. This // does not matter for the purpose of typing, but matters for // the purpose of computing the address of the given index. @@ -432,7 +432,7 @@ func traverse(rootType, targetType reflect.Type, rootAddr, targetAddr uintptr) [ default: // For any other type, there's no possibility of aliasing so if - // the types didn't match earlier then we have an addresss + // the types didn't match earlier then we have an address // collision which shouldn't be possible at this point. Failf("traverse failed for root type %v and target type %v", rootType, targetType) } diff --git a/vendor/gvisor.dev/gvisor/pkg/state/state.go b/vendor/gvisor.dev/gvisor/pkg/state/state.go index 4a9e6ead..0b62eb9c 100644 --- a/vendor/gvisor.dev/gvisor/pkg/state/state.go +++ b/vendor/gvisor.dev/gvisor/pkg/state/state.go @@ -211,7 +211,7 @@ type SaverLoader interface { StateSave(Sink) // StateLoad loads the state of the object. - StateLoad(Source) + StateLoad(context.Context, Source) } // Source is used for Type.StateLoad. diff --git a/vendor/gvisor.dev/gvisor/pkg/state/types.go b/vendor/gvisor.dev/gvisor/pkg/state/types.go index b96423e1..d3e1cbfe 100644 --- a/vendor/gvisor.dev/gvisor/pkg/state/types.go +++ b/vendor/gvisor.dev/gvisor/pkg/state/types.go @@ -198,7 +198,7 @@ var singleFieldOrder = []int{0} // Lookup looks up or registers the given object. // // First, the typeID is searched to see if this has already been appropriately -// reconciled. If no, then a reconcilation will take place that may result in a +// reconciled. 
If no, then a reconciliation will take place that may result in a // field ordering. If a nil reconciledTypeEntry is returned from this method, // then the object does not support the Type interface. // diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_global_state.go b/vendor/gvisor.dev/gvisor/pkg/sync/fence.go similarity index 76% rename from vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_global_state.go rename to vendor/gvisor.dev/gvisor/pkg/sync/fence.go index dfec4258..6706676a 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_global_state.go +++ b/vendor/gvisor.dev/gvisor/pkg/sync/fence.go @@ -1,4 +1,4 @@ -// Copyright 2018 The gVisor Authors. +// Copyright 2023 The gVisor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -package stack +package sync -// StackFromEnv is the global stack created in restore run. -// FIXME(b/36201077) -var StackFromEnv *Stack +// MemoryFenceReads ensures that all preceding memory loads happen before +// following memory loads. +func MemoryFenceReads() diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/fence_amd64.s b/vendor/gvisor.dev/gvisor/pkg/sync/fence_amd64.s new file mode 100644 index 00000000..87766f1d --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/sync/fence_amd64.s @@ -0,0 +1,26 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build amd64 +// +build amd64 + +#include "textflag.h" + +// func MemoryFenceReads() +TEXT ·MemoryFenceReads(SB),NOSPLIT|NOFRAME,$0-0 + // No memory fence is required on x86. However, a compiler fence is + // required to prevent the compiler from reordering memory accesses. The Go + // compiler will not reorder memory accesses around a call to an assembly + // function; compare runtime.publicationBarrier. + RET diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/fence_arm64.s b/vendor/gvisor.dev/gvisor/pkg/sync/fence_arm64.s new file mode 100644 index 00000000..f4f9ce9d --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/sync/fence_arm64.s @@ -0,0 +1,23 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build arm64 +// +build arm64 + +#include "textflag.h" + +// func MemoryFenceReads() +TEXT ·MemoryFenceReads(SB),NOSPLIT|NOFRAME,$0-0 + DMB $0x9 // ISHLD + RET diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/gate_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/sync/gate_unsafe.go index 1f7a0330..0f3b58dc 100644 --- a/vendor/gvisor.dev/gvisor/pkg/sync/gate_unsafe.go +++ b/vendor/gvisor.dev/gvisor/pkg/sync/gate_unsafe.go @@ -140,8 +140,8 @@ func (g *Gate) Close() { // The last call to Leave arrived while we were setting up closingG. return } - // WaitReasonSemacquire/TraceEvGoBlockSync are consistent with WaitGroup. 
- gopark(gateCommit, gohacks.Noescape(unsafe.Pointer(&g.closingG)), WaitReasonSemacquire, TraceEvGoBlockSync, 0) + // WaitReasonSemacquire/TraceBlockSync are consistent with WaitGroup. + gopark(gateCommit, gohacks.Noescape(unsafe.Pointer(&g.closingG)), WaitReasonSemacquire, TraceBlockSync, 0) } //go:norace diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_constants.go b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_constants.go index 9a5a47a8..d6eef328 100644 --- a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_constants.go +++ b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_constants.go @@ -20,10 +20,3 @@ const ( WaitReasonChanReceive uint8 = 14 // +checkconst runtime waitReasonChanReceive WaitReasonSemacquire uint8 = 18 // +checkconst runtime waitReasonSemacquire ) - -// Values for the traceEv argument to gopark, from Go's src/runtime/trace.go. -const ( - TraceEvGoBlockRecv byte = 23 // +checkconst runtime traceEvGoBlockRecv - TraceEvGoBlockSelect byte = 24 // +checkconst runtime traceEvGoBlockSelect - TraceEvGoBlockSync byte = 25 // +checkconst runtime traceEvGoBlockSync -) diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_exectracer1.go b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_exectracer1.go new file mode 100644 index 00000000..e35e5cf8 --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_exectracer1.go @@ -0,0 +1,29 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !goexperiment.exectracer2 + +package sync + +// TraceBlockReason constants, from Go's src/runtime/trace.go. +const ( + TraceBlockSelect TraceBlockReason = traceEvGoBlockSelect // +checkconst runtime traceBlockSelect + TraceBlockSync = traceEvGoBlockSync // +checkconst runtime traceBlockSync +) + +// Tracer event types, from Go's src/runtime/trace.go. +const ( + traceEvGoBlockSelect = 24 // +checkconst runtime traceEvGoBlockSelect + traceEvGoBlockSync = 25 // +checkconst runtime traceEvGoBlockSync +) diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_exectracer2.go b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_exectracer2.go new file mode 100644 index 00000000..f6094cab --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_exectracer2.go @@ -0,0 +1,23 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build goexperiment.exectracer2 + +package sync + +// TraceBlockReason constants, from Go's src/runtime/trace2runtime.go. 
+const ( + TraceBlockSelect TraceBlockReason = 3 // +checkconst runtime traceBlockSelect + TraceBlockSync TraceBlockReason = 5 // +checkconst runtime traceBlockSync +) diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_go121_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_go121_unsafe.go new file mode 100644 index 00000000..344b5566 --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_go121_unsafe.go @@ -0,0 +1,16 @@ +// Copyright 2023 The gVisor Authors. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 + +package sync + +import ( + "unsafe" +) + +// Use checkoffset to assert that maptype.hasher (the only field we use) has +// the correct offset. +const maptypeHasherOffset = unsafe.Offsetof(maptype{}.Hasher) // +checkoffset internal/abi MapType.Hasher diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_not_go121_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_not_go121_unsafe.go new file mode 100644 index 00000000..4d7e8b9f --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_not_go121_unsafe.go @@ -0,0 +1,18 @@ +// Copyright 2023 The gVisor Authors. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// runtime.maptype is moved to internal/abi.MapType in Go 1.21. +// +//go:build !go1.21 + +package sync + +import ( + "unsafe" +) + +// Use checkoffset to assert that maptype.hasher (the only field we use) has +// the correct offset. 
+const maptypeHasherOffset = unsafe.Offsetof(maptype{}.Hasher) // +checkoffset runtime maptype.hasher diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_spinning_other.s b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_spinning_other.s index 85501e54..b6391d2b 100644 --- a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_spinning_other.s +++ b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_spinning_other.s @@ -15,4 +15,4 @@ //go:build !amd64 // This file is intentionally left blank. Other arches don't use -// addrOfSpinning, but we still need an input to the nogo temlate rule. +// addrOfSpinning, but we still need an input to the nogo template rule. diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_unsafe.go index 91cda67b..5bc0a92e 100644 --- a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_unsafe.go +++ b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_unsafe.go @@ -3,16 +3,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.18 && !go1.22 -// +build go1.18,!go1.22 - -// //go:linkname directives type-checked by checklinkname. Any other -// non-linkname assumptions outside the Go 1 compatibility guarantee should -// have an accompanied vet check or version guard build tag. - -// Check type definitions and constants when updating Go version. -// -// TODO(b/165820485): add these checks to checklinkname. +// //go:linkname directives type-checked by checklinkname. +// Runtime type copies checked by checkoffset. package sync @@ -37,12 +29,15 @@ func Goyield() { // splitting and race context are not available where it is called. 
// //go:nosplit -func Gopark(unlockf func(uintptr, unsafe.Pointer) bool, lock unsafe.Pointer, reason uint8, traceEv byte, traceskip int) { - gopark(unlockf, lock, reason, traceEv, traceskip) +func Gopark(unlockf func(uintptr, unsafe.Pointer) bool, lock unsafe.Pointer, reason uint8, traceReason TraceBlockReason, traceskip int) { + gopark(unlockf, lock, reason, traceReason, traceskip) } //go:linkname gopark runtime.gopark -func gopark(unlockf func(uintptr, unsafe.Pointer) bool, lock unsafe.Pointer, reason uint8, traceEv byte, traceskip int) +func gopark(unlockf func(uintptr, unsafe.Pointer) bool, lock unsafe.Pointer, reason uint8, traceReason TraceBlockReason, traceskip int) + +// TraceBlockReason is equivalent to runtime.traceBlockReason. +type TraceBlockReason uint8 //go:linkname wakep runtime.wakep func wakep() @@ -107,10 +102,10 @@ func MapKeyHasher(m any) func(unsafe.Pointer, uintptr) uintptr { panic(fmt.Sprintf("sync.MapKeyHasher: m is %v, not map", rtyp)) } mtyp := *(**maptype)(unsafe.Pointer(&m)) - return mtyp.hasher + return mtyp.Hasher } -// maptype is equivalent to the beginning of runtime.maptype. +// maptype is equivalent to the beginning of internal/abi.MapType. type maptype struct { size uintptr ptrdata uintptr @@ -126,7 +121,7 @@ type maptype struct { key unsafe.Pointer elem unsafe.Pointer bucket unsafe.Pointer - hasher func(unsafe.Pointer, uintptr) uintptr + Hasher func(unsafe.Pointer, uintptr) uintptr // more fields } diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/seqcount.go b/vendor/gvisor.dev/gvisor/pkg/sync/seqcount.go index 9adc9532..c90d2d9f 100644 --- a/vendor/gvisor.dev/gvisor/pkg/sync/seqcount.go +++ b/vendor/gvisor.dev/gvisor/pkg/sync/seqcount.go @@ -39,23 +39,6 @@ type SeqCount struct { // SeqCountEpoch tracks writer critical sections in a SeqCount. 
type SeqCountEpoch uint32 -// We assume that: -// -// - All functions in sync/atomic that perform a memory read are at least a -// read fence: memory reads before calls to such functions cannot be reordered -// after the call, and memory reads after calls to such functions cannot be -// reordered before the call, even if those reads do not use sync/atomic. -// -// - All functions in sync/atomic that perform a memory write are at least a -// write fence: memory writes before calls to such functions cannot be -// reordered after the call, and memory writes after calls to such functions -// cannot be reordered before the call, even if those writes do not use -// sync/atomic. -// -// As of this writing, the Go memory model completely fails to describe -// sync/atomic, but these properties are implied by -// https://groups.google.com/forum/#!topic/golang-nuts/7EnEhM3U7B8. - // BeginRead indicates the beginning of a reader critical section. Reader // critical sections DO NOT BLOCK writer critical sections, so operations in a // reader critical section MAY RACE with writer critical sections. Races are @@ -104,6 +87,7 @@ func (s *SeqCount) beginReadSlow() SeqCountEpoch { // Reader critical sections do not need to be explicitly terminated; the last // call to ReadOk is implicitly the end of the reader critical section. func (s *SeqCount) ReadOk(epoch SeqCountEpoch) bool { + MemoryFenceReads() return atomic.LoadUint32(&s.epoch) == uint32(epoch) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/adapters/gonet/gonet.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/adapters/gonet/gonet.go index 5e91f3dd..9ad06ab2 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/adapters/gonet/gonet.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/adapters/gonet/gonet.go @@ -179,6 +179,7 @@ func (d *deadlineTimer) setDeadline(cancelCh *chan struct{}, timer **time.Timer, // "A zero value for t means I/O operations will not time out." 
// - net.Conn.SetDeadline if t.IsZero() { + *timer = nil return } @@ -546,17 +547,15 @@ func DialContextTCP(ctx context.Context, s *stack.Stack, addr tcpip.FullAddress, type UDPConn struct { deadlineTimer - stack *stack.Stack - ep tcpip.Endpoint - wq *waiter.Queue + ep tcpip.Endpoint + wq *waiter.Queue } // NewUDPConn creates a new UDPConn. -func NewUDPConn(s *stack.Stack, wq *waiter.Queue, ep tcpip.Endpoint) *UDPConn { +func NewUDPConn(wq *waiter.Queue, ep tcpip.Endpoint) *UDPConn { c := &UDPConn{ - stack: s, - ep: ep, - wq: wq, + ep: ep, + wq: wq, } c.deadlineTimer.init() return c @@ -586,7 +585,7 @@ func DialUDP(s *stack.Stack, laddr, raddr *tcpip.FullAddress, network tcpip.Netw } } - c := NewUDPConn(s, &wq, ep) + c := NewUDPConn(&wq, ep) if raddr != nil { if err := c.ep.Connect(*raddr); err != nil { diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum.go index d2e01915..5d4e1170 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum.go @@ -30,161 +30,13 @@ func Put(b []byte, xsum uint16) { binary.BigEndian.PutUint16(b, xsum) } -func calculateChecksum(buf []byte, odd bool, initial uint32) (uint16, bool) { - v := initial - - if odd { - v += uint32(buf[0]) - buf = buf[1:] - } - - l := len(buf) - odd = l&1 != 0 - if odd { - l-- - v += uint32(buf[l]) << 8 - } - - for i := 0; i < l; i += 2 { - v += (uint32(buf[i]) << 8) + uint32(buf[i+1]) - } - - return Combine(uint16(v), uint16(v>>16)), odd -} - -func unrolledCalculateChecksum(buf []byte, odd bool, initial uint32) (uint16, bool) { - v := initial - - if odd { - v += uint32(buf[0]) - buf = buf[1:] - } - - l := len(buf) - odd = l&1 != 0 - if odd { - l-- - v += uint32(buf[l]) << 8 - } - for (l - 64) >= 0 { - i := 0 - v += (uint32(buf[i]) << 8) + uint32(buf[i+1]) - v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3]) - v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5]) - v += 
(uint32(buf[i+6]) << 8) + uint32(buf[i+7]) - v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9]) - v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11]) - v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13]) - v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15]) - i += 16 - v += (uint32(buf[i]) << 8) + uint32(buf[i+1]) - v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3]) - v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5]) - v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7]) - v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9]) - v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11]) - v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13]) - v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15]) - i += 16 - v += (uint32(buf[i]) << 8) + uint32(buf[i+1]) - v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3]) - v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5]) - v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7]) - v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9]) - v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11]) - v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13]) - v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15]) - i += 16 - v += (uint32(buf[i]) << 8) + uint32(buf[i+1]) - v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3]) - v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5]) - v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7]) - v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9]) - v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11]) - v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13]) - v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15]) - buf = buf[64:] - l = l - 64 - } - if (l - 32) >= 0 { - i := 0 - v += (uint32(buf[i]) << 8) + uint32(buf[i+1]) - v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3]) - v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5]) - v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7]) - v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9]) - v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11]) - v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13]) - v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15]) - i += 16 - v += (uint32(buf[i]) << 8) + 
uint32(buf[i+1]) - v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3]) - v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5]) - v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7]) - v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9]) - v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11]) - v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13]) - v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15]) - buf = buf[32:] - l = l - 32 - } - if (l - 16) >= 0 { - i := 0 - v += (uint32(buf[i]) << 8) + uint32(buf[i+1]) - v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3]) - v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5]) - v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7]) - v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9]) - v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11]) - v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13]) - v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15]) - buf = buf[16:] - l = l - 16 - } - if (l - 8) >= 0 { - i := 0 - v += (uint32(buf[i]) << 8) + uint32(buf[i+1]) - v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3]) - v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5]) - v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7]) - buf = buf[8:] - l = l - 8 - } - if (l - 4) >= 0 { - i := 0 - v += (uint32(buf[i]) << 8) + uint32(buf[i+1]) - v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3]) - buf = buf[4:] - l = l - 4 - } - - // At this point since l was even before we started unrolling - // there can be only two bytes left to add. - if l != 0 { - v += (uint32(buf[0]) << 8) + uint32(buf[1]) - } - - return Combine(uint16(v), uint16(v>>16)), odd -} - -// Old calculates the checksum (as defined in RFC 1071) of the bytes in -// the given byte array. This function uses a non-optimized implementation. Its -// only retained for reference and to use as a benchmark/test. Most code should -// use the header.Checksum function. -// -// The initial checksum must have been computed on an even number of bytes. 
-func Old(buf []byte, initial uint16) uint16 { - s, _ := calculateChecksum(buf, false, uint32(initial)) - return s -} - // Checksum calculates the checksum (as defined in RFC 1071) of the bytes in the -// given byte array. This function uses an optimized unrolled version of the -// checksum algorithm. +// given byte array. This function uses an optimized version of the checksum +// algorithm. // // The initial checksum must have been computed on an even number of bytes. func Checksum(buf []byte, initial uint16) uint16 { - s, _ := unrolledCalculateChecksum(buf, false, uint32(initial)) + s, _ := calculateChecksum(buf, false, initial) return s } @@ -197,7 +49,7 @@ type Checksumer struct { // Add adds b to checksum. func (c *Checksumer) Add(b []byte) { if len(b) > 0 { - c.sum, c.odd = unrolledCalculateChecksum(b, c.odd, uint32(c.sum)) + c.sum, c.odd = calculateChecksum(b, c.odd, c.sum) } } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_unsafe.go new file mode 100644 index 00000000..66b7ab67 --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_unsafe.go @@ -0,0 +1,182 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package checksum + +import ( + "encoding/binary" + "math/bits" + "unsafe" +) + +// Note: odd indicates whether initial is a partial checksum over an odd number +// of bytes. 
+func calculateChecksum(buf []byte, odd bool, initial uint16) (uint16, bool) { + // Use a larger-than-uint16 accumulator to benefit from parallel summation + // as described in RFC 1071 1.2.C. + acc := uint64(initial) + + // Handle an odd number of previously-summed bytes, and get the return + // value for odd. + if odd { + acc += uint64(buf[0]) + buf = buf[1:] + } + odd = len(buf)&1 != 0 + + // Aligning &buf[0] below is much simpler if len(buf) >= 8; special-case + // smaller bufs. + if len(buf) < 8 { + if len(buf) >= 4 { + acc += (uint64(buf[0]) << 8) + uint64(buf[1]) + acc += (uint64(buf[2]) << 8) + uint64(buf[3]) + buf = buf[4:] + } + if len(buf) >= 2 { + acc += (uint64(buf[0]) << 8) + uint64(buf[1]) + buf = buf[2:] + } + if len(buf) >= 1 { + acc += uint64(buf[0]) << 8 + // buf = buf[1:] is skipped because it's unused and nogo will + // complain. + } + return reduce(acc), odd + } + + // On little-endian architectures, multi-byte loads from buf will load + // bytes in the wrong order. Rather than byte-swap after each load (slow), + // we byte-swap the accumulator before summing any bytes and byte-swap it + // back before returning, which still produces the correct result as + // described in RFC 1071 1.2.B "Byte Order Independence". + // + // acc is at most a uint16 + a uint8, so its upper 32 bits must be 0s. We + // preserve this property by byte-swapping only the lower 32 bits of acc, + // so that additions to acc performed during alignment can't overflow. + acc = uint64(bswapIfLittleEndian32(uint32(acc))) + + // Align &buf[0] to an 8-byte boundary. + bswapped := false + if sliceAddr(buf)&1 != 0 { + // Compute the rest of the partial checksum with bytes swapped, and + // swap back before returning; see the last paragraph of + // RFC 1071 1.2.B. + acc = uint64(bits.ReverseBytes32(uint32(acc))) + bswapped = true + // No `<< 8` here due to the byte swap we just did. 
+ acc += uint64(bswapIfLittleEndian16(uint16(buf[0]))) + buf = buf[1:] + } + if sliceAddr(buf)&2 != 0 { + acc += uint64(*(*uint16)(unsafe.Pointer(&buf[0]))) + buf = buf[2:] + } + if sliceAddr(buf)&4 != 0 { + acc += uint64(*(*uint32)(unsafe.Pointer(&buf[0]))) + buf = buf[4:] + } + + // Sum 64 bytes at a time. Beyond this point, additions to acc may + // overflow, so we have to handle carrying. + for len(buf) >= 64 { + var carry uint64 + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[0])), 0) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[8])), carry) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[16])), carry) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[24])), carry) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[32])), carry) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[40])), carry) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[48])), carry) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[56])), carry) + acc, _ = bits.Add64(acc, 0, carry) + buf = buf[64:] + } + + // Sum the remaining 0-63 bytes. 
+ if len(buf) >= 32 { + var carry uint64 + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[0])), 0) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[8])), carry) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[16])), carry) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[24])), carry) + acc, _ = bits.Add64(acc, 0, carry) + buf = buf[32:] + } + if len(buf) >= 16 { + var carry uint64 + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[0])), 0) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[8])), carry) + acc, _ = bits.Add64(acc, 0, carry) + buf = buf[16:] + } + if len(buf) >= 8 { + var carry uint64 + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[0])), 0) + acc, _ = bits.Add64(acc, 0, carry) + buf = buf[8:] + } + if len(buf) >= 4 { + var carry uint64 + acc, carry = bits.Add64(acc, uint64(*(*uint32)(unsafe.Pointer(&buf[0]))), 0) + acc, _ = bits.Add64(acc, 0, carry) + buf = buf[4:] + } + if len(buf) >= 2 { + var carry uint64 + acc, carry = bits.Add64(acc, uint64(*(*uint16)(unsafe.Pointer(&buf[0]))), 0) + acc, _ = bits.Add64(acc, 0, carry) + buf = buf[2:] + } + if len(buf) >= 1 { + // bswapIfBigEndian16(buf[0]) == bswapIfLittleEndian16(buf[0]<<8). + var carry uint64 + acc, carry = bits.Add64(acc, uint64(bswapIfBigEndian16(uint16(buf[0]))), 0) + acc, _ = bits.Add64(acc, 0, carry) + // buf = buf[1:] is skipped because it's unused and nogo will complain. + } + + // Reduce the checksum to 16 bits and undo byte swaps before returning. + acc16 := bswapIfLittleEndian16(reduce(acc)) + if bswapped { + acc16 = bits.ReverseBytes16(acc16) + } + return acc16, odd +} + +func reduce(acc uint64) uint16 { + // Ideally we would do: + // return uint16(acc>>48) +' uint16(acc>>32) +' uint16(acc>>16) +' uint16(acc) + // for more instruction-level parallelism; however, there is no + // bits.Add16(). 
+ acc = (acc >> 32) + (acc & 0xffff_ffff) // at most 0x1_ffff_fffe + acc32 := uint32(acc>>32 + acc) // at most 0xffff_ffff + acc32 = (acc32 >> 16) + (acc32 & 0xffff) // at most 0x1_fffe + return uint16(acc32>>16 + acc32) // at most 0xffff +} + +func bswapIfLittleEndian32(val uint32) uint32 { + return binary.BigEndian.Uint32((*[4]byte)(unsafe.Pointer(&val))[:]) +} + +func bswapIfLittleEndian16(val uint16) uint16 { + return binary.BigEndian.Uint16((*[2]byte)(unsafe.Pointer(&val))[:]) +} + +func bswapIfBigEndian16(val uint16) uint16 { + return binary.LittleEndian.Uint16((*[2]byte)(unsafe.Pointer(&val))[:]) +} + +func sliceAddr(buf []byte) uintptr { + return uintptr(unsafe.Pointer(unsafe.SliceData(buf))) +} diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_unsafe_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_unsafe_state_autogen.go new file mode 100644 index 00000000..936aef74 --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_unsafe_state_autogen.go @@ -0,0 +1,3 @@ +// automatically generated by stateify. + +package checksum diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/errors.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/errors.go index 78cc9fdd..63d1fd9f 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/errors.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/errors.go @@ -589,7 +589,7 @@ func (*ErrMissingRequiredFields) isError() {} func (*ErrMissingRequiredFields) IgnoreStats() bool { return true } -func (*ErrMissingRequiredFields) String() string { return "mising required fields" } +func (*ErrMissingRequiredFields) String() string { return "missing required fields" } // ErrMulticastInputCannotBeOutput indicates that an input interface matches an // output interface in the same multicast route. 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/checksum.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/checksum.go index cde74e73..060b4a86 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/checksum.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/checksum.go @@ -32,9 +32,9 @@ func PseudoHeaderChecksum(protocol tcpip.TransportProtocolNumber, srcAddr tcpip. xsum = checksum.Checksum(dstAddr.AsSlice(), xsum) // Add the length portion of the checksum to the pseudo-checksum. - tmp := make([]byte, 2) - binary.BigEndian.PutUint16(tmp, totalLen) - xsum = checksum.Checksum(tmp, xsum) + var tmp [2]byte + binary.BigEndian.PutUint16(tmp[:], totalLen) + xsum = checksum.Checksum(tmp[:], xsum) return checksum.Checksum([]byte{0, uint8(protocol)}, xsum) } @@ -57,6 +57,9 @@ func checksumUpdate2ByteAlignedUint16(xsum, old, new uint16) uint16 { // checksum C, the new checksum C' is: // // C' = C + (-m) + m' = C + (m' - m) + if old == new { + return xsum + } return checksum.Combine(xsum, checksum.Combine(new, ^old)) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/eth.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/eth.go index 11230ff4..d4575730 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/eth.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/eth.go @@ -46,6 +46,9 @@ const ( // EthernetMinimumSize is the minimum size of a valid ethernet frame. EthernetMinimumSize = 14 + // EthernetMaximumSize is the maximum size of a valid ethernet frame. + EthernetMaximumSize = 18 + // EthernetAddressSize is the size, in bytes, of an ethernet address. EthernetAddressSize = 6 @@ -82,7 +85,7 @@ const ( // capture all traffic. EthernetProtocolAll tcpip.NetworkProtocolNumber = 0x0003 - // EthernetProtocolPUP is the PARC Universial Packet protocol ethertype. + // EthernetProtocolPUP is the PARC Universal Packet protocol ethertype. 
EthernetProtocolPUP tcpip.NetworkProtocolNumber = 0x0200 ) diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/header_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/header_state_autogen.go index d9f84677..743b11b6 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/header_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/header_state_autogen.go @@ -3,6 +3,8 @@ package header import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -36,10 +38,10 @@ func (t *TCPSynOptions) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(6, &t.Flags) } -func (t *TCPSynOptions) afterLoad() {} +func (t *TCPSynOptions) afterLoad(context.Context) {} // +checklocksignore -func (t *TCPSynOptions) StateLoad(stateSourceObject state.Source) { +func (t *TCPSynOptions) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &t.MSS) stateSourceObject.Load(1, &t.WS) stateSourceObject.Load(2, &t.TS) @@ -69,10 +71,10 @@ func (r *SACKBlock) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &r.End) } -func (r *SACKBlock) afterLoad() {} +func (r *SACKBlock) afterLoad(context.Context) {} // +checklocksignore -func (r *SACKBlock) StateLoad(stateSourceObject state.Source) { +func (r *SACKBlock) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &r.Start) stateSourceObject.Load(1, &r.End) } @@ -101,10 +103,10 @@ func (t *TCPOptions) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(3, &t.SACKBlocks) } -func (t *TCPOptions) afterLoad() {} +func (t *TCPOptions) afterLoad(context.Context) {} // +checklocksignore -func (t *TCPOptions) StateLoad(stateSourceObject state.Source) { +func (t *TCPOptions) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &t.TS) stateSourceObject.Load(1, &t.TSVal) stateSourceObject.Load(2, &t.TSEcr) diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/icmpv6.go 
b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/icmpv6.go index 4e75ac40..ea1bfcd5 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/icmpv6.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/icmpv6.go @@ -53,7 +53,7 @@ const ( ICMPv6EchoMinimumSize = 8 // ICMPv6ErrorHeaderSize is the size of an ICMP error packet header, - // as per RFC 4443, Apendix A, item 4 and the errata. + // as per RFC 4443, Appendix A, item 4 and the errata. // ... all ICMP error messages shall have exactly // 32 bits of type-specific data, so that receivers can reliably find // the embedded invoking packet even when they don't recognize the diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/igmpv3.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/igmpv3.go index 523441e8..fb6d86a3 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/igmpv3.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/igmpv3.go @@ -378,7 +378,7 @@ func (r IGMPv3ReportGroupAddressRecord) RecordType() IGMPv3ReportRecordType { return IGMPv3ReportRecordType(r[igmpv3ReportGroupAddressRecordTypeOffset]) } -// AuxDataLen returns the length of the auxillary data in this record. +// AuxDataLen returns the length of the auxiliary data in this record. func (r IGMPv3ReportGroupAddressRecord) AuxDataLen() int { return int(r[igmpv3ReportGroupAddressRecordAuxDataLenOffset]) * igmpv3ReportGroupAddressRecordAuxDataLenUnits } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv4.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv4.go index d98c1c42..4fb77df2 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv4.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv4.go @@ -1137,9 +1137,7 @@ func (s IPv4OptionsSerializer) Serialize(b []byte) uint8 { // header ends on a 32 bit boundary. The padding is zero. 
padded := padIPv4OptionsLength(total) b = b[:padded-total] - for i := range b { - b[i] = 0 - } + clear(b) return padded } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv6_extension_headers.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv6_extension_headers.go index 2577c900..7f75b82b 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv6_extension_headers.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv6_extension_headers.go @@ -110,7 +110,7 @@ const ( // IPv6FragmentExtHdrFragmentOffsetBytesPerUnit is the unit size of a Fragment // extension header's Fragment Offset field. That is, given a Fragment Offset - // of 2, the extension header is indiciating that the fragment's payload + // of 2, the extension header is indicating that the fragment's payload // starts at the 16th byte in the reassembled packet. IPv6FragmentExtHdrFragmentOffsetBytesPerUnit = 8 ) @@ -130,9 +130,7 @@ func padIPv6Option(b []byte) { b[ipv6ExtHdrOptionTypeOffset] = uint8(ipv6Pad1ExtHdrOptionIdentifier) default: // Pad with PadN. s := b[ipv6ExtHdrOptionPayloadOffset:] - for i := range s { - s[i] = 0 - } + clear(s) b[ipv6ExtHdrOptionTypeOffset] = uint8(ipv6PadNExtHdrOptionIdentifier) b[ipv6ExtHdrOptionLengthOffset] = uint8(len(s)) } @@ -317,7 +315,7 @@ func (*IPv6UnknownExtHdrOption) isIPv6ExtHdrOption() {} // // The return is of the format (option, done, error). done will be true when // Next is unable to return anything because the iterator has reached the end of -// the options data, or an error occured. +// the options data, or an error occurred. func (i *IPv6OptionsExtHdrOptionsIterator) Next() (IPv6ExtHdrOption, bool, error) { for { i.optionOffset = i.nextOptionOffset @@ -462,7 +460,7 @@ func (b IPv6FragmentExtHdr) More() bool { // ID returns the Identification field. // // This value is used to uniquely identify the packet, between a -// souce and destination. +// source and destination. 
func (b IPv6FragmentExtHdr) ID() uint32 { return binary.BigEndian.Uint32(b[ipv6FragmentExtHdrIdentificationOffset:]) } @@ -568,7 +566,7 @@ func (i *IPv6PayloadIterator) AsRawHeader(consume bool) IPv6RawPayloadHeader { // // The return is of the format (header, done, error). done will be true when // Next is unable to return anything because the iterator has reached the end of -// the payload, or an error occured. +// the payload, or an error occurred. func (i *IPv6PayloadIterator) Next() (IPv6PayloadHeader, bool, error) { i.headerOffset = i.nextOffset i.parseOffset = 0 diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/mldv2.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/mldv2.go index 0c33f579..3d1fbd19 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/mldv2.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/mldv2.go @@ -422,7 +422,7 @@ func (r MLDv2ReportMulticastAddressRecord) RecordType() MLDv2ReportRecordType { return MLDv2ReportRecordType(r[mldv2ReportMulticastAddressRecordTypeOffset]) } -// AuxDataLen returns the length of the auxillary data in this record. +// AuxDataLen returns the length of the auxiliary data in this record. func (r MLDv2ReportMulticastAddressRecord) AuxDataLen() int { return int(r[mldv2ReportMulticastAddressRecordAuxDataLenOffset]) * mldv2ReportMulticastAddressRecordAuxDataLenUnits } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_options.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_options.go index 1dc8111d..5fbae169 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_options.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_options.go @@ -63,7 +63,7 @@ const ( // ndpPrefixInformationLength is the expected length, in bytes, of the // body of an NDP Prefix Information option, as per RFC 4861 section // 4.6.2 which specifies that the Length field is 4. 
Given this, the - // expected length, in bytes, is 30 becuase 4 * lengthByteUnits (8) - 2 + // expected length, in bytes, is 30 because 4 * lengthByteUnits (8) - 2 // (Type & Length) = 30. ndpPrefixInformationLength = 30 @@ -173,7 +173,7 @@ var ( ) // Next returns the next element in the backing NDPOptions, or true if we are -// done, or false if an error occured. +// done, or false if an error occurred. // // The return can be read as option, done, error. Note, option should only be // used if done is false and error is nil. @@ -339,8 +339,8 @@ func (b NDPOptions) Serialize(s NDPOptionsSerializer) int { used := o.serializeInto(b[2:]) // Zero out remaining (padding) bytes, if any exists. - for i := used + 2; i < l; i++ { - b[i] = 0 + if used+2 < l { + clear(b[used+2 : l]) } b = b[l:] @@ -566,9 +566,7 @@ func (o NDPPrefixInformation) serializeInto(b []byte) int { // Zero out the Reserved2 field. reserved2 := b[ndpPrefixInformationReserved2Offset:][:ndpPrefixInformationReserved2Length] - for i := range reserved2 { - reserved2[i] = 0 - } + clear(reserved2) return used } @@ -687,9 +685,7 @@ func (o NDPRecursiveDNSServer) serializeInto(b []byte) int { used := copy(b, o) // Zero out the reserved bytes that are before the Lifetime field. - for i := 0; i < ndpRecursiveDNSServerLifetimeOffset; i++ { - b[i] = 0 - } + clear(b[0:ndpRecursiveDNSServerLifetimeOffset]) return used } @@ -782,9 +778,7 @@ func (o NDPDNSSearchList) serializeInto(b []byte) int { used := copy(b, o) // Zero out the reserved bytes that are before the Lifetime field. 
- for i := 0; i < ndpDNSSearchListLifetimeOffset; i++ { - b[i] = 0 - } + clear(b[0:ndpDNSSearchListLifetimeOffset]) return used } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/parse/parse.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/parse/parse.go index 33a85fdb..adcfd77c 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/parse/parse.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/parse/parse.go @@ -27,7 +27,7 @@ import ( // pkt.Data. // // Returns true if the header was successfully parsed. -func ARP(pkt stack.PacketBufferPtr) bool { +func ARP(pkt *stack.PacketBuffer) bool { _, ok := pkt.NetworkHeader().Consume(header.ARPSize) if ok { pkt.NetworkProtocolNumber = header.ARPProtocolNumber @@ -39,7 +39,7 @@ func ARP(pkt stack.PacketBufferPtr) bool { // header with the IPv4 header. // // Returns true if the header was successfully parsed. -func IPv4(pkt stack.PacketBufferPtr) bool { +func IPv4(pkt *stack.PacketBuffer) bool { hdr, ok := pkt.Data().PullUp(header.IPv4MinimumSize) if !ok { return false @@ -71,7 +71,7 @@ func IPv4(pkt stack.PacketBufferPtr) bool { // IPv6 parses an IPv6 packet found in pkt.Data and populates pkt's network // header with the IPv6 header. -func IPv6(pkt stack.PacketBufferPtr) (proto tcpip.TransportProtocolNumber, fragID uint32, fragOffset uint16, fragMore bool, ok bool) { +func IPv6(pkt *stack.PacketBuffer) (proto tcpip.TransportProtocolNumber, fragID uint32, fragOffset uint16, fragMore bool, ok bool) { hdr, ok := pkt.Data().PullUp(header.IPv6MinimumSize) if !ok { return 0, 0, 0, false, false @@ -157,7 +157,7 @@ traverseExtensions: // header with the UDP header. // // Returns true if the header was successfully parsed. -func UDP(pkt stack.PacketBufferPtr) bool { +func UDP(pkt *stack.PacketBuffer) bool { _, ok := pkt.TransportHeader().Consume(header.UDPMinimumSize) pkt.TransportProtocolNumber = header.UDPProtocolNumber return ok @@ -167,7 +167,7 @@ func UDP(pkt stack.PacketBufferPtr) bool { // header with the TCP header. 
// // Returns true if the header was successfully parsed. -func TCP(pkt stack.PacketBufferPtr) bool { +func TCP(pkt *stack.PacketBuffer) bool { // TCP header is variable length, peek at it first. hdrLen := header.TCPMinimumSize hdr, ok := pkt.Data().PullUp(hdrLen) @@ -191,7 +191,7 @@ func TCP(pkt stack.PacketBufferPtr) bool { // if present. // // Returns true if an ICMPv4 header was successfully parsed. -func ICMPv4(pkt stack.PacketBufferPtr) bool { +func ICMPv4(pkt *stack.PacketBuffer) bool { if _, ok := pkt.TransportHeader().Consume(header.ICMPv4MinimumSize); ok { pkt.TransportProtocolNumber = header.ICMPv4ProtocolNumber return true @@ -203,7 +203,7 @@ func ICMPv4(pkt stack.PacketBufferPtr) bool { // if present. // // Returns true if an ICMPv6 header was successfully parsed. -func ICMPv6(pkt stack.PacketBufferPtr) bool { +func ICMPv6(pkt *stack.PacketBuffer) bool { hdr, ok := pkt.Data().PullUp(header.ICMPv6MinimumSize) if !ok { return false diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/tcp.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/tcp.go index 2d38928c..fe41e8d4 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/tcp.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/tcp.go @@ -216,6 +216,15 @@ const ( // TCPHeaderMaximumSize is the maximum header size of a TCP packet. TCPHeaderMaximumSize = TCPMinimumSize + TCPOptionsMaximumSize + // TCPTotalHeaderMaximumSize is the maximum size of headers from all layers in + // a TCP packet. It analogous to MAX_TCP_HEADER in Linux. + // + // TODO(b/319936470): Investigate why this needs to be at least 140 bytes. In + // Linux this value is at least 160, but in theory we should be able to use + // 138. In practice anything less than 140 starts to break GSO on gVNIC + // hardware. + TCPTotalHeaderMaximumSize = 160 + // TCPProtocolNumber is TCP's transport protocol number. 
TCPProtocolNumber tcpip.TransportProtocolNumber = 6 @@ -689,7 +698,7 @@ func Acceptable(segSeq seqnum.Value, segLen seqnum.Size, rcvNxt, rcvAcc seqnum.V return segSeq.InRange(rcvNxt, rcvAcc.Add(1)) } // Page 70 of RFC 793 allows packets that can be made "acceptable" by trimming - // the payload, so we'll accept any payload that overlaps the receieve window. + // the payload, so we'll accept any payload that overlaps the receive window. // segSeq < rcvAcc is more correct according to RFC, however, Linux does it // differently, it uses segSeq <= rcvAcc, we'd want to keep the same behavior // as Linux. diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/internal/tcp/tcp_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/internal/tcp/tcp_state_autogen.go index e973a7bb..9aa457fe 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/internal/tcp/tcp_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/internal/tcp/tcp_state_autogen.go @@ -3,6 +3,8 @@ package tcp import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -24,10 +26,10 @@ func (offset *TSOffset) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &offset.milliseconds) } -func (offset *TSOffset) afterLoad() {} +func (offset *TSOffset) afterLoad(context.Context) {} // +checklocksignore -func (offset *TSOffset) StateLoad(stateSourceObject state.Source) { +func (offset *TSOffset) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &offset.milliseconds) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/channel/channel.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/channel/channel.go index 1e2843be..3ea016cb 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/channel/channel.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/channel/channel.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// Package channel provides the implemention of channel-based data-link layer +// Package channel provides the implementation of channel-based data-link layer // endpoints. Such endpoints allow injection of inbound packets and store // outbound packets in a channel. package channel @@ -43,7 +43,7 @@ type NotificationHandle struct { type queue struct { // c is the outbound packet channel. - c chan stack.PacketBufferPtr + c chan *stack.PacketBuffer mu sync.RWMutex // +checklocks:mu notify []*NotificationHandle @@ -58,7 +58,7 @@ func (q *queue) Close() { q.closed = true } -func (q *queue) Read() stack.PacketBufferPtr { +func (q *queue) Read() *stack.PacketBuffer { select { case p := <-q.c: return p @@ -67,7 +67,7 @@ func (q *queue) Read() stack.PacketBufferPtr { } } -func (q *queue) ReadContext(ctx context.Context) stack.PacketBufferPtr { +func (q *queue) ReadContext(ctx context.Context) *stack.PacketBuffer { select { case pkt := <-q.c: return pkt @@ -76,7 +76,7 @@ func (q *queue) ReadContext(ctx context.Context) stack.PacketBufferPtr { } } -func (q *queue) Write(pkt stack.PacketBufferPtr) tcpip.Error { +func (q *queue) Write(pkt *stack.PacketBuffer) tcpip.Error { // q holds the PacketBuffer. q.mu.RLock() if q.closed { @@ -152,7 +152,7 @@ type Endpoint struct { func New(size int, mtu uint32, linkAddr tcpip.LinkAddress) *Endpoint { return &Endpoint{ q: &queue{ - c: make(chan stack.PacketBufferPtr, size), + c: make(chan *stack.PacketBuffer, size), }, mtu: mtu, linkAddr: linkAddr, @@ -167,20 +167,20 @@ func (e *Endpoint) Close() { } // Read does non-blocking read one packet from the outbound packet queue. -func (e *Endpoint) Read() stack.PacketBufferPtr { +func (e *Endpoint) Read() *stack.PacketBuffer { return e.q.Read() } // ReadContext does blocking read for one packet from the outbound packet queue. // It can be cancelled by ctx, and in this case, it returns nil. 
-func (e *Endpoint) ReadContext(ctx context.Context) stack.PacketBufferPtr { +func (e *Endpoint) ReadContext(ctx context.Context) *stack.PacketBuffer { return e.q.ReadContext(ctx) } // Drain removes all outbound packets from the channel and counts them. func (e *Endpoint) Drain() int { c := 0 - for pkt := e.Read(); !pkt.IsNil(); pkt = e.Read() { + for pkt := e.Read(); pkt != nil; pkt = e.Read() { pkt.DecRef() c++ } @@ -194,7 +194,7 @@ func (e *Endpoint) NumQueued() int { // InjectInbound injects an inbound packet. If the endpoint is not attached, the // packet is not delivered. -func (e *Endpoint) InjectInbound(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) { +func (e *Endpoint) InjectInbound(protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) { e.mu.RLock() d := e.dispatcher e.mu.RUnlock() @@ -287,4 +287,7 @@ func (*Endpoint) ARPHardwareType() header.ARPHardwareType { } // AddHeader implements stack.LinkEndpoint.AddHeader. -func (*Endpoint) AddHeader(stack.PacketBufferPtr) {} +func (*Endpoint) AddHeader(*stack.PacketBuffer) {} + +// ParseHeader implements stack.LinkEndpoint.ParseHeader. 
+func (*Endpoint) ParseHeader(*stack.PacketBuffer) bool { return true } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/channel/channel_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/channel/channel_state_autogen.go index 7730b59b..aaa789eb 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/channel/channel_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/channel/channel_state_autogen.go @@ -3,6 +3,8 @@ package channel import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -24,10 +26,10 @@ func (n *NotificationHandle) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &n.n) } -func (n *NotificationHandle) afterLoad() {} +func (n *NotificationHandle) afterLoad(context.Context) {} // +checklocksignore -func (n *NotificationHandle) StateLoad(stateSourceObject state.Source) { +func (n *NotificationHandle) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &n.n) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/nested/nested.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/nested/nested.go index 1a327d84..0f04483a 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/nested/nested.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/nested/nested.go @@ -51,7 +51,7 @@ func (e *Endpoint) Init(child stack.LinkEndpoint, embedder stack.NetworkDispatch } // DeliverNetworkPacket implements stack.NetworkDispatcher. -func (e *Endpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) { +func (e *Endpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) { e.mu.RLock() d := e.dispatcher e.mu.RUnlock() @@ -61,7 +61,7 @@ func (e *Endpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pk } // DeliverLinkPacket implements stack.NetworkDispatcher. 
-func (e *Endpoint) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) { +func (e *Endpoint) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) { e.mu.RLock() d := e.dispatcher e.mu.RUnlock() @@ -144,6 +144,11 @@ func (e *Endpoint) ARPHardwareType() header.ARPHardwareType { } // AddHeader implements stack.LinkEndpoint.AddHeader. -func (e *Endpoint) AddHeader(pkt stack.PacketBufferPtr) { +func (e *Endpoint) AddHeader(pkt *stack.PacketBuffer) { e.child.AddHeader(pkt) } + +// ParseHeader implements stack.LinkEndpoint.ParseHeader. +func (e *Endpoint) ParseHeader(pkt *stack.PacketBuffer) bool { + return e.child.ParseHeader(pkt) +} diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/packetsocket/packetsocket.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/packetsocket/packetsocket.go index d309f653..16284360 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/packetsocket/packetsocket.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/packetsocket/packetsocket.go @@ -40,7 +40,7 @@ func New(lower stack.LinkEndpoint) stack.LinkEndpoint { } // DeliverNetworkPacket implements stack.NetworkDispatcher. 
-func (e *endpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) { +func (e *endpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) { e.Endpoint.DeliverLinkPacket(protocol, pkt) e.Endpoint.DeliverNetworkPacket(protocol, pkt) diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/fragmentation.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/fragmentation.go index 39dc5ad0..b7d3c674 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/fragmentation.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/fragmentation.go @@ -97,7 +97,7 @@ type TimeoutHandler interface { // OnReassemblyTimeout will be called with the first fragment (or nil, if the // first fragment has not been received) of a packet whose reassembly has // timed out. - OnReassemblyTimeout(pkt stack.PacketBufferPtr) + OnReassemblyTimeout(pkt *stack.PacketBuffer) } // NewFragmentation creates a new Fragmentation. @@ -155,8 +155,8 @@ func NewFragmentation(blockSize uint16, highMemoryLimit, lowMemoryLimit int, rea // to be given here outside of the FragmentID struct because IPv6 should not use // the protocol to identify a fragment. 
func (f *Fragmentation) Process( - id FragmentID, first, last uint16, more bool, proto uint8, pkt stack.PacketBufferPtr) ( - stack.PacketBufferPtr, uint8, bool, error) { + id FragmentID, first, last uint16, more bool, proto uint8, pkt *stack.PacketBuffer) ( + *stack.PacketBuffer, uint8, bool, error) { if first > last { return nil, 0, false, fmt.Errorf("first=%d is greater than last=%d: %w", first, last, ErrInvalidArgs) } @@ -251,12 +251,12 @@ func (f *Fragmentation) release(r *reassembler, timedOut bool) { if h := f.timeoutHandler; timedOut && h != nil { h.OnReassemblyTimeout(r.pkt) } - if !r.pkt.IsNil() { + if r.pkt != nil { r.pkt.DecRef() r.pkt = nil } for _, h := range r.holes { - if !h.pkt.IsNil() { + if h.pkt != nil { h.pkt.DecRef() h.pkt = nil } @@ -308,7 +308,7 @@ type PacketFragmenter struct { // // reserve is the number of bytes that should be reserved for the headers in // each generated fragment. -func MakePacketFragmenter(pkt stack.PacketBufferPtr, fragmentPayloadLen uint32, reserve int) PacketFragmenter { +func MakePacketFragmenter(pkt *stack.PacketBuffer, fragmentPayloadLen uint32, reserve int) PacketFragmenter { // As per RFC 8200 Section 4.5, some IPv6 extension headers should not be // repeated in each fragment. However we do not currently support any header // of that kind yet, so the following computation is valid for both IPv4 and @@ -339,7 +339,7 @@ func MakePacketFragmenter(pkt stack.PacketBufferPtr, fragmentPayloadLen uint32, // Note that the returned packet will not have its network and link headers // populated, but space for them will be reserved. The transport header will be // stored in the packet's data. 
-func (pf *PacketFragmenter) BuildNextFragment() (stack.PacketBufferPtr, int, int, bool) { +func (pf *PacketFragmenter) BuildNextFragment() (*stack.PacketBuffer, int, int, bool) { if pf.currentFragment >= pf.fragmentCount { panic("BuildNextFragment should not be called again after the last fragment was returned") } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/fragmentation_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/fragmentation_state_autogen.go index 21c5774e..9727e873 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/fragmentation_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/fragmentation_state_autogen.go @@ -3,6 +3,8 @@ package fragmentation import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -26,10 +28,10 @@ func (l *reassemblerList) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &l.tail) } -func (l *reassemblerList) afterLoad() {} +func (l *reassemblerList) afterLoad(context.Context) {} // +checklocksignore -func (l *reassemblerList) StateLoad(stateSourceObject state.Source) { +func (l *reassemblerList) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &l.head) stateSourceObject.Load(1, &l.tail) } @@ -54,10 +56,10 @@ func (e *reassemblerEntry) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &e.prev) } -func (e *reassemblerEntry) afterLoad() {} +func (e *reassemblerEntry) afterLoad(context.Context) {} // +checklocksignore -func (e *reassemblerEntry) StateLoad(stateSourceObject state.Source) { +func (e *reassemblerEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &e.next) stateSourceObject.Load(1, &e.prev) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/reassembler.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/reassembler.go index 
873e034f..59fea4bb 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/reassembler.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/reassembler.go @@ -30,7 +30,7 @@ type hole struct { final bool // pkt is the fragment packet if hole is filled. We keep the whole pkt rather // than the fragmented payload to prevent binding to specific buffer types. - pkt stack.PacketBufferPtr + pkt *stack.PacketBuffer } type reassembler struct { @@ -43,7 +43,7 @@ type reassembler struct { filled int done bool createdAt tcpip.MonotonicTime - pkt stack.PacketBufferPtr + pkt *stack.PacketBuffer } func newReassembler(id FragmentID, clock tcpip.Clock) *reassembler { @@ -60,7 +60,7 @@ func newReassembler(id FragmentID, clock tcpip.Clock) *reassembler { return r } -func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt stack.PacketBufferPtr) (stack.PacketBufferPtr, uint8, bool, int, error) { +func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt *stack.PacketBuffer) (*stack.PacketBuffer, uint8, bool, int, error) { r.mu.Lock() defer r.mu.Unlock() if r.done { @@ -145,7 +145,7 @@ func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt st // options received in the first fragment should be used - and they should // override options from following fragments. 
if first == 0 { - if !r.pkt.IsNil() { + if r.pkt != nil { r.pkt.DecRef() } r.pkt = pkt.IncRef() diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/errors.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/errors.go index b381c4c0..c99a4fe2 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/errors.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/errors.go @@ -20,7 +20,7 @@ import ( "gvisor.dev/gvisor/pkg/tcpip" ) -// ForwardingError represents an error that occured while trying to forward +// ForwardingError represents an error that occurred while trying to forward // a packet. type ForwardingError interface { isForwardingError() @@ -75,7 +75,7 @@ func (*ErrLinkLocalDestinationAddress) isForwardingError() {} func (*ErrLinkLocalDestinationAddress) String() string { return "link local destination address" } -// ErrHostUnreachable indicates that the destinatino host could not be reached. +// ErrHostUnreachable indicates that the destination host could not be reached. type ErrHostUnreachable struct{} func (*ErrHostUnreachable) isForwardingError() {} diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/generic_multicast_protocol.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/generic_multicast_protocol.go index 884ea056..54c38b83 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/generic_multicast_protocol.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/generic_multicast_protocol.go @@ -390,7 +390,9 @@ func (g *GenericMulticastProtocolState) MakeAllNonMemberLocked() { switch g.mode { case protocolModeV2: v2ReportBuilder = g.opts.Protocol.NewReportV2Builder() - handler = func(groupAddress tcpip.Address, _ *multicastGroupState) { + handler = func(groupAddress tcpip.Address, info *multicastGroupState) { + info.cancelDelayedReportJob() + // Send a report immediately to announce us leaving the group. 
v2ReportBuilder.AddRecord( MulticastGroupProtocolV2ReportRecordChangeToIncludeMode, diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/ip_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/ip_state_autogen.go index 4ca3d838..f569f27d 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/ip_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/ip_state_autogen.go @@ -3,6 +3,8 @@ package ip import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -21,10 +23,10 @@ func (e *ErrMessageTooLong) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrMessageTooLong) afterLoad() {} +func (e *ErrMessageTooLong) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrMessageTooLong) StateLoad(stateSourceObject state.Source) { +func (e *ErrMessageTooLong) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrNoMulticastPendingQueueBufferSpace) StateTypeName() string { @@ -42,10 +44,10 @@ func (e *ErrNoMulticastPendingQueueBufferSpace) StateSave(stateSinkObject state. 
e.beforeSave() } -func (e *ErrNoMulticastPendingQueueBufferSpace) afterLoad() {} +func (e *ErrNoMulticastPendingQueueBufferSpace) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrNoMulticastPendingQueueBufferSpace) StateLoad(stateSourceObject state.Source) { +func (e *ErrNoMulticastPendingQueueBufferSpace) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func init() { diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/stats.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/stats.go index d1e112a0..4e802366 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/stats.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/stats.go @@ -66,7 +66,7 @@ type MultiCounterIPForwardingStats struct { UnknownOutputEndpoint tcpip.MultiCounterStat // NoMulticastPendingQueueBufferSpace is the number of multicast packets that - // were dropped due to insufficent buffer space in the pending packet queue. + // were dropped due to insufficient buffer space in the pending packet queue. NoMulticastPendingQueueBufferSpace tcpip.MultiCounterStat // OutgoingDeviceNoBufferSpace is the number of packets that were dropped due diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast/route_table.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast/route_table.go index 5bade5ae..41227e6e 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast/route_table.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast/route_table.go @@ -116,7 +116,7 @@ func (r *InstalledRoute) SetLastUsedTimestamp(monotonicTime tcpip.MonotonicTime) // for the entry. For such routes, packets are added to an expiring queue until // a route is installed. type PendingRoute struct { - packets []stack.PacketBufferPtr + packets []*stack.PacketBuffer // expiration is the timestamp at which the pending route should be expired. 
// @@ -265,7 +265,7 @@ func (r *RouteTable) cleanupPendingRoutes() { func (r *RouteTable) newPendingRoute() PendingRoute { return PendingRoute{ - packets: make([]stack.PacketBufferPtr, 0, r.config.MaxPendingQueueSize), + packets: make([]*stack.PacketBuffer, 0, r.config.MaxPendingQueueSize), expiration: r.config.Clock.NowMonotonic().Add(DefaultPendingRouteExpiration), } } @@ -326,7 +326,7 @@ func (e GetRouteResultState) String() string { // // If the relevant pending route queue is at max capacity, then returns false. // Otherwise, returns true. -func (r *RouteTable) GetRouteOrInsertPending(key stack.UnicastSourceAndMulticastDestination, pkt stack.PacketBufferPtr) (GetRouteResult, bool) { +func (r *RouteTable) GetRouteOrInsertPending(key stack.UnicastSourceAndMulticastDestination, pkt *stack.PacketBuffer) (GetRouteResult, bool) { r.installedMu.RLock() defer r.installedMu.RUnlock() @@ -374,7 +374,7 @@ func (r *RouteTable) getOrCreatePendingRouteRLocked(key stack.UnicastSourceAndMu // returned. The caller assumes ownership of these packets and is responsible // for forwarding and releasing them. If an installed route already exists for // the provided key, then it is overwritten. -func (r *RouteTable) AddInstalledRoute(key stack.UnicastSourceAndMulticastDestination, route *InstalledRoute) []stack.PacketBufferPtr { +func (r *RouteTable) AddInstalledRoute(key stack.UnicastSourceAndMulticastDestination, route *InstalledRoute) []*stack.PacketBuffer { r.installedMu.Lock() defer r.installedMu.Unlock() r.installedRoutes[key] = route diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/icmp.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/icmp.go index 875eca47..b6da8bd4 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/icmp.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/icmp.go @@ -243,7 +243,7 @@ func (e *endpoint) checkLocalAddress(addr tcpip.Address) bool { // of the original packet that caused the ICMP one to be sent. 
This information // is used to find out which transport endpoint must be notified about the ICMP // packet. We only expect the payload, not the enclosing ICMP packet. -func (e *endpoint) handleControl(errInfo stack.TransportError, pkt stack.PacketBufferPtr) { +func (e *endpoint) handleControl(errInfo stack.TransportError, pkt *stack.PacketBuffer) { h, ok := pkt.Data().PullUp(header.IPv4MinimumSize) if !ok { return @@ -280,7 +280,7 @@ func (e *endpoint) handleControl(errInfo stack.TransportError, pkt stack.PacketB e.dispatcher.DeliverTransportError(srcAddr, dstAddr, ProtocolNumber, p, errInfo, pkt) } -func (e *endpoint) handleICMP(pkt stack.PacketBufferPtr) { +func (e *endpoint) handleICMP(pkt *stack.PacketBuffer) { received := e.stats.icmp.packetsReceived h := header.ICMPv4(pkt.TransportHeader().Slice()) if len(h) < header.ICMPv4MinimumSize { @@ -607,7 +607,7 @@ func (*icmpReasonHostUnreachable) isICMPReason() {} // the problematic packet. It incorporates as much of that packet as // possible as well as any error metadata as is available. returnError // expects pkt to hold a valid IPv4 packet as per the wire format. -func (p *protocol) returnError(reason icmpReason, pkt stack.PacketBufferPtr, deliveredLocally bool) tcpip.Error { +func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer, deliveredLocally bool) tcpip.Error { origIPHdr := header.IPv4(pkt.NetworkHeader().Slice()) origIPHdrSrc := origIPHdr.SourceAddress() origIPHdrDst := origIPHdr.DestinationAddress() @@ -807,7 +807,7 @@ func (p *protocol) returnError(reason icmpReason, pkt stack.PacketBufferPtr, del } // OnReassemblyTimeout implements fragmentation.TimeoutHandler. 
-func (p *protocol) OnReassemblyTimeout(pkt stack.PacketBufferPtr) { +func (p *protocol) OnReassemblyTimeout(pkt *stack.PacketBuffer) { // OnReassemblyTimeout sends a Time Exceeded Message, as per RFC 792: // // If a host reassembling a fragmented datagram cannot complete the @@ -816,7 +816,7 @@ func (p *protocol) OnReassemblyTimeout(pkt stack.PacketBufferPtr) { // // If fragment zero is not available then no time exceeded need be sent at // all. - if !pkt.IsNil() { + if pkt != nil { p.returnError(&icmpReasonReassemblyTimeout{}, pkt, true /* deliveredLocally */) } } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/igmp.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/igmp.go index 6db1cf17..afbe4d38 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/igmp.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/igmp.go @@ -283,7 +283,7 @@ func (*igmpState) V2QueryMaxRespCodeToV1Delay(code uint16) time.Duration { func (igmp *igmpState) init(ep *endpoint) { igmp.ep = ep igmp.genericMulticastProtocol.Init(&ep.mu, ip.GenericMulticastProtocolOptions{ - Rand: ep.protocol.stack.Rand(), + Rand: ep.protocol.stack.InsecureRNG(), Clock: ep.protocol.stack.Clock(), Protocol: igmp, MaxUnsolicitedReportDelay: UnsolicitedReportIntervalMax, @@ -328,7 +328,7 @@ func (igmp *igmpState) isSourceIPValidLocked(src tcpip.Address, messageType head } // +checklocks:igmp.ep.mu -func (igmp *igmpState) isPacketValidLocked(pkt stack.PacketBufferPtr, messageType header.IGMPType, hasRouterAlertOption bool) bool { +func (igmp *igmpState) isPacketValidLocked(pkt *stack.PacketBuffer, messageType header.IGMPType, hasRouterAlertOption bool) bool { // We can safely assume that the IP header is valid if we got this far. iph := header.IPv4(pkt.NetworkHeader().Slice()) @@ -346,7 +346,7 @@ func (igmp *igmpState) isPacketValidLocked(pkt stack.PacketBufferPtr, messageTyp // handleIGMP handles an IGMP packet. 
// // +checklocks:igmp.ep.mu -func (igmp *igmpState) handleIGMP(pkt stack.PacketBufferPtr, hasRouterAlertOption bool) { +func (igmp *igmpState) handleIGMP(pkt *stack.PacketBuffer, hasRouterAlertOption bool) { received := igmp.ep.stats.igmp.packetsReceived hdr, ok := pkt.Data().PullUp(pkt.Data().Size()) if !ok { @@ -521,7 +521,7 @@ func (igmp *igmpState) writePacketInner(buf *buffer.View, reportStat tcpip.Multi }) defer pkt.DecRef() - addressEndpoint := igmp.ep.acquireOutgoingPrimaryAddressRLocked(destAddress, false /* allowExpired */) + addressEndpoint := igmp.ep.acquireOutgoingPrimaryAddressRLocked(destAddress, tcpip.Address{} /* srcHint */, false /* allowExpired */) if addressEndpoint == nil { return false, nil } @@ -586,7 +586,7 @@ func (igmp *igmpState) softLeaveAll() { igmp.genericMulticastProtocol.MakeAllNonMemberLocked() } -// initializeAll attemps to initialize the IGMP state for each group that has +// initializeAll attempts to initialize the IGMP state for each group that has // been joined locally. // // +checklocks:igmp.ep.mu diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4.go index 2e5ab026..d8fff536 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4.go @@ -137,7 +137,7 @@ func (e *endpoint) getIGMPVersionLocked() IGMPVersion { } // HandleLinkResolutionFailure implements stack.LinkResolvableNetworkEndpoint. -func (e *endpoint) HandleLinkResolutionFailure(pkt stack.PacketBufferPtr) { +func (e *endpoint) HandleLinkResolutionFailure(pkt *stack.PacketBuffer) { // If we are operating as a router, return an ICMP error to the original // packet's sender. 
if pkt.NetworkPacketInfo.IsForwardedPacket { @@ -437,7 +437,18 @@ func (e *endpoint) NetworkProtocolNumber() tcpip.NetworkProtocolNumber { return e.protocol.Number() } -func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt stack.PacketBufferPtr, params stack.NetworkHeaderParams, options header.IPv4OptionsSerializer) tcpip.Error { +// getID returns a random uint16 number (other than zero) to be used as ID in +// the IPv4 header. +func (e *endpoint) getID() uint16 { + rng := e.protocol.stack.SecureRNG() + id := rng.Uint16() + for id == 0 { + id = rng.Uint16() + } + return id +} + +func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt *stack.PacketBuffer, params stack.NetworkHeaderParams, options header.IPv4OptionsSerializer) tcpip.Error { hdrLen := header.IPv4MinimumSize var optLen int if options != nil { @@ -455,10 +466,9 @@ func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt stack.PacketB // RFC 6864 section 4.3 mandates uniqueness of ID values for non-atomic // datagrams. Since the DF bit is never being set here, all datagrams // are non-atomic and need an ID. - id := e.protocol.ids[hashRoute(srcAddr, dstAddr, params.Protocol, e.protocol.hashIV)%buckets].Add(1) ipH.Encode(&header.IPv4Fields{ TotalLength: uint16(length), - ID: uint16(id), + ID: e.getID(), TTL: params.TTL, TOS: params.TOS, Protocol: uint8(params.Protocol), @@ -475,7 +485,7 @@ func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt stack.PacketB // fragment. It returns the number of fragments handled and the number of // fragments left to be processed. The IP header must already be present in the // original packet. 
-func (e *endpoint) handleFragments(_ *stack.Route, networkMTU uint32, pkt stack.PacketBufferPtr, handler func(stack.PacketBufferPtr) tcpip.Error) (int, int, tcpip.Error) { +func (e *endpoint) handleFragments(_ *stack.Route, networkMTU uint32, pkt *stack.PacketBuffer, handler func(*stack.PacketBuffer) tcpip.Error) (int, int, tcpip.Error) { // Round the MTU down to align to 8 bytes. fragmentPayloadSize := networkMTU &^ 7 networkHeader := header.IPv4(pkt.NetworkHeader().Slice()) @@ -498,7 +508,7 @@ func (e *endpoint) handleFragments(_ *stack.Route, networkMTU uint32, pkt stack. } // WritePacket writes a packet to the given destination address and protocol. -func (e *endpoint) WritePacket(r *stack.Route, params stack.NetworkHeaderParams, pkt stack.PacketBufferPtr) tcpip.Error { +func (e *endpoint) WritePacket(r *stack.Route, params stack.NetworkHeaderParams, pkt *stack.PacketBuffer) tcpip.Error { if err := e.addIPHeader(r.LocalAddress(), r.RemoteAddress(), pkt, params, nil /* options */); err != nil { return err } @@ -506,7 +516,7 @@ func (e *endpoint) WritePacket(r *stack.Route, params stack.NetworkHeaderParams, return e.writePacket(r, pkt) } -func (e *endpoint) writePacket(r *stack.Route, pkt stack.PacketBufferPtr) tcpip.Error { +func (e *endpoint) writePacket(r *stack.Route, pkt *stack.PacketBuffer) tcpip.Error { netHeader := header.IPv4(pkt.NetworkHeader().Slice()) dstAddr := netHeader.DestinationAddress() @@ -538,7 +548,7 @@ func (e *endpoint) writePacket(r *stack.Route, pkt stack.PacketBufferPtr) tcpip. 
return e.writePacketPostRouting(r, pkt, false /* headerIncluded */) } -func (e *endpoint) writePacketPostRouting(r *stack.Route, pkt stack.PacketBufferPtr, headerIncluded bool) tcpip.Error { +func (e *endpoint) writePacketPostRouting(r *stack.Route, pkt *stack.PacketBuffer, headerIncluded bool) tcpip.Error { if r.Loop()&stack.PacketLoop != 0 { // If the packet was generated by the stack (not a raw/packet endpoint // where a packet may be written with the header included), then we can @@ -573,7 +583,7 @@ func (e *endpoint) writePacketPostRouting(r *stack.Route, pkt stack.PacketBuffer // is set but the packet must be fragmented for the non-forwarding case. return &tcpip.ErrMessageTooLong{} } - sent, remain, err := e.handleFragments(r, networkMTU, pkt, func(fragPkt stack.PacketBufferPtr) tcpip.Error { + sent, remain, err := e.handleFragments(r, networkMTU, pkt, func(fragPkt *stack.PacketBuffer) tcpip.Error { // TODO(gvisor.dev/issue/3884): Evaluate whether we want to send each // fragment one by one using WritePacket() (current strategy) or if we // want to create a PacketBufferList from the fragments and feed it to @@ -594,7 +604,7 @@ func (e *endpoint) writePacketPostRouting(r *stack.Route, pkt stack.PacketBuffer } // WriteHeaderIncludedPacket implements stack.NetworkEndpoint. -func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt stack.PacketBufferPtr) tcpip.Error { +func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt *stack.PacketBuffer) tcpip.Error { // The packet already has an IP header, but there are a few required // checks. h, ok := pkt.Data().PullUp(header.IPv4MinimumSize) @@ -628,7 +638,7 @@ func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt stack.PacketBuf // non-atomic datagrams, so assign an ID to all such datagrams // according to the definition given in RFC 6864 section 4. 
if ipH.Flags()&header.IPv4FlagDontFragment == 0 || ipH.Flags()&header.IPv4FlagMoreFragments != 0 || ipH.FragmentOffset() > 0 { - ipH.SetID(uint16(e.protocol.ids[hashRoute(r.LocalAddress(), r.RemoteAddress(), 0 /* protocol */, e.protocol.hashIV)%buckets].Add(1))) + ipH.SetID(e.getID()) } } @@ -656,7 +666,7 @@ func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt stack.PacketBuf // updating the options. // // This method should be invoked by the endpoint that received the pkt. -func (e *endpoint) forwardPacketWithRoute(route *stack.Route, pkt stack.PacketBufferPtr, updateOptions bool) ip.ForwardingError { +func (e *endpoint) forwardPacketWithRoute(route *stack.Route, pkt *stack.PacketBuffer, updateOptions bool) ip.ForwardingError { h := header.IPv4(pkt.NetworkHeader().Slice()) stk := e.protocol.stack @@ -726,7 +736,7 @@ func (e *endpoint) forwardPacketWithRoute(route *stack.Route, pkt stack.PacketBu } // forwardUnicastPacket attempts to forward a packet to its final destination. -func (e *endpoint) forwardUnicastPacket(pkt stack.PacketBufferPtr) ip.ForwardingError { +func (e *endpoint) forwardUnicastPacket(pkt *stack.PacketBuffer) ip.ForwardingError { hView := pkt.NetworkHeader().View() defer hView.Release() h := header.IPv4(hView.AsSlice()) @@ -804,7 +814,7 @@ func (e *endpoint) forwardUnicastPacket(pkt stack.PacketBufferPtr) ip.Forwarding // HandlePacket is called by the link layer when new ipv4 packets arrive for // this endpoint. -func (e *endpoint) HandlePacket(pkt stack.PacketBufferPtr) { +func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) { stats := e.stats.ip stats.PacketsReceived.Increment() @@ -863,7 +873,7 @@ func (e *endpoint) HandlePacket(pkt stack.PacketBufferPtr) { // handleLocalPacket is like HandlePacket except it does not perform the // prerouting iptables hook or check for loopback traffic that originated from // outside of the netstack (i.e. martian loopback packets). 
-func (e *endpoint) handleLocalPacket(pkt stack.PacketBufferPtr, canSkipRXChecksum bool) { +func (e *endpoint) handleLocalPacket(pkt *stack.PacketBuffer, canSkipRXChecksum bool) { stats := e.stats.ip stats.PacketsReceived.Increment() @@ -935,7 +945,7 @@ func validateAddressesForForwarding(h header.IPv4) ip.ForwardingError { // // This method should be invoked for incoming multicast packets using the // endpoint that received the packet. -func (e *endpoint) forwardMulticastPacket(h header.IPv4, pkt stack.PacketBufferPtr) ip.ForwardingError { +func (e *endpoint) forwardMulticastPacket(h header.IPv4, pkt *stack.PacketBuffer) ip.ForwardingError { if err := validateAddressesForForwarding(h); err != nil { return err } @@ -988,7 +998,7 @@ func (e *endpoint) forwardMulticastPacket(h header.IPv4, pkt stack.PacketBufferP return &ip.ErrHostUnreachable{} } -func (e *endpoint) updateOptionsForForwarding(pkt stack.PacketBufferPtr) ip.ForwardingError { +func (e *endpoint) updateOptionsForForwarding(pkt *stack.PacketBuffer) ip.ForwardingError { h := header.IPv4(pkt.NetworkHeader().Slice()) if opts := h.Options(); len(opts) != 0 { newOpts, _, optProblem := e.processIPOptions(pkt, opts, &optionUsageForward{}) @@ -1023,7 +1033,7 @@ func (e *endpoint) updateOptionsForForwarding(pkt stack.PacketBufferPtr) ip.Forw // provided installedRoute. // // This method should be invoked by the endpoint that received the pkt. -func (e *endpoint) forwardValidatedMulticastPacket(pkt stack.PacketBufferPtr, installedRoute *multicast.InstalledRoute) ip.ForwardingError { +func (e *endpoint) forwardValidatedMulticastPacket(pkt *stack.PacketBuffer, installedRoute *multicast.InstalledRoute) ip.ForwardingError { // Per RFC 1812 section 5.2.1.3, // // Based on the IP source and destination addresses found in the datagram @@ -1056,7 +1066,7 @@ func (e *endpoint) forwardValidatedMulticastPacket(pkt stack.PacketBufferPtr, in // of the provided outgoingInterface. 
// // This method should be invoked by the endpoint that received the pkt. -func (e *endpoint) forwardMulticastPacketForOutgoingInterface(pkt stack.PacketBufferPtr, outgoingInterface stack.MulticastRouteOutgoingInterface) ip.ForwardingError { +func (e *endpoint) forwardMulticastPacketForOutgoingInterface(pkt *stack.PacketBuffer, outgoingInterface stack.MulticastRouteOutgoingInterface) ip.ForwardingError { h := header.IPv4(pkt.NetworkHeader().Slice()) // Per RFC 1812 section 5.2.1.3, @@ -1083,7 +1093,7 @@ func (e *endpoint) forwardMulticastPacketForOutgoingInterface(pkt stack.PacketBu return e.forwardPacketWithRoute(route, pkt, true /* updateOptions */) } -func (e *endpoint) handleValidatedPacket(h header.IPv4, pkt stack.PacketBufferPtr, inNICName string) { +func (e *endpoint) handleValidatedPacket(h header.IPv4, pkt *stack.PacketBuffer, inNICName string) { pkt.NICID = e.nic.ID() // Raw socket packets are delivered based solely on the transport protocol @@ -1194,7 +1204,7 @@ func (e *endpoint) handleForwardingError(err ip.ForwardingError) { stats.Forwarding.Errors.Increment() } -func (e *endpoint) deliverPacketLocally(h header.IPv4, pkt stack.PacketBufferPtr, inNICName string) { +func (e *endpoint) deliverPacketLocally(h header.IPv4, pkt *stack.PacketBuffer, inNICName string) { stats := e.stats // iptables filtering. All packets that reach here are intended for // this machine and will not be forwarded. @@ -1352,8 +1362,8 @@ func (e *endpoint) Close() { // AddAndAcquirePermanentAddress implements stack.AddressableEndpoint. 
func (e *endpoint) AddAndAcquirePermanentAddress(addr tcpip.AddressWithPrefix, properties stack.AddressProperties) (stack.AddressEndpoint, tcpip.Error) { - e.mu.RLock() - defer e.mu.RUnlock() + e.mu.Lock() + defer e.mu.Unlock() ep, err := e.addressableEndpointState.AddAndAcquireAddress(addr, properties, stack.Permanent) if err == nil { @@ -1364,7 +1374,7 @@ func (e *endpoint) AddAndAcquirePermanentAddress(addr tcpip.AddressWithPrefix, p // sendQueuedReports sends queued igmp reports. // -// +checklocksread:e.mu +// +checklocks:e.mu // +checklocksalias:e.igmp.ep.mu=e.mu func (e *endpoint) sendQueuedReports() { e.igmp.sendQueuedReports() @@ -1413,18 +1423,18 @@ func (e *endpoint) AcquireAssignedAddress(localAddr tcpip.Address, allowTemp boo } // AcquireOutgoingPrimaryAddress implements stack.AddressableEndpoint. -func (e *endpoint) AcquireOutgoingPrimaryAddress(remoteAddr tcpip.Address, allowExpired bool) stack.AddressEndpoint { +func (e *endpoint) AcquireOutgoingPrimaryAddress(remoteAddr, srcHint tcpip.Address, allowExpired bool) stack.AddressEndpoint { e.mu.RLock() defer e.mu.RUnlock() - return e.acquireOutgoingPrimaryAddressRLocked(remoteAddr, allowExpired) + return e.acquireOutgoingPrimaryAddressRLocked(remoteAddr, srcHint, allowExpired) } // acquireOutgoingPrimaryAddressRLocked is like AcquireOutgoingPrimaryAddress // but with locking requirements // // +checklocksread:e.mu -func (e *endpoint) acquireOutgoingPrimaryAddressRLocked(remoteAddr tcpip.Address, allowExpired bool) stack.AddressEndpoint { - return e.addressableEndpointState.AcquireOutgoingPrimaryAddress(remoteAddr, allowExpired) +func (e *endpoint) acquireOutgoingPrimaryAddressRLocked(remoteAddr, srcHint tcpip.Address, allowExpired bool) stack.AddressEndpoint { + return e.addressableEndpointState.AcquireOutgoingPrimaryAddress(remoteAddr, srcHint, allowExpired) } // PrimaryAddresses implements stack.AddressableEndpoint. 
@@ -1514,6 +1524,8 @@ type protocol struct { ids []atomicbitops.Uint32 hashIV uint32 + // idTS is the unix timestamp in milliseconds 'ids' was last accessed. + idTS atomicbitops.Int64 fragmentation *fragmentation.Fragmentation @@ -1704,7 +1716,7 @@ func (p *protocol) MulticastRouteLastUsedTime(addresses stack.UnicastSourceAndMu return timestamp, nil } -func (p *protocol) forwardPendingMulticastPacket(pkt stack.PacketBufferPtr, installedRoute *multicast.InstalledRoute) { +func (p *protocol) forwardPendingMulticastPacket(pkt *stack.PacketBuffer, installedRoute *multicast.InstalledRoute) { defer pkt.DecRef() // Attempt to forward the packet using the endpoint that it originally @@ -1761,7 +1773,7 @@ func (p *protocol) isSubnetLocalBroadcastAddress(addr tcpip.Address) bool { // returns the parsed IP header. // // Returns true if the IP header was successfully parsed. -func (p *protocol) parseAndValidate(pkt stack.PacketBufferPtr) (*buffer.View, bool) { +func (p *protocol) parseAndValidate(pkt *stack.PacketBuffer) (*buffer.View, bool) { transProtoNum, hasTransportHdr, ok := p.Parse(pkt) if !ok { return nil, false @@ -1785,7 +1797,7 @@ func (p *protocol) parseAndValidate(pkt stack.PacketBufferPtr) (*buffer.View, bo return pkt.NetworkHeader().View(), true } -func (p *protocol) parseTransport(pkt stack.PacketBufferPtr, transProtoNum tcpip.TransportProtocolNumber) { +func (p *protocol) parseTransport(pkt *stack.PacketBuffer, transProtoNum tcpip.TransportProtocolNumber) { if transProtoNum == header.ICMPv4ProtocolNumber { // The transport layer will handle transport layer parsing errors. _ = parse.ICMPv4(pkt) @@ -1803,7 +1815,7 @@ func (p *protocol) parseTransport(pkt stack.PacketBufferPtr, transProtoNum tcpip } // Parse implements stack.NetworkProtocol. 
-func (*protocol) Parse(pkt stack.PacketBufferPtr) (proto tcpip.TransportProtocolNumber, hasTransportHdr bool, ok bool) { +func (*protocol) Parse(pkt *stack.PacketBuffer) (proto tcpip.TransportProtocolNumber, hasTransportHdr bool, ok bool) { if ok := parse.IPv4(pkt); !ok { return 0, false, false } @@ -1830,7 +1842,7 @@ func (p *protocol) allowICMPReply(icmpType header.ICMPv4Type, code header.ICMPv4 } // SendRejectionError implements stack.RejectIPv4WithHandler. -func (p *protocol) SendRejectionError(pkt stack.PacketBufferPtr, rejectWith stack.RejectIPv4WithICMPType, inputHook bool) tcpip.Error { +func (p *protocol) SendRejectionError(pkt *stack.PacketBuffer, rejectWith stack.RejectIPv4WithICMPType, inputHook bool) tcpip.Error { switch rejectWith { case stack.RejectIPv4WithICMPNetUnreachable: return p.returnError(&icmpReasonNetworkUnreachable{}, pkt, inputHook) @@ -1872,7 +1884,7 @@ func calculateNetworkMTU(linkMTU, networkHeaderSize uint32) (uint32, tcpip.Error return networkMTU - networkHeaderSize, nil } -func packetMustBeFragmented(pkt stack.PacketBufferPtr, networkMTU uint32) bool { +func packetMustBeFragmented(pkt *stack.PacketBuffer, networkMTU uint32) bool { payload := len(pkt.TransportHeader().Slice()) + pkt.Data().Size() return pkt.GSOOptions.Type == stack.GSONone && uint32(payload) > networkMTU } @@ -1949,7 +1961,7 @@ func NewProtocol(s *stack.Stack) stack.NetworkProtocol { return NewProtocolWithOptions(Options{})(s) } -func buildNextFragment(pf *fragmentation.PacketFragmenter, originalIPHeader header.IPv4) (stack.PacketBufferPtr, bool) { +func buildNextFragment(pf *fragmentation.PacketFragmenter, originalIPHeader header.IPv4) (*stack.PacketBuffer, bool) { fragPkt, offset, copied, more := pf.BuildNextFragment() fragPkt.NetworkProtocolNumber = ProtocolNumber @@ -2290,7 +2302,7 @@ type optionTracker struct { // // If there were no errors during parsing, the new set of options is returned as // a new buffer. 
-func (e *endpoint) processIPOptions(pkt stack.PacketBufferPtr, opts header.IPv4Options, usage optionsUsage) (header.IPv4Options, optionTracker, *header.IPv4OptParameterProblem) { +func (e *endpoint) processIPOptions(pkt *stack.PacketBuffer, opts header.IPv4Options, usage optionsUsage) (header.IPv4Options, optionTracker, *header.IPv4OptParameterProblem) { stats := e.stats.ip optIter := opts.MakeIterator() diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4_state_autogen.go index d538eecb..7c522509 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4_state_autogen.go @@ -3,6 +3,8 @@ package ipv4 import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -21,10 +23,10 @@ func (i *icmpv4DestinationUnreachableSockError) StateSave(stateSinkObject state. i.beforeSave() } -func (i *icmpv4DestinationUnreachableSockError) afterLoad() {} +func (i *icmpv4DestinationUnreachableSockError) afterLoad(context.Context) {} // +checklocksignore -func (i *icmpv4DestinationUnreachableSockError) StateLoad(stateSourceObject state.Source) { +func (i *icmpv4DestinationUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (i *icmpv4DestinationHostUnreachableSockError) StateTypeName() string { @@ -45,10 +47,10 @@ func (i *icmpv4DestinationHostUnreachableSockError) StateSave(stateSinkObject st stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError) } -func (i *icmpv4DestinationHostUnreachableSockError) afterLoad() {} +func (i *icmpv4DestinationHostUnreachableSockError) afterLoad(context.Context) {} // +checklocksignore -func (i *icmpv4DestinationHostUnreachableSockError) StateLoad(stateSourceObject state.Source) { +func (i *icmpv4DestinationHostUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, 
&i.icmpv4DestinationUnreachableSockError) } @@ -70,10 +72,10 @@ func (i *icmpv4DestinationNetUnreachableSockError) StateSave(stateSinkObject sta stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError) } -func (i *icmpv4DestinationNetUnreachableSockError) afterLoad() {} +func (i *icmpv4DestinationNetUnreachableSockError) afterLoad(context.Context) {} // +checklocksignore -func (i *icmpv4DestinationNetUnreachableSockError) StateLoad(stateSourceObject state.Source) { +func (i *icmpv4DestinationNetUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError) } @@ -95,10 +97,10 @@ func (i *icmpv4DestinationPortUnreachableSockError) StateSave(stateSinkObject st stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError) } -func (i *icmpv4DestinationPortUnreachableSockError) afterLoad() {} +func (i *icmpv4DestinationPortUnreachableSockError) afterLoad(context.Context) {} // +checklocksignore -func (i *icmpv4DestinationPortUnreachableSockError) StateLoad(stateSourceObject state.Source) { +func (i *icmpv4DestinationPortUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError) } @@ -120,10 +122,10 @@ func (i *icmpv4DestinationProtoUnreachableSockError) StateSave(stateSinkObject s stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError) } -func (i *icmpv4DestinationProtoUnreachableSockError) afterLoad() {} +func (i *icmpv4DestinationProtoUnreachableSockError) afterLoad(context.Context) {} // +checklocksignore -func (i *icmpv4DestinationProtoUnreachableSockError) StateLoad(stateSourceObject state.Source) { +func (i *icmpv4DestinationProtoUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError) } @@ -145,10 +147,10 @@ func (i *icmpv4SourceRouteFailedSockError) 
StateSave(stateSinkObject state.Sink) stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError) } -func (i *icmpv4SourceRouteFailedSockError) afterLoad() {} +func (i *icmpv4SourceRouteFailedSockError) afterLoad(context.Context) {} // +checklocksignore -func (i *icmpv4SourceRouteFailedSockError) StateLoad(stateSourceObject state.Source) { +func (i *icmpv4SourceRouteFailedSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError) } @@ -170,10 +172,10 @@ func (i *icmpv4SourceHostIsolatedSockError) StateSave(stateSinkObject state.Sink stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError) } -func (i *icmpv4SourceHostIsolatedSockError) afterLoad() {} +func (i *icmpv4SourceHostIsolatedSockError) afterLoad(context.Context) {} // +checklocksignore -func (i *icmpv4SourceHostIsolatedSockError) StateLoad(stateSourceObject state.Source) { +func (i *icmpv4SourceHostIsolatedSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError) } @@ -195,10 +197,10 @@ func (i *icmpv4DestinationHostUnknownSockError) StateSave(stateSinkObject state. 
stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError) } -func (i *icmpv4DestinationHostUnknownSockError) afterLoad() {} +func (i *icmpv4DestinationHostUnknownSockError) afterLoad(context.Context) {} // +checklocksignore -func (i *icmpv4DestinationHostUnknownSockError) StateLoad(stateSourceObject state.Source) { +func (i *icmpv4DestinationHostUnknownSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError) } @@ -222,10 +224,10 @@ func (e *icmpv4FragmentationNeededSockError) StateSave(stateSinkObject state.Sin stateSinkObject.Save(1, &e.mtu) } -func (e *icmpv4FragmentationNeededSockError) afterLoad() {} +func (e *icmpv4FragmentationNeededSockError) afterLoad(context.Context) {} // +checklocksignore -func (e *icmpv4FragmentationNeededSockError) StateLoad(stateSourceObject state.Source) { +func (e *icmpv4FragmentationNeededSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &e.icmpv4DestinationUnreachableSockError) stateSourceObject.Load(1, &e.mtu) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv6/icmp.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv6/icmp.go index a98332dc..4028f91d 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv6/icmp.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv6/icmp.go @@ -164,7 +164,7 @@ func (e *endpoint) checkLocalAddress(addr tcpip.Address) bool { // the original packet that caused the ICMP one to be sent. This information is // used to find out which transport endpoint must be notified about the ICMP // packet. 
-func (e *endpoint) handleControl(transErr stack.TransportError, pkt stack.PacketBufferPtr) { +func (e *endpoint) handleControl(transErr stack.TransportError, pkt *stack.PacketBuffer) { h, ok := pkt.Data().PullUp(header.IPv6MinimumSize) if !ok { return @@ -267,7 +267,7 @@ func getTargetLinkAddr(it header.NDPOptionIterator) (tcpip.LinkAddress, bool) { }) } -func isMLDValid(pkt stack.PacketBufferPtr, iph header.IPv6, routerAlert *header.IPv6RouterAlertOption) bool { +func isMLDValid(pkt *stack.PacketBuffer, iph header.IPv6, routerAlert *header.IPv6RouterAlertOption) bool { // As per RFC 2710 section 3: // All MLD messages described in this document are sent with a link-local // IPv6 Source Address, an IPv6 Hop Limit of 1, and an IPv6 Router Alert @@ -287,7 +287,7 @@ func isMLDValid(pkt stack.PacketBufferPtr, iph header.IPv6, routerAlert *header. return true } -func (e *endpoint) handleICMP(pkt stack.PacketBufferPtr, hasFragmentHeader bool, routerAlert *header.IPv6RouterAlertOption) { +func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, routerAlert *header.IPv6RouterAlertOption) { sent := e.stats.icmp.packetsSent received := e.stats.icmp.packetsReceived h := header.ICMPv6(pkt.TransportHeader().Slice()) @@ -540,6 +540,7 @@ func (e *endpoint) handleICMP(pkt stack.PacketBufferPtr, hasFragmentHeader bool, // na.SetSolicitedFlag(!unspecifiedSource) na.SetOverrideFlag(true) + na.SetRouterFlag(e.Forwarding()) na.SetTargetAddress(targetAddr) na.Options().Serialize(optsSerializer) packet.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{ @@ -595,7 +596,7 @@ func (e *endpoint) handleICMP(pkt stack.PacketBufferPtr, hasFragmentHeader bool, // We just got an NA from a node that owns an address we are performing // DAD on, implying the address is not unique. 
In this case we let the - // stack know so it can handle such a scenario and do nothing furthur with + // stack know so it can handle such a scenario and do nothing further with // the NDP NA. // // We would get an error if the address no longer exists or the address @@ -913,7 +914,7 @@ func (e *endpoint) LinkAddressRequest(targetAddr, localAddr tcpip.Address, remot if localAddr.BitLen() == 0 { // Find an address that we can use as our source address. - addressEndpoint := e.AcquireOutgoingPrimaryAddress(remoteAddr, false /* allowExpired */) + addressEndpoint := e.AcquireOutgoingPrimaryAddress(remoteAddr, tcpip.Address{} /* srcHint */, false /* allowExpired */) if addressEndpoint == nil { return &tcpip.ErrNetworkUnreachable{} } @@ -960,7 +961,7 @@ type icmpReason interface { type icmpReasonParameterProblem struct { code header.ICMPv6Code - // pointer is defined in the RFC 4443 setion 3.4 which reads: + // pointer is defined in the RFC 4443 section 3.4 which reads: // // Pointer Identifies the octet offset within the invoking packet // where the error was detected. @@ -1052,7 +1053,7 @@ func (*icmpReasonReassemblyTimeout) respondsToMulticast() bool { // returnError takes an error descriptor and generates the appropriate ICMP // error packet for IPv6 and sends it. -func (p *protocol) returnError(reason icmpReason, pkt stack.PacketBufferPtr, deliveredLocally bool) tcpip.Error { +func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer, deliveredLocally bool) tcpip.Error { origIPHdr := header.IPv6(pkt.NetworkHeader().Slice()) origIPHdrSrc := origIPHdr.SourceAddress() origIPHdrDst := origIPHdr.DestinationAddress() @@ -1217,14 +1218,14 @@ func (p *protocol) returnError(reason icmpReason, pkt stack.PacketBufferPtr, del } // OnReassemblyTimeout implements fragmentation.TimeoutHandler. 
-func (p *protocol) OnReassemblyTimeout(pkt stack.PacketBufferPtr) { +func (p *protocol) OnReassemblyTimeout(pkt *stack.PacketBuffer) { // OnReassemblyTimeout sends a Time Exceeded Message as per RFC 2460 Section // 4.5: // // If the first fragment (i.e., the one with a Fragment Offset of zero) has // been received, an ICMP Time Exceeded -- Fragment Reassembly Time Exceeded // message should be sent to the source of that fragment. - if !pkt.IsNil() { + if pkt != nil { p.returnError(&icmpReasonReassemblyTimeout{}, pkt, true /* deliveredLocally */) } } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv6/ipv6.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv6/ipv6.go index ff5b4465..b0da8b4b 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv6/ipv6.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv6/ipv6.go @@ -16,9 +16,7 @@ package ipv6 import ( - "encoding/binary" "fmt" - "hash/fnv" "math" "reflect" "sort" @@ -30,7 +28,6 @@ import ( "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/header/parse" - "gvisor.dev/gvisor/pkg/tcpip/network/hash" "gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation" "gvisor.dev/gvisor/pkg/tcpip/network/internal/ip" "gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast" @@ -286,7 +283,7 @@ func (*endpoint) DuplicateAddressProtocol() tcpip.NetworkProtocolNumber { } // HandleLinkResolutionFailure implements stack.LinkResolvableNetworkEndpoint. -func (e *endpoint) HandleLinkResolutionFailure(pkt stack.PacketBufferPtr) { +func (e *endpoint) HandleLinkResolutionFailure(pkt *stack.PacketBuffer) { // If we are operating as a router, we should return an ICMP error to the // original packet's sender. 
if pkt.NetworkPacketInfo.IsForwardedPacket { @@ -724,7 +721,7 @@ func (e *endpoint) MaxHeaderLength() uint16 { return e.nic.MaxHeaderLength() + header.IPv6MinimumSize } -func addIPHeader(srcAddr, dstAddr tcpip.Address, pkt stack.PacketBufferPtr, params stack.NetworkHeaderParams, extensionHeaders header.IPv6ExtHdrSerializer) tcpip.Error { +func addIPHeader(srcAddr, dstAddr tcpip.Address, pkt *stack.PacketBuffer, params stack.NetworkHeaderParams, extensionHeaders header.IPv6ExtHdrSerializer) tcpip.Error { extHdrsLen := extensionHeaders.Length() length := pkt.Size() + extensionHeaders.Length() if length > math.MaxUint16 { @@ -743,7 +740,7 @@ func addIPHeader(srcAddr, dstAddr tcpip.Address, pkt stack.PacketBufferPtr, para return nil } -func packetMustBeFragmented(pkt stack.PacketBufferPtr, networkMTU uint32) bool { +func packetMustBeFragmented(pkt *stack.PacketBuffer, networkMTU uint32) bool { payload := len(pkt.TransportHeader().Slice()) + pkt.Data().Size() return pkt.GSOOptions.Type == stack.GSONone && uint32(payload) > networkMTU } @@ -753,7 +750,7 @@ func packetMustBeFragmented(pkt stack.PacketBufferPtr, networkMTU uint32) bool { // fragments left to be processed. The IP header must already be present in the // original packet. The transport header protocol number is required to avoid // parsing the IPv6 extension headers. 
-func (e *endpoint) handleFragments(r *stack.Route, networkMTU uint32, pkt stack.PacketBufferPtr, transProto tcpip.TransportProtocolNumber, handler func(stack.PacketBufferPtr) tcpip.Error) (int, int, tcpip.Error) { +func (e *endpoint) handleFragments(r *stack.Route, networkMTU uint32, pkt *stack.PacketBuffer, transProto tcpip.TransportProtocolNumber, handler func(*stack.PacketBuffer) tcpip.Error) (int, int, tcpip.Error) { networkHeader := header.IPv6(pkt.NetworkHeader().Slice()) // TODO(gvisor.dev/issue/3912): Once the Authentication or ESP Headers are @@ -777,7 +774,7 @@ func (e *endpoint) handleFragments(r *stack.Route, networkMTU uint32, pkt stack. pf := fragmentation.MakePacketFragmenter(pkt, fragmentPayloadLen, calculateFragmentReserve(pkt)) defer pf.Release() - id := e.protocol.ids[hashRoute(r, e.protocol.hashIV)%buckets].Add(1) + id := e.getFragmentID() var n int for { @@ -795,7 +792,7 @@ func (e *endpoint) handleFragments(r *stack.Route, networkMTU uint32, pkt stack. } // WritePacket writes a packet to the given destination address and protocol. 
-func (e *endpoint) WritePacket(r *stack.Route, params stack.NetworkHeaderParams, pkt stack.PacketBufferPtr) tcpip.Error { +func (e *endpoint) WritePacket(r *stack.Route, params stack.NetworkHeaderParams, pkt *stack.PacketBuffer) tcpip.Error { dstAddr := r.RemoteAddress() if err := addIPHeader(r.LocalAddress(), dstAddr, pkt, params, nil /* extensionHeaders */); err != nil { return err @@ -829,7 +826,7 @@ func (e *endpoint) WritePacket(r *stack.Route, params stack.NetworkHeaderParams, return e.writePacket(r, pkt, params.Protocol, false /* headerIncluded */) } -func (e *endpoint) writePacket(r *stack.Route, pkt stack.PacketBufferPtr, protocol tcpip.TransportProtocolNumber, headerIncluded bool) tcpip.Error { +func (e *endpoint) writePacket(r *stack.Route, pkt *stack.PacketBuffer, protocol tcpip.TransportProtocolNumber, headerIncluded bool) tcpip.Error { if r.Loop()&stack.PacketLoop != 0 { // If the packet was generated by the stack (not a raw/packet endpoint // where a packet may be written with the header included), then we can @@ -863,7 +860,7 @@ func (e *endpoint) writePacket(r *stack.Route, pkt stack.PacketBufferPtr, protoc // not by routers along a packet's delivery path. return &tcpip.ErrMessageTooLong{} } - sent, remain, err := e.handleFragments(r, networkMTU, pkt, protocol, func(fragPkt stack.PacketBufferPtr) tcpip.Error { + sent, remain, err := e.handleFragments(r, networkMTU, pkt, protocol, func(fragPkt *stack.PacketBuffer) tcpip.Error { // TODO(gvisor.dev/issue/3884): Evaluate whether we want to send each // fragment one by one using WritePacket() (current strategy) or if we // want to create a PacketBufferList from the fragments and feed it to @@ -885,7 +882,7 @@ func (e *endpoint) writePacket(r *stack.Route, pkt stack.PacketBufferPtr, protoc } // WriteHeaderIncludedPacket implements stack.NetworkEndpoint. 
-func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt stack.PacketBufferPtr) tcpip.Error { +func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt *stack.PacketBuffer) tcpip.Error { // The packet already has an IP header, but there are a few required checks. h, ok := pkt.Data().PullUp(header.IPv6MinimumSize) if !ok { @@ -951,7 +948,7 @@ func validateAddressesForForwarding(h header.IPv6) ip.ForwardingError { // forwardUnicastPacket attempts to forward a unicast packet to its final // destination. -func (e *endpoint) forwardUnicastPacket(pkt stack.PacketBufferPtr) ip.ForwardingError { +func (e *endpoint) forwardUnicastPacket(pkt *stack.PacketBuffer) ip.ForwardingError { h := header.IPv6(pkt.NetworkHeader().Slice()) if err := validateAddressesForForwarding(h); err != nil { @@ -1020,7 +1017,7 @@ func (e *endpoint) forwardUnicastPacket(pkt stack.PacketBufferPtr) ip.Forwarding // forwardPacketWithRoute emits the pkt using the provided route. // // This method should be invoked by the endpoint that received the pkt. -func (e *endpoint) forwardPacketWithRoute(route *stack.Route, pkt stack.PacketBufferPtr) ip.ForwardingError { +func (e *endpoint) forwardPacketWithRoute(route *stack.Route, pkt *stack.PacketBuffer) ip.ForwardingError { h := header.IPv6(pkt.NetworkHeader().Slice()) stk := e.protocol.stack @@ -1072,7 +1069,7 @@ func (e *endpoint) forwardPacketWithRoute(route *stack.Route, pkt stack.PacketBu // HandlePacket is called by the link layer when new ipv6 packets arrive for // this endpoint. -func (e *endpoint) HandlePacket(pkt stack.PacketBufferPtr) { +func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) { stats := e.stats.ip stats.PacketsReceived.Increment() @@ -1135,7 +1132,7 @@ func (e *endpoint) HandlePacket(pkt stack.PacketBufferPtr) { // handleLocalPacket is like HandlePacket except it does not perform the // prerouting iptables hook or check for loopback traffic that originated from // outside of the netstack (i.e. 
martian loopback packets). -func (e *endpoint) handleLocalPacket(pkt stack.PacketBufferPtr, canSkipRXChecksum bool) { +func (e *endpoint) handleLocalPacket(pkt *stack.PacketBuffer, canSkipRXChecksum bool) { stats := e.stats.ip stats.PacketsReceived.Increment() @@ -1162,7 +1159,7 @@ func (e *endpoint) handleLocalPacket(pkt stack.PacketBufferPtr, canSkipRXChecksu // // This method should be invoked for incoming multicast packets using the // endpoint that received the packet. -func (e *endpoint) forwardMulticastPacket(h header.IPv6, pkt stack.PacketBufferPtr) ip.ForwardingError { +func (e *endpoint) forwardMulticastPacket(h header.IPv6, pkt *stack.PacketBuffer) ip.ForwardingError { if err := validateAddressesForForwarding(h); err != nil { return err } @@ -1208,7 +1205,7 @@ func (e *endpoint) forwardMulticastPacket(h header.IPv6, pkt stack.PacketBufferP // provided installedRoute. // // This method should be invoked by the endpoint that received the pkt. -func (e *endpoint) forwardValidatedMulticastPacket(pkt stack.PacketBufferPtr, installedRoute *multicast.InstalledRoute) ip.ForwardingError { +func (e *endpoint) forwardValidatedMulticastPacket(pkt *stack.PacketBuffer, installedRoute *multicast.InstalledRoute) ip.ForwardingError { // Per RFC 1812 section 5.2.1.3, // // Based on the IP source and destination addresses found in the datagram @@ -1241,7 +1238,7 @@ func (e *endpoint) forwardValidatedMulticastPacket(pkt stack.PacketBufferPtr, in // of the provided outgoing interface. // // This method should be invoked by the endpoint that received the pkt. 
-func (e *endpoint) forwardMulticastPacketForOutgoingInterface(pkt stack.PacketBufferPtr, outgoingInterface stack.MulticastRouteOutgoingInterface) ip.ForwardingError { +func (e *endpoint) forwardMulticastPacketForOutgoingInterface(pkt *stack.PacketBuffer, outgoingInterface stack.MulticastRouteOutgoingInterface) ip.ForwardingError { h := header.IPv6(pkt.NetworkHeader().Slice()) // Per RFC 1812 section 5.2.1.3, @@ -1302,7 +1299,7 @@ func (e *endpoint) handleForwardingError(err ip.ForwardingError) { stats.Forwarding.Errors.Increment() } -func (e *endpoint) handleValidatedPacket(h header.IPv6, pkt stack.PacketBufferPtr, inNICName string) { +func (e *endpoint) handleValidatedPacket(h header.IPv6, pkt *stack.PacketBuffer, inNICName string) { pkt.NICID = e.nic.ID() // Raw socket packets are delivered based solely on the transport protocol @@ -1361,7 +1358,7 @@ func (e *endpoint) handleValidatedPacket(h header.IPv6, pkt stack.PacketBufferPt } } -func (e *endpoint) deliverPacketLocally(h header.IPv6, pkt stack.PacketBufferPtr, inNICName string) { +func (e *endpoint) deliverPacketLocally(h header.IPv6, pkt *stack.PacketBuffer, inNICName string) { stats := e.stats.ip // iptables filtering. 
All packets that reach here are intended for @@ -1377,7 +1374,7 @@ func (e *endpoint) deliverPacketLocally(h header.IPv6, pkt stack.PacketBufferPtr _ = e.processExtensionHeaders(h, pkt, false /* forwarding */) } -func (e *endpoint) processExtensionHeader(it *header.IPv6PayloadIterator, pkt *stack.PacketBufferPtr, h header.IPv6, routerAlert **header.IPv6RouterAlertOption, hasFragmentHeader *bool, forwarding bool) (bool, error) { +func (e *endpoint) processExtensionHeader(it *header.IPv6PayloadIterator, pkt **stack.PacketBuffer, h header.IPv6, routerAlert **header.IPv6RouterAlertOption, hasFragmentHeader *bool, forwarding bool) (bool, error) { stats := e.stats.ip dstAddr := h.DestinationAddress() // Keep track of the start of the previous header so we can report the @@ -1455,7 +1452,7 @@ func (e *endpoint) processExtensionHeader(it *header.IPv6PayloadIterator, pkt *s // processExtensionHeaders processes the extension headers in the given packet. // Returns an error if the processing of a header failed or if the packet should // be discarded. -func (e *endpoint) processExtensionHeaders(h header.IPv6, pkt stack.PacketBufferPtr, forwarding bool) error { +func (e *endpoint) processExtensionHeaders(h header.IPv6, pkt *stack.PacketBuffer, forwarding bool) error { // Create a VV to parse the packet. We don't plan to modify anything here. // vv consists of: // - Any IPv6 header bytes after the first 40 (i.e. extensions). 
@@ -1491,7 +1488,7 @@ func (e *endpoint) processExtensionHeaders(h header.IPv6, pkt stack.PacketBuffer } } -func (e *endpoint) processIPv6RawPayloadHeader(extHdr *header.IPv6RawPayloadHeader, it *header.IPv6PayloadIterator, pkt stack.PacketBufferPtr, routerAlert *header.IPv6RouterAlertOption, previousHeaderStart uint32, hasFragmentHeader bool) error { +func (e *endpoint) processIPv6RawPayloadHeader(extHdr *header.IPv6RawPayloadHeader, it *header.IPv6PayloadIterator, pkt *stack.PacketBuffer, routerAlert *header.IPv6RouterAlertOption, previousHeaderStart uint32, hasFragmentHeader bool) error { stats := e.stats.ip // If the last header in the payload isn't a known IPv6 extension header, // handle it as if it is transport layer data.Ã¥ @@ -1573,7 +1570,7 @@ func (e *endpoint) processIPv6RawPayloadHeader(extHdr *header.IPv6RawPayloadHead } } -func (e *endpoint) processIPv6RoutingExtHeader(extHdr *header.IPv6RoutingExtHdr, it *header.IPv6PayloadIterator, pkt stack.PacketBufferPtr) error { +func (e *endpoint) processIPv6RoutingExtHeader(extHdr *header.IPv6RoutingExtHdr, it *header.IPv6PayloadIterator, pkt *stack.PacketBuffer) error { // As per RFC 8200 section 4.4, if a node encounters a routing header with // an unrecognized routing type value, with a non-zero Segments Left // value, the node must discard the packet and send an ICMP Parameter @@ -1596,7 +1593,7 @@ func (e *endpoint) processIPv6RoutingExtHeader(extHdr *header.IPv6RoutingExtHdr, return fmt.Errorf("found unrecognized routing type with non-zero segments left in header = %#v", extHdr) } -func (e *endpoint) processIPv6DestinationOptionsExtHdr(extHdr *header.IPv6DestinationOptionsExtHdr, it *header.IPv6PayloadIterator, pkt stack.PacketBufferPtr, dstAddr tcpip.Address) error { +func (e *endpoint) processIPv6DestinationOptionsExtHdr(extHdr *header.IPv6DestinationOptionsExtHdr, it *header.IPv6PayloadIterator, pkt *stack.PacketBuffer, dstAddr tcpip.Address) error { stats := e.stats.ip optsIt := extHdr.Iter() var 
uopt *header.IPv6UnknownExtHdrOption @@ -1659,7 +1656,7 @@ func (e *endpoint) processIPv6DestinationOptionsExtHdr(extHdr *header.IPv6Destin return nil } -func (e *endpoint) processIPv6HopByHopOptionsExtHdr(extHdr *header.IPv6HopByHopOptionsExtHdr, it *header.IPv6PayloadIterator, pkt stack.PacketBufferPtr, dstAddr tcpip.Address, routerAlert **header.IPv6RouterAlertOption, previousHeaderStart uint32, forwarding bool) error { +func (e *endpoint) processIPv6HopByHopOptionsExtHdr(extHdr *header.IPv6HopByHopOptionsExtHdr, it *header.IPv6PayloadIterator, pkt *stack.PacketBuffer, dstAddr tcpip.Address, routerAlert **header.IPv6RouterAlertOption, previousHeaderStart uint32, forwarding bool) error { stats := e.stats.ip // As per RFC 8200 section 4.1, the Hop By Hop extension header is // restricted to appear immediately after an IPv6 fixed header. @@ -1741,7 +1738,7 @@ func (e *endpoint) processIPv6HopByHopOptionsExtHdr(extHdr *header.IPv6HopByHopO return nil } -func (e *endpoint) processFragmentExtHdr(extHdr *header.IPv6FragmentExtHdr, it *header.IPv6PayloadIterator, pkt *stack.PacketBufferPtr, h header.IPv6) error { +func (e *endpoint) processFragmentExtHdr(extHdr *header.IPv6FragmentExtHdr, it *header.IPv6PayloadIterator, pkt **stack.PacketBuffer, h header.IPv6) error { stats := e.stats.ip fragmentFieldOffset := it.ParseOffset() @@ -2054,10 +2051,10 @@ func (e *endpoint) acquireAddressOrCreateTempLocked(localAddr tcpip.Address, all } // AcquireOutgoingPrimaryAddress implements stack.AddressableEndpoint. 
-func (e *endpoint) AcquireOutgoingPrimaryAddress(remoteAddr tcpip.Address, allowExpired bool) stack.AddressEndpoint { +func (e *endpoint) AcquireOutgoingPrimaryAddress(remoteAddr, srcHint tcpip.Address, allowExpired bool) stack.AddressEndpoint { e.mu.RLock() defer e.mu.RUnlock() - return e.acquireOutgoingPrimaryAddressRLocked(remoteAddr, allowExpired) + return e.acquireOutgoingPrimaryAddressRLocked(remoteAddr, srcHint, allowExpired) } // getLinkLocalAddressRLocked returns a link-local address from the primary list @@ -2084,7 +2081,9 @@ func (e *endpoint) getLinkLocalAddressRLocked() tcpip.Address { // but with locking requirements. // // Precondition: e.mu must be read locked. -func (e *endpoint) acquireOutgoingPrimaryAddressRLocked(remoteAddr tcpip.Address, allowExpired bool) stack.AddressEndpoint { +func (e *endpoint) acquireOutgoingPrimaryAddressRLocked(remoteAddr, srcHint tcpip.Address, allowExpired bool) stack.AddressEndpoint { + // TODO(b/309216156): Support IPv6 hints. + // addrCandidate is a candidate for Source Address Selection, as per // RFC 6724 section 5. type addrCandidate struct { @@ -2097,7 +2096,7 @@ func (e *endpoint) acquireOutgoingPrimaryAddressRLocked(remoteAddr tcpip.Address } if remoteAddr.BitLen() == 0 { - return e.mu.addressableEndpointState.AcquireOutgoingPrimaryAddress(remoteAddr, allowExpired) + return e.mu.addressableEndpointState.AcquireOutgoingPrimaryAddress(remoteAddr, srcHint, allowExpired) } // Create a candidate set of available addresses we can potentially use as a @@ -2196,7 +2195,7 @@ func (e *endpoint) acquireOutgoingPrimaryAddressRLocked(remoteAddr tcpip.Address // Return the most preferred address that can have its reference count // incremented. 
for _, c := range cs { - if c.addressEndpoint.IncRef() { + if c.addressEndpoint.TryIncRef() { return c.addressEndpoint } } @@ -2288,9 +2287,6 @@ type protocol struct { multicastForwardingDisp stack.MulticastForwardingEventDispatcher } - ids []atomicbitops.Uint32 - hashIV uint32 - // defaultTTL is the current default TTL for the protocol. Only the // uint8 portion of it is meaningful. defaultTTL atomicbitops.Uint32 @@ -2341,7 +2337,7 @@ func (p *protocol) NewEndpoint(nic stack.NetworkInterface, dispatcher stack.Tran const maxMulticastSolicit = 3 dadOptions := ip.DADOptions{ Clock: p.stack.Clock(), - SecureRNG: p.stack.SecureRNG(), + SecureRNG: p.stack.SecureRNG().Reader, NonceSize: nonceSize, ExtendDADTransmits: maxMulticastSolicit, Protocol: &e.mu.ndp, @@ -2562,7 +2558,7 @@ func (p *protocol) DisableMulticastForwarding() { p.multicastRouteTable.RemoveAllInstalledRoutes() } -func (p *protocol) forwardPendingMulticastPacket(pkt stack.PacketBufferPtr, installedRoute *multicast.InstalledRoute) { +func (p *protocol) forwardPendingMulticastPacket(pkt *stack.PacketBuffer, installedRoute *multicast.InstalledRoute) { defer pkt.DecRef() // Attempt to forward the packet using the endpoint that it originally @@ -2592,7 +2588,7 @@ func (*protocol) Wait() {} // for releasing the returned View. // // Returns true if the IP header was successfully parsed. 
-func (p *protocol) parseAndValidate(pkt stack.PacketBufferPtr) (*buffer.View, bool) { +func (p *protocol) parseAndValidate(pkt *stack.PacketBuffer) (*buffer.View, bool) { transProtoNum, hasTransportHdr, ok := p.Parse(pkt) if !ok { return nil, false @@ -2612,7 +2608,7 @@ func (p *protocol) parseAndValidate(pkt stack.PacketBufferPtr) (*buffer.View, bo return pkt.NetworkHeader().View(), true } -func (p *protocol) parseTransport(pkt stack.PacketBufferPtr, transProtoNum tcpip.TransportProtocolNumber) { +func (p *protocol) parseTransport(pkt *stack.PacketBuffer, transProtoNum tcpip.TransportProtocolNumber) { if transProtoNum == header.ICMPv6ProtocolNumber { // The transport layer will handle transport layer parsing errors. _ = parse.ICMPv6(pkt) @@ -2630,7 +2626,7 @@ func (p *protocol) parseTransport(pkt stack.PacketBufferPtr, transProtoNum tcpip } // Parse implements stack.NetworkProtocol. -func (*protocol) Parse(pkt stack.PacketBufferPtr) (proto tcpip.TransportProtocolNumber, hasTransportHdr bool, ok bool) { +func (*protocol) Parse(pkt *stack.PacketBuffer) (proto tcpip.TransportProtocolNumber, hasTransportHdr bool, ok bool) { proto, _, fragOffset, fragMore, ok := parse.IPv6(pkt) if !ok { return 0, false, false @@ -2652,7 +2648,7 @@ func (p *protocol) allowICMPReply(icmpType header.ICMPv6Type) bool { } // SendRejectionError implements stack.RejectIPv6WithHandler. 
-func (p *protocol) SendRejectionError(pkt stack.PacketBufferPtr, rejectWith stack.RejectIPv6WithICMPType, inputHook bool) tcpip.Error { +func (p *protocol) SendRejectionError(pkt *stack.PacketBuffer, rejectWith stack.RejectIPv6WithICMPType, inputHook bool) tcpip.Error { switch rejectWith { case stack.RejectIPv6WithICMPNoRoute: return p.returnError(&icmpReasonNetUnreachable{}, pkt, inputHook) @@ -2749,21 +2745,10 @@ type Options struct { func NewProtocolWithOptions(opts Options) stack.NetworkProtocolFactory { opts.NDPConfigs.validate() - ids := hash.RandN32(buckets) - hashIV := hash.RandN32(1)[0] - - atomicIds := make([]atomicbitops.Uint32, len(ids)) - for i := range ids { - atomicIds[i] = atomicbitops.FromUint32(ids[i]) - } - return func(s *stack.Stack) stack.NetworkProtocol { p := &protocol{ stack: s, options: opts, - - ids: atomicIds, - hashIV: hashIV, } p.fragmentation = fragmentation.NewFragmentation(header.IPv6FragmentExtHdrFragmentOffsetBytesPerUnit, fragmentation.HighFragThreshold, fragmentation.LowFragThreshold, ReassembleTimeout, s.Clock(), p) p.mu.eps = make(map[tcpip.NICID]*endpoint) @@ -2796,35 +2781,22 @@ func NewProtocol(s *stack.Stack) stack.NetworkProtocol { return NewProtocolWithOptions(Options{})(s) } -func calculateFragmentReserve(pkt stack.PacketBufferPtr) int { +func calculateFragmentReserve(pkt *stack.PacketBuffer) int { return pkt.AvailableHeaderBytes() + len(pkt.NetworkHeader().Slice()) + header.IPv6FragmentHeaderSize } -// hashRoute calculates a hash value for the given route. It uses the source & -// destination address and 32-bit number to generate the hash. -func hashRoute(r *stack.Route, hashIV uint32) uint32 { - // The FNV-1a was chosen because it is a fast hashing algorithm, and - // cryptographic properties are not needed here. 
- h := fnv.New32a() - localAddr := r.LocalAddress() - if _, err := h.Write(localAddr.AsSlice()); err != nil { - panic(fmt.Sprintf("Hash.Write: %s, but Hash' implementation of Write is not expected to ever return an error", err)) +// getFragmentID returns a random uint32 number (other than zero) to be used as +// fragment ID in the IPv6 header. +func (e *endpoint) getFragmentID() uint32 { + rng := e.protocol.stack.SecureRNG() + id := rng.Uint32() + for id == 0 { + id = rng.Uint32() } - remoteAddr := r.RemoteAddress() - if _, err := h.Write(remoteAddr.AsSlice()); err != nil { - panic(fmt.Sprintf("Hash.Write: %s, but Hash' implementation of Write is not expected to ever return an error", err)) - } - - s := make([]byte, 4) - binary.LittleEndian.PutUint32(s, hashIV) - if _, err := h.Write(s); err != nil { - panic(fmt.Sprintf("Hash.Write: %s, but Hash' implementation of Write is not expected ever to return an error", err)) - } - - return h.Sum32() + return id } -func buildNextFragment(pf *fragmentation.PacketFragmenter, originalIPHeaders header.IPv6, transportProto tcpip.TransportProtocolNumber, id uint32) (stack.PacketBufferPtr, bool) { +func buildNextFragment(pf *fragmentation.PacketFragmenter, originalIPHeaders header.IPv6, transportProto tcpip.TransportProtocolNumber, id uint32) (*stack.PacketBuffer, bool) { fragPkt, offset, copied, more := pf.BuildNextFragment() fragPkt.NetworkProtocolNumber = ProtocolNumber diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv6/ipv6_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv6/ipv6_state_autogen.go index 13d42782..a3715503 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv6/ipv6_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv6/ipv6_state_autogen.go @@ -3,6 +3,8 @@ package ipv6 import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -21,10 +23,10 @@ func (i *icmpv6DestinationUnreachableSockError) StateSave(stateSinkObject state. 
i.beforeSave() } -func (i *icmpv6DestinationUnreachableSockError) afterLoad() {} +func (i *icmpv6DestinationUnreachableSockError) afterLoad(context.Context) {} // +checklocksignore -func (i *icmpv6DestinationUnreachableSockError) StateLoad(stateSourceObject state.Source) { +func (i *icmpv6DestinationUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (i *icmpv6DestinationNetworkUnreachableSockError) StateTypeName() string { @@ -45,10 +47,10 @@ func (i *icmpv6DestinationNetworkUnreachableSockError) StateSave(stateSinkObject stateSinkObject.Save(0, &i.icmpv6DestinationUnreachableSockError) } -func (i *icmpv6DestinationNetworkUnreachableSockError) afterLoad() {} +func (i *icmpv6DestinationNetworkUnreachableSockError) afterLoad(context.Context) {} // +checklocksignore -func (i *icmpv6DestinationNetworkUnreachableSockError) StateLoad(stateSourceObject state.Source) { +func (i *icmpv6DestinationNetworkUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &i.icmpv6DestinationUnreachableSockError) } @@ -70,10 +72,10 @@ func (i *icmpv6DestinationPortUnreachableSockError) StateSave(stateSinkObject st stateSinkObject.Save(0, &i.icmpv6DestinationUnreachableSockError) } -func (i *icmpv6DestinationPortUnreachableSockError) afterLoad() {} +func (i *icmpv6DestinationPortUnreachableSockError) afterLoad(context.Context) {} // +checklocksignore -func (i *icmpv6DestinationPortUnreachableSockError) StateLoad(stateSourceObject state.Source) { +func (i *icmpv6DestinationPortUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &i.icmpv6DestinationUnreachableSockError) } @@ -95,10 +97,10 @@ func (i *icmpv6DestinationAddressUnreachableSockError) StateSave(stateSinkObject stateSinkObject.Save(0, &i.icmpv6DestinationUnreachableSockError) } -func (i *icmpv6DestinationAddressUnreachableSockError) afterLoad() {} +func (i 
*icmpv6DestinationAddressUnreachableSockError) afterLoad(context.Context) {} // +checklocksignore -func (i *icmpv6DestinationAddressUnreachableSockError) StateLoad(stateSourceObject state.Source) { +func (i *icmpv6DestinationAddressUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &i.icmpv6DestinationUnreachableSockError) } @@ -120,10 +122,10 @@ func (e *icmpv6PacketTooBigSockError) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &e.mtu) } -func (e *icmpv6PacketTooBigSockError) afterLoad() {} +func (e *icmpv6PacketTooBigSockError) afterLoad(context.Context) {} // +checklocksignore -func (e *icmpv6PacketTooBigSockError) StateLoad(stateSourceObject state.Source) { +func (e *icmpv6PacketTooBigSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &e.mtu) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv6/mld.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv6/mld.go index d474b44a..7feb5223 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv6/mld.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv6/mld.go @@ -230,7 +230,7 @@ func (*mldState) V2QueryMaxRespCodeToV1Delay(code uint16) time.Duration { func (mld *mldState) init(ep *endpoint) { mld.ep = ep mld.genericMulticastProtocol.Init(&ep.mu.RWMutex, ip.GenericMulticastProtocolOptions{ - Rand: ep.protocol.stack.Rand(), + Rand: ep.protocol.stack.InsecureRNG(), Clock: ep.protocol.stack.Clock(), Protocol: mld, MaxUnsolicitedReportDelay: UnsolicitedReportIntervalMax, @@ -308,7 +308,7 @@ func (mld *mldState) softLeaveAll() { mld.genericMulticastProtocol.MakeAllNonMemberLocked() } -// initializeAll attemps to initialize the MLD state for each group that has +// initializeAll attempts to initialize the MLD state for each group that has // been joined locally. // // Precondition: mld.ep.mu must be locked. 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv6/ndp.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv6/ndp.go index 25c44576..b44879bc 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv6/ndp.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv6/ndp.go @@ -1140,12 +1140,6 @@ func (ndp *ndpState) doSLAAC(prefix tcpip.Subnet, pl, vl time.Duration) { // // The IPv6 endpoint that ndp belongs to MUST be locked. func (ndp *ndpState) addAndAcquireSLAACAddr(addr tcpip.AddressWithPrefix, temporary bool, lifetimes stack.AddressLifetimes) stack.AddressEndpoint { - // Inform the integrator that we have a new SLAAC address. - ndpDisp := ndp.ep.protocol.options.NDPDisp - if ndpDisp == nil { - return nil - } - addressEndpoint, err := ndp.ep.addAndAcquirePermanentAddressLocked(addr, stack.AddressProperties{ PEB: stack.FirstPrimaryEndpoint, ConfigType: stack.AddressConfigSlaac, @@ -1156,8 +1150,11 @@ func (ndp *ndpState) addAndAcquireSLAACAddr(addr tcpip.AddressWithPrefix, tempor panic(fmt.Sprintf("ndp: error when adding SLAAC address %+v: %s", addr, err)) } - if disp := ndpDisp.OnAutoGenAddress(ndp.ep.nic.ID(), addr); disp != nil { - addressEndpoint.RegisterDispatcher(disp) + // Inform the integrator that we have a new SLAAC address. + if ndpDisp := ndp.ep.protocol.options.NDPDisp; ndpDisp != nil { + if disp := ndpDisp.OnAutoGenAddress(ndp.ep.nic.ID(), addr); disp != nil { + addressEndpoint.RegisterDispatcher(disp) + } } return addressEndpoint @@ -1172,7 +1169,7 @@ func (ndp *ndpState) addAndAcquireSLAACAddr(addr tcpip.AddressWithPrefix, tempor // The IPv6 endpoint that ndp belongs to MUST be locked. 
func (ndp *ndpState) generateSLAACAddr(prefix tcpip.Subnet, state *slaacPrefixState) bool { if addressEndpoint := state.stableAddr.addressEndpoint; addressEndpoint != nil { - panic(fmt.Sprintf("ndp: SLAAC prefix %s already has a permenant address %s", prefix, addressEndpoint.AddressWithPrefix())) + panic(fmt.Sprintf("ndp: SLAAC prefix %s already has a permanent address %s", prefix, addressEndpoint.AddressWithPrefix())) } // If we have already reached the maximum address generation attempts for the @@ -1623,7 +1620,7 @@ func (ndp *ndpState) refreshSLAACPrefixLifetimes(prefix tcpip.Subnet, prefixStat // have been regenerated, or we need to immediately regenerate an address // due to an update in preferred lifetime. // - // If each temporay address has already been regenerated, no new temporary + // If each temporary address has already been regenerated, no new temporary // address is generated. To ensure continuation of temporary SLAAC addresses, // we manually try to regenerate an address here. if regenForAddr.BitLen() != 0 || allAddressesRegenerated { @@ -1823,7 +1820,7 @@ func (ndp *ndpState) startSolicitingRouters() { // 4861 section 6.3.7. var delay time.Duration if ndp.configs.MaxRtrSolicitationDelay > 0 { - delay = time.Duration(ndp.ep.protocol.stack.Rand().Int63n(int64(ndp.configs.MaxRtrSolicitationDelay))) + delay = time.Duration(ndp.ep.protocol.stack.InsecureRNG().Int63n(int64(ndp.configs.MaxRtrSolicitationDelay))) } // Protected by ndp.ep.mu. @@ -1840,7 +1837,7 @@ func (ndp *ndpState) startSolicitingRouters() { // the unspecified address if no address is assigned // to the sending interface. 
localAddr := header.IPv6Any - if addressEndpoint := ndp.ep.AcquireOutgoingPrimaryAddress(header.IPv6AllRoutersLinkLocalMulticastAddress, false); addressEndpoint != nil { + if addressEndpoint := ndp.ep.AcquireOutgoingPrimaryAddress(header.IPv6AllRoutersLinkLocalMulticastAddress, tcpip.Address{} /* srcHint */, false); addressEndpoint != nil { localAddr = addressEndpoint.AddressWithPrefix().Address addressEndpoint.DecRef() } @@ -1968,7 +1965,7 @@ func (ndp *ndpState) init(ep *endpoint, dadOptions ip.DADOptions) { ndp.slaacPrefixes = make(map[tcpip.Subnet]slaacPrefixState) header.InitialTempIID(ndp.temporaryIIDHistory[:], ndp.ep.protocol.options.TempIIDSeed, ndp.ep.nic.ID()) - ndp.temporaryAddressDesyncFactor = time.Duration(ep.protocol.stack.Rand().Int63n(int64(MaxDesyncFactor))) + ndp.temporaryAddressDesyncFactor = time.Duration(ep.protocol.stack.InsecureRNG().Int63n(int64(MaxDesyncFactor))) } func (ndp *ndpState) SendDADMessage(addr tcpip.Address, nonce []byte) tcpip.Error { diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/flags.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/flags.go index a8d7bff2..409ef67e 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/flags.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/flags.go @@ -23,7 +23,7 @@ type Flags struct { // LoadBalanced indicates SO_REUSEPORT. // - // LoadBalanced takes precidence over MostRecent. + // LoadBalanced takes precedence over MostRecent. LoadBalanced bool // TupleOnly represents TCP SO_REUSEADDR. 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/ports.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/ports.go index 11a9dc0b..1e9040e4 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/ports.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/ports.go @@ -18,9 +18,8 @@ package ports import ( "math" - "math/rand" - "gvisor.dev/gvisor/pkg/atomicbitops" + "gvisor.dev/gvisor/pkg/rand" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" @@ -228,13 +227,6 @@ type PortManager struct { ephemeralMu sync.RWMutex firstEphemeral uint16 numEphemeral uint16 - - // hint is used to pick ports ephemeral ports in a stable order for - // a given port offset. - // - // hint must be accessed using the portHint/incPortHint helpers. - // TODO(gvisor.dev/issue/940): S/R this field. - hint atomicbitops.Uint32 } // NewPortManager creates new PortManager. @@ -255,41 +247,13 @@ type PortTester func(port uint16) (good bool, err tcpip.Error) // possible ephemeral ports, allowing the caller to decide whether a given port // is suitable for its needs, and stopping when a port is found or an error // occurs. -func (pm *PortManager) PickEphemeralPort(rng *rand.Rand, testPort PortTester) (port uint16, err tcpip.Error) { +func (pm *PortManager) PickEphemeralPort(rng rand.RNG, testPort PortTester) (port uint16, err tcpip.Error) { pm.ephemeralMu.RLock() firstEphemeral := pm.firstEphemeral numEphemeral := pm.numEphemeral pm.ephemeralMu.RUnlock() - offset := uint32(rng.Int31n(int32(numEphemeral))) - return pickEphemeralPort(offset, firstEphemeral, numEphemeral, testPort) -} - -// portHint atomically reads and returns the pm.hint value. -func (pm *PortManager) portHint() uint32 { - return pm.hint.Load() -} - -// incPortHint atomically increments pm.hint by 1. 
-func (pm *PortManager) incPortHint() { - pm.hint.Add(1) -} - -// PickEphemeralPortStable starts at the specified offset + pm.portHint and -// iterates over all ephemeral ports, allowing the caller to decide whether a -// given port is suitable for its needs and stopping when a port is found or an -// error occurs. -func (pm *PortManager) PickEphemeralPortStable(offset uint32, testPort PortTester) (port uint16, err tcpip.Error) { - pm.ephemeralMu.RLock() - firstEphemeral := pm.firstEphemeral - numEphemeral := pm.numEphemeral - pm.ephemeralMu.RUnlock() - - p, err := pickEphemeralPort(pm.portHint()+offset, firstEphemeral, numEphemeral, testPort) - if err == nil { - pm.incPortHint() - } - return p, err + return pickEphemeralPort(rng.Uint32(), firstEphemeral, numEphemeral, testPort) } // pickEphemeralPort starts at the offset specified from the FirstEphemeral port @@ -297,6 +261,7 @@ func (pm *PortManager) PickEphemeralPortStable(offset uint32, testPort PortTeste // caller to decide whether a given port is suitable for its needs, and stopping // when a port is found or an error occurs. func pickEphemeralPort(offset uint32, first, count uint16, testPort PortTester) (port uint16, err tcpip.Error) { + // This implements Algorithm 1 as per RFC 6056 Section 3.3.1. for i := uint32(0); i < uint32(count); i++ { port := uint16(uint32(first) + (offset+i)%uint32(count)) ok, err := testPort(port) @@ -320,7 +285,7 @@ func pickEphemeralPort(offset uint32, first, count uint16, testPort PortTester) // An optional PortTester can be passed in which if provided will be used to // test if the picked port can be used. The function should return true if the // port is safe to use, false otherwise. 
-func (pm *PortManager) ReservePort(rng *rand.Rand, res Reservation, testPort PortTester) (reservedPort uint16, err tcpip.Error) { +func (pm *PortManager) ReservePort(rng rand.RNG, res Reservation, testPort PortTester) (reservedPort uint16, err tcpip.Error) { pm.mu.Lock() defer pm.mu.Unlock() diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/ports_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/ports_state_autogen.go index 2719f6c4..163dc259 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/ports_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/ports_state_autogen.go @@ -3,6 +3,8 @@ package ports import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -28,10 +30,10 @@ func (f *Flags) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(2, &f.TupleOnly) } -func (f *Flags) afterLoad() {} +func (f *Flags) afterLoad(context.Context) {} // +checklocksignore -func (f *Flags) StateLoad(stateSourceObject state.Source) { +func (f *Flags) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &f.MostRecent) stateSourceObject.Load(1, &f.LoadBalanced) stateSourceObject.Load(2, &f.TupleOnly) diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/socketops.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/socketops.go index a3aadb22..b8196912 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/socketops.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/socketops.go @@ -63,6 +63,10 @@ type SocketOptionsHandler interface { // changed. The handler notifies the writers if the send buffer size is // increased with setsockopt(2) for TCP endpoints. WakeupWriters() + + // GetAcceptConn returns true if the socket is a TCP socket and is in + // listening state. 
+ GetAcceptConn() bool } // DefaultSocketOptionsHandler is an embeddable type that implements no-op @@ -112,6 +116,11 @@ func (*DefaultSocketOptionsHandler) OnSetReceiveBufferSize(v, oldSz int64) (newS return v, nil } +// GetAcceptConn implements SocketOptionsHandler.GetAcceptConn. +func (*DefaultSocketOptionsHandler) GetAcceptConn() bool { + return false +} + // StackHandler holds methods to access the stack options. These must be // implemented by the stack. type StackHandler interface { @@ -742,3 +751,8 @@ func (so *SocketOptions) SetRcvlowat(rcvlowat int32) Error { so.rcvlowat.Store(rcvlowat) return nil } + +// GetAcceptConn gets value for SO_ACCEPTCONN option. +func (so *SocketOptions) GetAcceptConn() bool { + return so.handler.GetAcceptConn() +} diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/address_state_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/address_state_mutex.go index a0177a58..8373da7e 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/address_state_mutex.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/address_state_mutex.go @@ -17,7 +17,7 @@ type addressStateRWMutex struct { var addressStatelockNames []string // lockNameIndex is used as an index passed to NestedLock and NestedUnlock, -// refering to an index within lockNames. +// referring to an index within lockNames. // Values are specified using the "consts" field of go_template_instance. 
type addressStatelockNameIndex int diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/address_state_refs.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/address_state_refs.go index 866a2c36..3be2d55b 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/address_state_refs.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/address_state_refs.go @@ -1,6 +1,7 @@ package stack import ( + "context" "fmt" "gvisor.dev/gvisor/pkg/atomicbitops" @@ -134,7 +135,7 @@ func (r *addressStateRefs) DecRef(destroy func()) { } } -func (r *addressStateRefs) afterLoad() { +func (r *addressStateRefs) afterLoad(context.Context) { if r.ReadRefs() > 0 { refs.Register(r) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/addressable_endpoint_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/addressable_endpoint_state.go index 91b615eb..815c8b12 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/addressable_endpoint_state.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/addressable_endpoint_state.go @@ -18,6 +18,7 @@ import ( "fmt" "gvisor.dev/gvisor/pkg/tcpip" + "gvisor.dev/gvisor/pkg/tcpip/header" ) func (lifetimes *AddressLifetimes) sanitize() { @@ -433,7 +434,7 @@ func (a *AddressableEndpointState) MainAddress() tcpip.AddressWithPrefix { a.mu.RLock() defer a.mu.RUnlock() - ep := a.acquirePrimaryAddressRLocked(func(ep *addressState) bool { + ep := a.acquirePrimaryAddressRLocked(tcpip.Address{}, tcpip.Address{} /* srcHint */, func(ep *addressState) bool { switch kind := ep.GetKind(); kind { case Permanent: return a.networkEndpoint.Enabled() || !a.options.HiddenWhileDisabled @@ -461,7 +462,34 @@ func (a *AddressableEndpointState) MainAddress() tcpip.AddressWithPrefix { // valid according to isValid. 
// // +checklocksread:a.mu -func (a *AddressableEndpointState) acquirePrimaryAddressRLocked(isValid func(*addressState) bool) *addressState { +func (a *AddressableEndpointState) acquirePrimaryAddressRLocked(remoteAddr, srcHint tcpip.Address, isValid func(*addressState) bool) *addressState { + // TODO: Move this out into IPv4-specific code. + // IPv6 handles source IP selection elsewhere. We have to do source + // selection only for IPv4, in which case ep is never deprecated. Thus + // we don't have to worry about refcounts. + if remoteAddr.Len() == header.IPv4AddressSize && remoteAddr != (tcpip.Address{}) { + var best *addressState + var bestLen uint8 + for _, state := range a.primary { + if !isValid(state) { + continue + } + // Source hint takes precedence over prefix matching. + if state.addr.Address == srcHint && srcHint != (tcpip.Address{}) { + best = state + break + } + stateLen := state.addr.Address.MatchingPrefix(remoteAddr) + if best == nil || bestLen < stateLen { + best = state + bestLen = stateLen + } + } + if best != nil && best.TryIncRef() { + return best + } + } + var deprecatedEndpoint *addressState for _, ep := range a.primary { if !isValid(ep) { @@ -469,7 +497,7 @@ func (a *AddressableEndpointState) acquirePrimaryAddressRLocked(isValid func(*ad } if !ep.Deprecated() { - if ep.IncRef() { + if ep.TryIncRef() { // ep is not deprecated, so return it immediately. // // If we kept track of a deprecated endpoint, decrement its reference @@ -486,7 +514,7 @@ func (a *AddressableEndpointState) acquirePrimaryAddressRLocked(isValid func(*ad return ep } - } else if deprecatedEndpoint == nil && ep.IncRef() { + } else if deprecatedEndpoint == nil && ep.TryIncRef() { // We prefer an endpoint that is not deprecated, but we keep track of // ep in case a doesn't have any non-deprecated endpoints. 
// @@ -518,7 +546,7 @@ func (a *AddressableEndpointState) AcquireAssignedAddressOrMatching(localAddr tc return nil } - if !addrState.IncRef() { + if !addrState.TryIncRef() { panic(fmt.Sprintf("failed to increase the reference count for address = %s", addrState.addr)) } @@ -527,7 +555,7 @@ func (a *AddressableEndpointState) AcquireAssignedAddressOrMatching(localAddr tc if f != nil { for _, addrState := range a.endpoints { - if addrState.IsAssigned(allowTemp) && f(addrState) && addrState.IncRef() { + if addrState.IsAssigned(allowTemp) && f(addrState) && addrState.TryIncRef() { return addrState } } @@ -595,11 +623,11 @@ func (a *AddressableEndpointState) AcquireAssignedAddress(localAddr tcpip.Addres } // AcquireOutgoingPrimaryAddress implements AddressableEndpoint. -func (a *AddressableEndpointState) AcquireOutgoingPrimaryAddress(remoteAddr tcpip.Address, allowExpired bool) AddressEndpoint { +func (a *AddressableEndpointState) AcquireOutgoingPrimaryAddress(remoteAddr tcpip.Address, srcHint tcpip.Address, allowExpired bool) AddressEndpoint { a.mu.Lock() defer a.mu.Unlock() - ep := a.acquirePrimaryAddressRLocked(func(ep *addressState) bool { + ep := a.acquirePrimaryAddressRLocked(remoteAddr, srcHint, func(ep *addressState) bool { return ep.IsAssigned(allowExpired) }) @@ -782,7 +810,7 @@ func (a *addressState) IsAssigned(allowExpired bool) bool { } // IncRef implements AddressEndpoint. 
-func (a *addressState) IncRef() bool { +func (a *addressState) TryIncRef() bool { return a.refs.TryIncRef() } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/addressable_endpoint_state_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/addressable_endpoint_state_mutex.go index f78028d6..56ea53e3 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/addressable_endpoint_state_mutex.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/addressable_endpoint_state_mutex.go @@ -17,7 +17,7 @@ type addressableEndpointStateRWMutex struct { var addressableEndpointStatelockNames []string // lockNameIndex is used as an index passed to NestedLock and NestedUnlock, -// refering to an index within lockNames. +// referring to an index within lockNames. // Values are specified using the "consts" field of go_template_instance. type addressableEndpointStatelockNameIndex int diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/bucket_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/bucket_mutex.go index e4100b1e..3cee9c82 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/bucket_mutex.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/bucket_mutex.go @@ -17,7 +17,7 @@ type bucketRWMutex struct { var bucketlockNames []string // lockNameIndex is used as an index passed to NestedLock and NestedUnlock, -// refering to an index within lockNames. +// referring to an index within lockNames. // Values are specified using the "consts" field of go_template_instance. 
type bucketlockNameIndex int diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/cleanup_endpoints_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/cleanup_endpoints_mutex.go index 0270b25d..0516e7b0 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/cleanup_endpoints_mutex.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/cleanup_endpoints_mutex.go @@ -19,7 +19,7 @@ var cleanupEndpointsprefixIndex *locking.MutexClass var cleanupEndpointslockNames []string // lockNameIndex is used as an index passed to NestedLock and NestedUnlock, -// refering to an index within lockNames. +// referring to an index within lockNames. // Values are specified using the "consts" field of go_template_instance. type cleanupEndpointslockNameIndex int diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conn_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conn_mutex.go index 6af809e7..6a9905ed 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conn_mutex.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conn_mutex.go @@ -17,7 +17,7 @@ type connRWMutex struct { var connlockNames []string // lockNameIndex is used as an index passed to NestedLock and NestedUnlock, -// refering to an index within lockNames. +// referring to an index within lockNames. // Values are specified using the "consts" field of go_template_instance. type connlockNameIndex int diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conn_track_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conn_track_mutex.go index ad020f1e..b416fda7 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conn_track_mutex.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conn_track_mutex.go @@ -17,7 +17,7 @@ type connTrackRWMutex struct { var connTracklockNames []string // lockNameIndex is used as an index passed to NestedLock and NestedUnlock, -// refering to an index within lockNames. +// referring to an index within lockNames. // Values are specified using the "consts" field of go_template_instance. 
type connTracklockNameIndex int diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conntrack.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conntrack.go index 02bce870..b0378799 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conntrack.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conntrack.go @@ -177,7 +177,7 @@ func (cn *conn) timedOut(now tcpip.MonotonicTime) bool { } // update the connection tracking state. -func (cn *conn) update(pkt PacketBufferPtr, reply bool) { +func (cn *conn) update(pkt *PacketBuffer, reply bool) { cn.stateMu.Lock() defer cn.stateMu.Unlock() @@ -269,7 +269,7 @@ func v6NetAndTransHdr(icmpPayload []byte, minTransHdrLen int) (header.Network, [ return netHdr, transHdr[:minTransHdrLen] } -func getEmbeddedNetAndTransHeaders(pkt PacketBufferPtr, netHdrLength int, getNetAndTransHdr netAndTransHeadersFunc, transProto tcpip.TransportProtocolNumber) (header.Network, header.ChecksummableTransport, bool) { +func getEmbeddedNetAndTransHeaders(pkt *PacketBuffer, netHdrLength int, getNetAndTransHdr netAndTransHeadersFunc, transProto tcpip.TransportProtocolNumber) (header.Network, header.ChecksummableTransport, bool) { switch transProto { case header.TCPProtocolNumber: if netAndTransHeader, ok := pkt.Data().PullUp(netHdrLength + header.TCPMinimumSize); ok { @@ -285,7 +285,7 @@ func getEmbeddedNetAndTransHeaders(pkt PacketBufferPtr, netHdrLength int, getNet return nil, nil, false } -func getHeaders(pkt PacketBufferPtr) (netHdr header.Network, transHdr header.Transport, isICMPError bool, ok bool) { +func getHeaders(pkt *PacketBuffer) (netHdr header.Network, transHdr header.Transport, isICMPError bool, ok bool) { switch pkt.TransportProtocolNumber { case header.TCPProtocolNumber: if tcpHeader := header.TCP(pkt.TransportHeader().Slice()); len(tcpHeader) >= header.TCPMinimumSize { @@ -373,7 +373,7 @@ func getTupleIDForRegularPacket(netHdr header.Network, netProto tcpip.NetworkPro } } -func getTupleIDForPacketInICMPError(pkt PacketBufferPtr, 
getNetAndTransHdr netAndTransHeadersFunc, netProto tcpip.NetworkProtocolNumber, netLen int, transProto tcpip.TransportProtocolNumber) (tupleID, bool) { +func getTupleIDForPacketInICMPError(pkt *PacketBuffer, getNetAndTransHdr netAndTransHeadersFunc, netProto tcpip.NetworkProtocolNumber, netLen int, transProto tcpip.TransportProtocolNumber) (tupleID, bool) { if netHdr, transHdr, ok := getEmbeddedNetAndTransHeaders(pkt, netLen, getNetAndTransHdr, transProto); ok { return tupleID{ srcAddr: netHdr.DestinationAddress(), @@ -396,7 +396,7 @@ const ( getTupleIDOKAndDontAllowNewConn ) -func getTupleIDForEchoPacket(pkt PacketBufferPtr, ident uint16, request bool) tupleID { +func getTupleIDForEchoPacket(pkt *PacketBuffer, ident uint16, request bool) tupleID { netHdr := pkt.Network() tid := tupleID{ srcAddr: netHdr.SourceAddress(), @@ -414,7 +414,7 @@ func getTupleIDForEchoPacket(pkt PacketBufferPtr, ident uint16, request bool) tu return tid } -func getTupleID(pkt PacketBufferPtr) (tupleID, getTupleIDDisposition) { +func getTupleID(pkt *PacketBuffer) (tupleID, getTupleIDDisposition) { switch pkt.TransportProtocolNumber { case header.TCPProtocolNumber: if transHeader := header.TCP(pkt.TransportHeader().Slice()); len(transHeader) >= header.TCPMinimumSize { @@ -504,7 +504,7 @@ func (ct *ConnTrack) init() { // // If the packet's protocol is trackable, the connection's state is updated to // match the contents of the packet. -func (ct *ConnTrack) getConnAndUpdate(pkt PacketBufferPtr, skipChecksumValidation bool) *tuple { +func (ct *ConnTrack) getConnAndUpdate(pkt *PacketBuffer, skipChecksumValidation bool) *tuple { // Get or (maybe) create a connection. 
t := func() *tuple { var allowNewConn bool @@ -695,20 +695,41 @@ func (cn *conn) finalize() bool { } } -func (cn *conn) maybePerformNoopNAT(dnat bool) { +// If NAT has not been configured for this connection, either mark the +// connection as configured for "no-op NAT", in the case of DNAT, or, in the +// case of SNAT, perform source port remapping so that source ports used by +// locally-generated traffic do not conflict with ports occupied by existing NAT +// bindings. +// +// Note that in the typical case this is also a no-op, because `snatAction` +// will do nothing if the original tuple is already unique. +func (cn *conn) maybePerformNoopNAT(pkt *PacketBuffer, hook Hook, r *Route, dnat bool) { cn.mu.Lock() - defer cn.mu.Unlock() - var manip *manipType if dnat { manip = &cn.destinationManip } else { manip = &cn.sourceManip } - - if *manip == manipNotPerformed { - *manip = manipPerformedNoop + if *manip != manipNotPerformed { + cn.mu.Unlock() + _ = cn.handlePacket(pkt, hook, r) + return } + if dnat { + *manip = manipPerformedNoop + cn.mu.Unlock() + _ = cn.handlePacket(pkt, hook, r) + return + } + cn.mu.Unlock() + + // At this point, we know that NAT has not yet been performed on this + // connection, and the DNAT case has been handled with a no-op. For SNAT, we + // simply perform source port remapping to ensure that source ports for + // locally generated traffic do not clash with ports used by existing NAT + // bindings. + _, _ = snatAction(pkt, hook, r, 0, tcpip.Address{}, true /* changePort */, false /* changeAddress */) } type portOrIdentRange struct { @@ -725,7 +746,7 @@ type portOrIdentRange struct { // // Generally, only the first packet of a connection reaches this method; other // packets will be manipulated without needing to modify the connection. 
-func (cn *conn) performNAT(pkt PacketBufferPtr, hook Hook, r *Route, portsOrIdents portOrIdentRange, natAddress tcpip.Address, dnat bool) { +func (cn *conn) performNAT(pkt *PacketBuffer, hook Hook, r *Route, portsOrIdents portOrIdentRange, natAddress tcpip.Address, dnat, changePort, changeAddress bool) { lastPortOrIdent := func() uint16 { lastPortOrIdent := uint32(portsOrIdents.start) + portsOrIdents.size - 1 if lastPortOrIdent > math.MaxUint16 { @@ -762,12 +783,24 @@ func (cn *conn) performNAT(pkt PacketBufferPtr, hook Hook, r *Route, portsOrIden return } *manip = manipPerformed - *address = natAddress + if changeAddress { + *address = natAddress + } + + // Everything below here is port-fiddling. + if !changePort { + return + } // Does the current port/ident fit in the range? if portsOrIdents.start <= *portOrIdent && *portOrIdent <= lastPortOrIdent { // Yes, is the current reply tuple unique? - if other := cn.ct.connForTID(cn.reply.tupleID); other == nil { + // + // Or, does the reply tuple refer to the same connection as the current one that + // we are NATing? This would apply, for example, to a self-connected socket, + // where the original and reply tuples are identical. + other := cn.ct.connForTID(cn.reply.tupleID) + if other == nil || other.conn == cn { // Yes! No need to change the port. return } @@ -826,7 +859,7 @@ func (cn *conn) performNAT(pkt PacketBufferPtr, hook Hook, r *Route, portsOrIden // has had NAT performed on it. // // Returns true if the packet can skip the NAT table. 
-func (cn *conn) handlePacket(pkt PacketBufferPtr, hook Hook, rt *Route) bool { +func (cn *conn) handlePacket(pkt *PacketBuffer, hook Hook, rt *Route) bool { netHdr, transHdr, isICMPError, ok := getHeaders(pkt) if !ok { return false diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/endpoints_by_nic_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/endpoints_by_nic_mutex.go index ba1cd360..60642030 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/endpoints_by_nic_mutex.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/endpoints_by_nic_mutex.go @@ -17,7 +17,7 @@ type endpointsByNICRWMutex struct { var endpointsByNIClockNames []string // lockNameIndex is used as an index passed to NestedLock and NestedUnlock, -// refering to an index within lockNames. +// referring to an index within lockNames. // Values are specified using the "consts" field of go_template_instance. type endpointsByNIClockNameIndex int diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/gro.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/gro.go index fc9be3d2..53a4b991 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/gro.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/gro.go @@ -80,7 +80,7 @@ func (gb *groBucket) full() bool { // insert inserts pkt into the bucket. // +checklocks:gb.mu -func (gb *groBucket) insert(pkt PacketBufferPtr, ipHdr []byte, tcpHdr header.TCP, ep NetworkEndpoint) { +func (gb *groBucket) insert(pkt *PacketBuffer, ipHdr []byte, tcpHdr header.TCP, ep NetworkEndpoint) { groPkt := &gb.packetsPrealloc[gb.allocIdxs[gb.count]] *groPkt = groPacket{ pkt: pkt, @@ -96,9 +96,9 @@ func (gb *groBucket) insert(pkt PacketBufferPtr, ipHdr []byte, tcpHdr header.TCP } // removeOldest removes the oldest packet from gb and returns the contained -// PacketBufferPtr. gb must not be empty. +// *PacketBuffer. gb must not be empty. 
// +checklocks:gb.mu -func (gb *groBucket) removeOldest() PacketBufferPtr { +func (gb *groBucket) removeOldest() *PacketBuffer { pkt := gb.packets.Front() gb.packets.Remove(pkt) gb.count-- @@ -121,7 +121,7 @@ func (gb *groBucket) removeOne(pkt *groPacket) { // none exists. It also returns whether the groPkt should be flushed based on // differences between the two headers. // +checklocks:gb.mu -func (gb *groBucket) findGROPacket4(pkt PacketBufferPtr, ipHdr header.IPv4, tcpHdr header.TCP, ep NetworkEndpoint) (*groPacket, bool) { +func (gb *groBucket) findGROPacket4(pkt *PacketBuffer, ipHdr header.IPv4, tcpHdr header.TCP, ep NetworkEndpoint) (*groPacket, bool) { for groPkt := gb.packets.Front(); groPkt != nil; groPkt = groPkt.Next() { // Do the addresses match? groIPHdr := header.IPv4(groPkt.ipHdr) @@ -163,7 +163,7 @@ func (gb *groBucket) findGROPacket4(pkt PacketBufferPtr, ipHdr header.IPv4, tcpH // none exists. It also returns whether the groPkt should be flushed based on // differences between the two headers. // +checklocks:gb.mu -func (gb *groBucket) findGROPacket6(pkt PacketBufferPtr, ipHdr header.IPv6, tcpHdr header.TCP, ep NetworkEndpoint) (*groPacket, bool) { +func (gb *groBucket) findGROPacket6(pkt *PacketBuffer, ipHdr header.IPv6, tcpHdr header.TCP, ep NetworkEndpoint) (*groPacket, bool) { for groPkt := gb.packets.Front(); groPkt != nil; groPkt = groPkt.Next() { // Do the addresses match? 
groIPHdr := header.IPv6(groPkt.ipHdr) @@ -216,7 +216,7 @@ func (gb *groBucket) findGROPacket6(pkt PacketBufferPtr, ipHdr header.IPv6, tcpH } // +checklocks:gb.mu -func (gb *groBucket) found(gd *groDispatcher, groPkt *groPacket, flushGROPkt bool, pkt PacketBufferPtr, ipHdr []byte, tcpHdr header.TCP, ep NetworkEndpoint, updateIPHdr func([]byte, int)) { +func (gb *groBucket) found(gd *groDispatcher, groPkt *groPacket, flushGROPkt bool, pkt *PacketBuffer, ipHdr []byte, tcpHdr header.TCP, ep NetworkEndpoint, updateIPHdr func([]byte, int)) { // Flush groPkt or merge the packets. pktSize := pkt.Data().Size() flags := tcpHdr.Flags() @@ -301,7 +301,7 @@ type groPacket struct { groPacketEntry // pkt is the coalesced packet. - pkt PacketBufferPtr + pkt *PacketBuffer // ipHdr is the IP (v4 or v6) header for the coalesced packet. ipHdr []byte @@ -410,7 +410,7 @@ func (gd *groDispatcher) setInterval(interval time.Duration) { } // dispatch sends pkt up the stack after it undergoes GRO coalescing. -func (gd *groDispatcher) dispatch(pkt PacketBufferPtr, netProto tcpip.NetworkProtocolNumber, ep NetworkEndpoint) { +func (gd *groDispatcher) dispatch(pkt *PacketBuffer, netProto tcpip.NetworkProtocolNumber, ep NetworkEndpoint) { // If GRO is disabled simply pass the packet along. if gd.getInterval() == 0 { ep.HandlePacket(pkt) @@ -428,7 +428,7 @@ func (gd *groDispatcher) dispatch(pkt PacketBufferPtr, netProto tcpip.NetworkPro } } -func (gd *groDispatcher) dispatch4(pkt PacketBufferPtr, ep NetworkEndpoint) { +func (gd *groDispatcher) dispatch4(pkt *PacketBuffer, ep NetworkEndpoint) { // Immediately get the IPv4 and TCP headers. We need a way to hash the // packet into its bucket, which requires addresses and ports. Linux // simply gets a hash passed by hardware, but we're not so lucky. @@ -491,13 +491,13 @@ func (gd *groDispatcher) dispatch4(pkt PacketBufferPtr, ep NetworkEndpoint) { } // Now we can get the bucket for the packet. 
- bucket := &gd.buckets[gd.bucketForPacket(ipHdr, tcpHdr)&groNBucketsMask] + bucket := &gd.buckets[gd.bucketForPacket4(ipHdr, tcpHdr)&groNBucketsMask] bucket.mu.Lock() groPkt, flushGROPkt := bucket.findGROPacket4(pkt, ipHdr, tcpHdr, ep) bucket.found(gd, groPkt, flushGROPkt, pkt, ipHdr, tcpHdr, ep, updateIPv4Hdr) } -func (gd *groDispatcher) dispatch6(pkt PacketBufferPtr, ep NetworkEndpoint) { +func (gd *groDispatcher) dispatch6(pkt *PacketBuffer, ep NetworkEndpoint) { // Immediately get the IPv6 and TCP headers. We need a way to hash the // packet into its bucket, which requires addresses and ports. Linux // simply gets a hash passed by hardware, but we're not so lucky. @@ -589,13 +589,30 @@ func (gd *groDispatcher) dispatch6(pkt PacketBufferPtr, ep NetworkEndpoint) { } // Now we can get the bucket for the packet. - bucket := &gd.buckets[gd.bucketForPacket(ipHdr, tcpHdr)&groNBucketsMask] + bucket := &gd.buckets[gd.bucketForPacket6(ipHdr, tcpHdr)&groNBucketsMask] bucket.mu.Lock() groPkt, flushGROPkt := bucket.findGROPacket6(pkt, ipHdr, tcpHdr, ep) bucket.found(gd, groPkt, flushGROPkt, pkt, ipHdr, tcpHdr, ep, updateIPv6Hdr) } -func (gd *groDispatcher) bucketForPacket(ipHdr header.Network, tcpHdr header.TCP) int { +func (gd *groDispatcher) bucketForPacket4(ipHdr header.IPv4, tcpHdr header.TCP) int { + // TODO(b/256037250): Use jenkins or checksum. Write a test to print + // distribution. + var sum int + srcAddr := ipHdr.SourceAddress() + for _, val := range srcAddr.AsSlice() { + sum += int(val) + } + dstAddr := ipHdr.DestinationAddress() + for _, val := range dstAddr.AsSlice() { + sum += int(val) + } + sum += int(tcpHdr.SourcePort()) + sum += int(tcpHdr.DestinationPort()) + return sum +} + +func (gd *groDispatcher) bucketForPacket6(ipHdr header.IPv6, tcpHdr header.TCP) int { // TODO(b/256037250): Use jenkins or checksum. Write a test to print // distribution. var sum int @@ -627,7 +644,7 @@ func (gd *groDispatcher) flush() bool { // Returns true iff packets remain. 
func (gd *groDispatcher) flushSinceOrEqualTo(old time.Time) bool { type pair struct { - pkt PacketBufferPtr + pkt *PacketBuffer ep NetworkEndpoint } @@ -678,8 +695,9 @@ func (gd *groDispatcher) close() { for i := range gd.buckets { bucket := &gd.buckets[i] bucket.mu.Lock() - for groPkt := bucket.packets.Front(); groPkt != nil; groPkt = groPkt.Next() { + for groPkt := bucket.packets.Front(); groPkt != nil; groPkt = bucket.packets.Front() { groPkt.pkt.DecRef() + bucket.removeOne(groPkt) } bucket.mu.Unlock() } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables.go index 9efeb595..a28ea90c 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables.go @@ -15,8 +15,10 @@ package stack import ( + "context" "fmt" "math/rand" + "reflect" "time" "gvisor.dev/gvisor/pkg/tcpip" @@ -48,11 +50,11 @@ func DefaultTables(clock tcpip.Clock, rand *rand.Rand) *IPTables { v4Tables: [NumTables]Table{ NATID: { Rules: []Rule{ - {Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, - {Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, - {Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, - {Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, - {Target: &ErrorTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, + {Filter: EmptyFilter4(), Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, + {Filter: EmptyFilter4(), Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, + {Filter: EmptyFilter4(), Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, + {Filter: EmptyFilter4(), Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, + {Filter: EmptyFilter4(), Target: &ErrorTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, }, BuiltinChains: [NumHooks]int{ Prerouting: 0, @@ -71,9 +73,9 @@ func DefaultTables(clock tcpip.Clock, rand *rand.Rand) 
*IPTables { }, MangleID: { Rules: []Rule{ - {Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, - {Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, - {Target: &ErrorTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, + {Filter: EmptyFilter4(), Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, + {Filter: EmptyFilter4(), Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, + {Filter: EmptyFilter4(), Target: &ErrorTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, }, BuiltinChains: [NumHooks]int{ Prerouting: 0, @@ -89,10 +91,10 @@ func DefaultTables(clock tcpip.Clock, rand *rand.Rand) *IPTables { }, FilterID: { Rules: []Rule{ - {Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, - {Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, - {Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, - {Target: &ErrorTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, + {Filter: EmptyFilter4(), Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, + {Filter: EmptyFilter4(), Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, + {Filter: EmptyFilter4(), Target: &AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, + {Filter: EmptyFilter4(), Target: &ErrorTarget{NetworkProtocol: header.IPv4ProtocolNumber}}, }, BuiltinChains: [NumHooks]int{ Prerouting: HookUnset, @@ -113,11 +115,11 @@ func DefaultTables(clock tcpip.Clock, rand *rand.Rand) *IPTables { v6Tables: [NumTables]Table{ NATID: { Rules: []Rule{ - {Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, - {Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, - {Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, - {Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, - {Target: &ErrorTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, + {Filter: EmptyFilter6(), Target: &AcceptTarget{NetworkProtocol: 
header.IPv6ProtocolNumber}}, + {Filter: EmptyFilter6(), Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, + {Filter: EmptyFilter6(), Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, + {Filter: EmptyFilter6(), Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, + {Filter: EmptyFilter6(), Target: &ErrorTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, }, BuiltinChains: [NumHooks]int{ Prerouting: 0, @@ -136,9 +138,9 @@ func DefaultTables(clock tcpip.Clock, rand *rand.Rand) *IPTables { }, MangleID: { Rules: []Rule{ - {Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, - {Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, - {Target: &ErrorTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, + {Filter: EmptyFilter6(), Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, + {Filter: EmptyFilter6(), Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, + {Filter: EmptyFilter6(), Target: &ErrorTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, }, BuiltinChains: [NumHooks]int{ Prerouting: 0, @@ -154,10 +156,10 @@ func DefaultTables(clock tcpip.Clock, rand *rand.Rand) *IPTables { }, FilterID: { Rules: []Rule{ - {Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, - {Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, - {Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, - {Target: &ErrorTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, + {Filter: EmptyFilter6(), Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, + {Filter: EmptyFilter6(), Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, + {Filter: EmptyFilter6(), Target: &AcceptTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, + {Filter: EmptyFilter6(), Target: &ErrorTarget{NetworkProtocol: header.IPv6ProtocolNumber}}, }, BuiltinChains: [NumHooks]int{ Prerouting: HookUnset, @@ -232,11 +234,29 @@ func (it *IPTables) 
getTableRLocked(id TableID, ipv6 bool) Table { // ReplaceTable replaces or inserts table by name. It panics when an invalid id // is provided. func (it *IPTables) ReplaceTable(id TableID, table Table, ipv6 bool) { + it.replaceTable(id, table, ipv6, false /* force */) +} + +// ForceReplaceTable replaces or inserts table by name. It panics when an invalid id +// is provided. It enables iptables even when the inserted table is all +// conditionless ACCEPT, skipping our optimization that disables iptables until +// they're modified. +func (it *IPTables) ForceReplaceTable(id TableID, table Table, ipv6 bool) { + it.replaceTable(id, table, ipv6, true /* force */) +} + +func (it *IPTables) replaceTable(id TableID, table Table, ipv6, force bool) { it.mu.Lock() defer it.mu.Unlock() + // If iptables is being enabled, initialize the conntrack table and // reaper. if !it.modified { + // Don't do anything if the table is identical. + if ((ipv6 && reflect.DeepEqual(table, it.v6Tables[id])) || (!ipv6 && reflect.DeepEqual(table, it.v4Tables[id]))) && !force { + return + } + it.connections.init() it.startReaper(reaperDelay) } @@ -281,7 +301,7 @@ type checkTable struct { // - Calls to dynamic functions, which can allocate. // // +checkescape:hard -func (it *IPTables) shouldSkipOrPopulateTables(tables []checkTable, pkt PacketBufferPtr) bool { +func (it *IPTables) shouldSkipOrPopulateTables(tables []checkTable, pkt *PacketBuffer) bool { switch pkt.NetworkProtocolNumber { case header.IPv4ProtocolNumber, header.IPv6ProtocolNumber: default: @@ -316,7 +336,7 @@ func (it *IPTables) shouldSkipOrPopulateTables(tables []checkTable, pkt PacketBu // that it does not allocate. Note that called functions (e.g. // getConnAndUpdate) can allocate. // TODO(b/233951539): checkescape fails on arm sometimes. Fix and re-add. 
-func (it *IPTables) CheckPrerouting(pkt PacketBufferPtr, addressEP AddressableEndpoint, inNicName string) bool { +func (it *IPTables) CheckPrerouting(pkt *PacketBuffer, addressEP AddressableEndpoint, inNicName string) bool { tables := [...]checkTable{ { fn: check, @@ -354,7 +374,7 @@ func (it *IPTables) CheckPrerouting(pkt PacketBufferPtr, addressEP AddressableEn // that it does not allocate. Note that called functions (e.g. // getConnAndUpdate) can allocate. // TODO(b/233951539): checkescape fails on arm sometimes. Fix and re-add. -func (it *IPTables) CheckInput(pkt PacketBufferPtr, inNicName string) bool { +func (it *IPTables) CheckInput(pkt *PacketBuffer, inNicName string) bool { tables := [...]checkTable{ { fn: checkNAT, @@ -394,7 +414,7 @@ func (it *IPTables) CheckInput(pkt PacketBufferPtr, inNicName string) bool { // that it does not allocate. Note that called functions (e.g. // getConnAndUpdate) can allocate. // TODO(b/233951539): checkescape fails on arm sometimes. Fix and re-add. -func (it *IPTables) CheckForward(pkt PacketBufferPtr, inNicName, outNicName string) bool { +func (it *IPTables) CheckForward(pkt *PacketBuffer, inNicName, outNicName string) bool { tables := [...]checkTable{ { fn: check, @@ -426,7 +446,7 @@ func (it *IPTables) CheckForward(pkt PacketBufferPtr, inNicName, outNicName stri // that it does not allocate. Note that called functions (e.g. // getConnAndUpdate) can allocate. // TODO(b/233951539): checkescape fails on arm sometimes. Fix and re-add. -func (it *IPTables) CheckOutput(pkt PacketBufferPtr, r *Route, outNicName string) bool { +func (it *IPTables) CheckOutput(pkt *PacketBuffer, r *Route, outNicName string) bool { tables := [...]checkTable{ { fn: check, @@ -470,7 +490,7 @@ func (it *IPTables) CheckOutput(pkt PacketBufferPtr, r *Route, outNicName string // that it does not allocate. Note that called functions (e.g. // getConnAndUpdate) can allocate. // TODO(b/233951539): checkescape fails on arm sometimes. Fix and re-add. 
-func (it *IPTables) CheckPostrouting(pkt PacketBufferPtr, r *Route, addressEP AddressableEndpoint, outNicName string) bool { +func (it *IPTables) CheckPostrouting(pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, outNicName string) bool { tables := [...]checkTable{ { fn: check, @@ -501,16 +521,16 @@ func (it *IPTables) CheckPostrouting(pkt PacketBufferPtr, r *Route, addressEP Ad // Note: this used to omit the *IPTables parameter, but doing so caused // unnecessary allocations. -type checkTableFn func(it *IPTables, table Table, hook Hook, pkt PacketBufferPtr, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool +type checkTableFn func(it *IPTables, table Table, hook Hook, pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool -func checkNAT(it *IPTables, table Table, hook Hook, pkt PacketBufferPtr, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool { +func checkNAT(it *IPTables, table Table, hook Hook, pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool { return it.checkNAT(table, hook, pkt, r, addressEP, inNicName, outNicName) } // checkNAT runs the packet through the NAT table. // // See check. -func (it *IPTables) checkNAT(table Table, hook Hook, pkt PacketBufferPtr, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool { +func (it *IPTables) checkNAT(table Table, hook Hook, pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool { t := pkt.tuple if t != nil && t.conn.handlePacket(pkt, hook, r) { return true @@ -541,14 +561,13 @@ func (it *IPTables) checkNAT(table Table, hook Hook, pkt PacketBufferPtr, r *Rou // // If the packet was already NATed, the connection must be NATed. 
if !natDone { - t.conn.maybePerformNoopNAT(dnat) - _ = t.conn.handlePacket(pkt, hook, r) + t.conn.maybePerformNoopNAT(pkt, hook, r, dnat) } return true } -func check(it *IPTables, table Table, hook Hook, pkt PacketBufferPtr, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool { +func check(it *IPTables, table Table, hook Hook, pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool { return it.check(table, hook, pkt, r, addressEP, inNicName, outNicName) } @@ -557,7 +576,7 @@ func check(it *IPTables, table Table, hook Hook, pkt PacketBufferPtr, r *Route, // network stack or tables, or false when it must be dropped. // // Precondition: The packet's network and transport header must be set. -func (it *IPTables) check(table Table, hook Hook, pkt PacketBufferPtr, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool { +func (it *IPTables) check(table Table, hook Hook, pkt *PacketBuffer, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) bool { ruleIdx := table.BuiltinChains[hook] switch verdict := it.checkChain(hook, pkt, table, ruleIdx, r, addressEP, inNicName, outNicName); verdict { // If the table returns Accept, move on to the next table. @@ -594,7 +613,7 @@ func (it *IPTables) beforeSave() { } // afterLoad is invoked by stateify. -func (it *IPTables) afterLoad() { +func (it *IPTables) afterLoad(context.Context) { it.startReaper(reaperDelay) } @@ -610,7 +629,7 @@ func (it *IPTables) startReaper(interval time.Duration) { // Preconditions: // - pkt is a IPv4 packet of at least length header.IPv4MinimumSize. // - pkt.NetworkHeader is not nil. 
-func (it *IPTables) checkChain(hook Hook, pkt PacketBufferPtr, table Table, ruleIdx int, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) chainVerdict { +func (it *IPTables) checkChain(hook Hook, pkt *PacketBuffer, table Table, ruleIdx int, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) chainVerdict { // Start from ruleIdx and walk the list of rules until a rule gives us // a verdict. for ruleIdx < len(table.Rules) { @@ -660,7 +679,7 @@ func (it *IPTables) checkChain(hook Hook, pkt PacketBufferPtr, table Table, rule // // * pkt is a IPv4 packet of at least length header.IPv4MinimumSize. // * pkt.NetworkHeader is not nil. -func (it *IPTables) checkRule(hook Hook, pkt PacketBufferPtr, table Table, ruleIdx int, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) (RuleVerdict, int) { +func (it *IPTables) checkRule(hook Hook, pkt *PacketBuffer, table Table, ruleIdx int, r *Route, addressEP AddressableEndpoint, inNicName, outNicName string) (RuleVerdict, int) { rule := table.Rules[ruleIdx] // Check whether the packet matches the IP header filter. diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_mutex.go index 984498d1..9a2b97f0 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_mutex.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_mutex.go @@ -17,7 +17,7 @@ type ipTablesRWMutex struct { var ipTableslockNames []string // lockNameIndex is used as an index passed to NestedLock and NestedUnlock, -// refering to an index within lockNames. +// referring to an index within lockNames. // Values are specified using the "consts" field of go_template_instance. 
type ipTableslockNameIndex int diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_targets.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_targets.go index 4ba1f3e8..f47b21fe 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_targets.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_targets.go @@ -30,7 +30,7 @@ type AcceptTarget struct { } // Action implements Target.Action. -func (*AcceptTarget) Action(PacketBufferPtr, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) { +func (*AcceptTarget) Action(*PacketBuffer, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) { return RuleAccept, 0 } @@ -41,14 +41,14 @@ type DropTarget struct { } // Action implements Target.Action. -func (*DropTarget) Action(PacketBufferPtr, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) { +func (*DropTarget) Action(*PacketBuffer, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) { return RuleDrop, 0 } // RejectIPv4WithHandler handles rejecting a packet. type RejectIPv4WithHandler interface { // SendRejectionError sends an error packet in response to the packet. - SendRejectionError(pkt PacketBufferPtr, rejectWith RejectIPv4WithICMPType, inputHook bool) tcpip.Error + SendRejectionError(pkt *PacketBuffer, rejectWith RejectIPv4WithICMPType, inputHook bool) tcpip.Error } // RejectIPv4WithICMPType indicates the type of ICMP error that should be sent. @@ -73,7 +73,7 @@ type RejectIPv4Target struct { } // Action implements Target.Action. -func (rt *RejectIPv4Target) Action(pkt PacketBufferPtr, hook Hook, _ *Route, _ AddressableEndpoint) (RuleVerdict, int) { +func (rt *RejectIPv4Target) Action(pkt *PacketBuffer, hook Hook, _ *Route, _ AddressableEndpoint) (RuleVerdict, int) { switch hook { case Input, Forward, Output: // There is nothing reasonable for us to do in response to an error here; @@ -90,7 +90,7 @@ func (rt *RejectIPv4Target) Action(pkt PacketBufferPtr, hook Hook, _ *Route, _ A // RejectIPv6WithHandler handles rejecting a packet. 
type RejectIPv6WithHandler interface { // SendRejectionError sends an error packet in response to the packet. - SendRejectionError(pkt PacketBufferPtr, rejectWith RejectIPv6WithICMPType, forwardingHook bool) tcpip.Error + SendRejectionError(pkt *PacketBuffer, rejectWith RejectIPv6WithICMPType, forwardingHook bool) tcpip.Error } // RejectIPv6WithICMPType indicates the type of ICMP error that should be sent. @@ -113,7 +113,7 @@ type RejectIPv6Target struct { } // Action implements Target.Action. -func (rt *RejectIPv6Target) Action(pkt PacketBufferPtr, hook Hook, _ *Route, _ AddressableEndpoint) (RuleVerdict, int) { +func (rt *RejectIPv6Target) Action(pkt *PacketBuffer, hook Hook, _ *Route, _ AddressableEndpoint) (RuleVerdict, int) { switch hook { case Input, Forward, Output: // There is nothing reasonable for us to do in response to an error here; @@ -135,7 +135,7 @@ type ErrorTarget struct { } // Action implements Target.Action. -func (*ErrorTarget) Action(PacketBufferPtr, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) { +func (*ErrorTarget) Action(*PacketBuffer, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) { log.Debugf("ErrorTarget triggered.") return RuleDrop, 0 } @@ -150,7 +150,7 @@ type UserChainTarget struct { } // Action implements Target.Action. -func (*UserChainTarget) Action(PacketBufferPtr, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) { +func (*UserChainTarget) Action(*PacketBuffer, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) { panic("UserChainTarget should never be called.") } @@ -162,7 +162,7 @@ type ReturnTarget struct { } // Action implements Target.Action. -func (*ReturnTarget) Action(PacketBufferPtr, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) { +func (*ReturnTarget) Action(*PacketBuffer, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) { return RuleReturn, 0 } @@ -182,10 +182,20 @@ type DNATTarget struct { // // Immutable. 
NetworkProtocol tcpip.NetworkProtocolNumber + + // ChangeAddress indicates whether we should check addresses. + // + // Immutable. + ChangeAddress bool + + // ChangePort indicates whether we should check ports. + // + // Immutable. + ChangePort bool } // Action implements Target.Action. -func (rt *DNATTarget) Action(pkt PacketBufferPtr, hook Hook, r *Route, addressEP AddressableEndpoint) (RuleVerdict, int) { +func (rt *DNATTarget) Action(pkt *PacketBuffer, hook Hook, r *Route, addressEP AddressableEndpoint) (RuleVerdict, int) { // Sanity check. if rt.NetworkProtocol != pkt.NetworkProtocolNumber { panic(fmt.Sprintf( @@ -201,7 +211,7 @@ func (rt *DNATTarget) Action(pkt PacketBufferPtr, hook Hook, r *Route, addressEP panic(fmt.Sprintf("%s unrecognized", hook)) } - return dnatAction(pkt, hook, r, rt.Port, rt.Addr) + return dnatAction(pkt, hook, r, rt.Port, rt.Addr, rt.ChangePort, rt.ChangeAddress) } @@ -219,7 +229,7 @@ type RedirectTarget struct { } // Action implements Target.Action. -func (rt *RedirectTarget) Action(pkt PacketBufferPtr, hook Hook, r *Route, addressEP AddressableEndpoint) (RuleVerdict, int) { +func (rt *RedirectTarget) Action(pkt *PacketBuffer, hook Hook, r *Route, addressEP AddressableEndpoint) (RuleVerdict, int) { // Sanity check. if rt.NetworkProtocol != pkt.NetworkProtocolNumber { panic(fmt.Sprintf( @@ -244,7 +254,7 @@ func (rt *RedirectTarget) Action(pkt PacketBufferPtr, hook Hook, r *Route, addre panic("redirect target is supported only on output and prerouting hooks") } - return dnatAction(pkt, hook, r, rt.Port, address) + return dnatAction(pkt, hook, r, rt.Port, address, true /* changePort */, true /* changeAddress */) } // SNATTarget modifies the source port/IP in the outgoing packets. @@ -255,10 +265,20 @@ type SNATTarget struct { // NetworkProtocol is the network protocol the target is used with. It // is immutable. NetworkProtocol tcpip.NetworkProtocolNumber + + // ChangeAddress indicates whether we should check addresses. 
+ // + // Immutable. + ChangeAddress bool + + // ChangePort indicates whether we should check ports. + // + // Immutable. + ChangePort bool } -func dnatAction(pkt PacketBufferPtr, hook Hook, r *Route, port uint16, address tcpip.Address) (RuleVerdict, int) { - return natAction(pkt, hook, r, portOrIdentRange{start: port, size: 1}, address, true /* dnat */) +func dnatAction(pkt *PacketBuffer, hook Hook, r *Route, port uint16, address tcpip.Address, changePort, changeAddress bool) (RuleVerdict, int) { + return natAction(pkt, hook, r, portOrIdentRange{start: port, size: 1}, address, true /* dnat */, changePort, changeAddress) } func targetPortRangeForTCPAndUDP(originalSrcPort uint16) portOrIdentRange { @@ -278,7 +298,7 @@ func targetPortRangeForTCPAndUDP(originalSrcPort uint16) portOrIdentRange { } } -func snatAction(pkt PacketBufferPtr, hook Hook, r *Route, port uint16, address tcpip.Address) (RuleVerdict, int) { +func snatAction(pkt *PacketBuffer, hook Hook, r *Route, port uint16, address tcpip.Address, changePort, changeAddress bool) (RuleVerdict, int) { portsOrIdents := portOrIdentRange{start: port, size: 1} switch pkt.TransportProtocolNumber { @@ -298,17 +318,17 @@ func snatAction(pkt PacketBufferPtr, hook Hook, r *Route, port uint16, address t portsOrIdents = portOrIdentRange{start: 0, size: math.MaxUint16 + 1} } - return natAction(pkt, hook, r, portsOrIdents, address, false /* dnat */) + return natAction(pkt, hook, r, portsOrIdents, address, false /* dnat */, changePort, changeAddress) } -func natAction(pkt PacketBufferPtr, hook Hook, r *Route, portsOrIdents portOrIdentRange, address tcpip.Address, dnat bool) (RuleVerdict, int) { +func natAction(pkt *PacketBuffer, hook Hook, r *Route, portsOrIdents portOrIdentRange, address tcpip.Address, dnat, changePort, changeAddress bool) (RuleVerdict, int) { // Drop the packet if network and transport header are not set. 
if len(pkt.NetworkHeader().Slice()) == 0 || len(pkt.TransportHeader().Slice()) == 0 { return RuleDrop, 0 } if t := pkt.tuple; t != nil { - t.conn.performNAT(pkt, hook, r, portsOrIdents, address, dnat) + t.conn.performNAT(pkt, hook, r, portsOrIdents, address, dnat, changePort, changeAddress) return RuleAccept, 0 } @@ -316,7 +336,7 @@ func natAction(pkt PacketBufferPtr, hook Hook, r *Route, portsOrIdents portOrIde } // Action implements Target.Action. -func (st *SNATTarget) Action(pkt PacketBufferPtr, hook Hook, r *Route, _ AddressableEndpoint) (RuleVerdict, int) { +func (st *SNATTarget) Action(pkt *PacketBuffer, hook Hook, r *Route, _ AddressableEndpoint) (RuleVerdict, int) { // Sanity check. if st.NetworkProtocol != pkt.NetworkProtocolNumber { panic(fmt.Sprintf( @@ -332,7 +352,7 @@ func (st *SNATTarget) Action(pkt PacketBufferPtr, hook Hook, r *Route, _ Address panic(fmt.Sprintf("%s unrecognized", hook)) } - return snatAction(pkt, hook, r, st.Port, st.Addr) + return snatAction(pkt, hook, r, st.Port, st.Addr, st.ChangePort, st.ChangeAddress) } // MasqueradeTarget modifies the source port/IP in the outgoing packets. @@ -343,7 +363,7 @@ type MasqueradeTarget struct { } // Action implements Target.Action. -func (mt *MasqueradeTarget) Action(pkt PacketBufferPtr, hook Hook, r *Route, addressEP AddressableEndpoint) (RuleVerdict, int) { +func (mt *MasqueradeTarget) Action(pkt *PacketBuffer, hook Hook, r *Route, addressEP AddressableEndpoint) (RuleVerdict, int) { // Sanity check. if mt.NetworkProtocol != pkt.NetworkProtocolNumber { panic(fmt.Sprintf( @@ -360,7 +380,7 @@ func (mt *MasqueradeTarget) Action(pkt PacketBufferPtr, hook Hook, r *Route, add } // addressEP is expected to be set for the postrouting hook. 
- ep := addressEP.AcquireOutgoingPrimaryAddress(pkt.Network().DestinationAddress(), false /* allowExpired */) + ep := addressEP.AcquireOutgoingPrimaryAddress(pkt.Network().DestinationAddress(), tcpip.Address{} /* srcHint */, false /* allowExpired */) if ep == nil { // No address exists that we can use as a source address. return RuleDrop, 0 @@ -368,7 +388,7 @@ func (mt *MasqueradeTarget) Action(pkt PacketBufferPtr, hook Hook, r *Route, add address := ep.AddressWithPrefix().Address ep.DecRef() - return snatAction(pkt, hook, r, 0 /* port */, address) + return snatAction(pkt, hook, r, 0 /* port */, address, true /* changePort */, true /* changeAddress */) } func rewritePacket(n header.Network, t header.Transport, updateSRCFields, fullChecksum, updatePseudoHeader bool, newPortOrIdent uint16, newAddr tcpip.Address) { diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_types.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_types.go index 3a908f9e..86aef602 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_types.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_types.go @@ -103,6 +103,14 @@ type IPTables struct { modified bool } +// Modified returns whether iptables has been modified. It is inherently racy +// and intended for use only in tests. +func (it *IPTables) Modified() bool { + it.mu.Lock() + defer it.mu.Unlock() + return it.modified +} + // VisitTargets traverses all the targets of all tables and replaces each with // transform(target). func (it *IPTables) VisitTargets(transform func(Target) Target) { @@ -235,11 +243,31 @@ type IPHeaderFilter struct { OutputInterfaceInvert bool } +// EmptyFilter4 returns an initialized IPv4 header filter. +func EmptyFilter4() IPHeaderFilter { + return IPHeaderFilter{ + Dst: tcpip.AddrFrom4([4]byte{}), + DstMask: tcpip.AddrFrom4([4]byte{}), + Src: tcpip.AddrFrom4([4]byte{}), + SrcMask: tcpip.AddrFrom4([4]byte{}), + } +} + +// EmptyFilter6 returns an initialized IPv6 header filter. 
+func EmptyFilter6() IPHeaderFilter { + return IPHeaderFilter{ + Dst: tcpip.AddrFrom16([16]byte{}), + DstMask: tcpip.AddrFrom16([16]byte{}), + Src: tcpip.AddrFrom16([16]byte{}), + SrcMask: tcpip.AddrFrom16([16]byte{}), + } +} + // match returns whether pkt matches the filter. // // Preconditions: pkt.NetworkHeader is set and is at least of the minimal IPv4 // or IPv6 header length. -func (fl IPHeaderFilter) match(pkt PacketBufferPtr, hook Hook, inNicName, outNicName string) bool { +func (fl IPHeaderFilter) match(pkt *PacketBuffer, hook Hook, inNicName, outNicName string) bool { // Extract header fields. var ( transProto tcpip.TransportProtocolNumber @@ -347,7 +375,7 @@ type Matcher interface { // used for suspicious packets. // // Precondition: packet.NetworkHeader is set. - Match(hook Hook, packet PacketBufferPtr, inputInterfaceName, outputInterfaceName string) (matches bool, hotdrop bool) + Match(hook Hook, packet *PacketBuffer, inputInterfaceName, outputInterfaceName string) (matches bool, hotdrop bool) } // A Target is the interface for taking an action for a packet. @@ -355,5 +383,5 @@ type Target interface { // Action takes an action on the packet and returns a verdict on how // traversal should (or should not) continue. If the return value is // Jump, it also returns the index of the rule to jump to. 
- Action(PacketBufferPtr, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) + Action(*PacketBuffer, Hook, *Route, AddressableEndpoint) (RuleVerdict, int) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/multi_port_endpoint_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/multi_port_endpoint_mutex.go index 7e2d5818..1038997b 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/multi_port_endpoint_mutex.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/multi_port_endpoint_mutex.go @@ -17,7 +17,7 @@ type multiPortEndpointRWMutex struct { var multiPortEndpointlockNames []string // lockNameIndex is used as an index passed to NestedLock and NestedUnlock, -// refering to an index within lockNames. +// referring to an index within lockNames. // Values are specified using the "consts" field of go_template_instance. type multiPortEndpointlockNameIndex int diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_cache.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_cache.go index b38bef4e..ce7215ca 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_cache.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_cache.go @@ -247,7 +247,7 @@ func (n *neighborCache) clear() { } n.mu.dynamic.lru = neighborEntryList{} - n.mu.cache = make(map[tcpip.Address]*neighborEntry) + clear(n.mu.cache) n.mu.dynamic.count = 0 } @@ -298,7 +298,7 @@ func (n *neighborCache) handleConfirmation(addr tcpip.Address, linkAddr tcpip.Li func (n *neighborCache) init(nic *nic, r LinkAddressResolver) { *n = neighborCache{ nic: nic, - state: NewNUDState(nic.stack.nudConfigs, nic.stack.clock, nic.stack.randomGenerator), + state: NewNUDState(nic.stack.nudConfigs, nic.stack.clock, nic.stack.insecureRNG), linkRes: r, } n.mu.Lock() diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_cache_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_cache_mutex.go index 290e48b5..0de0fea6 100644 --- 
a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_cache_mutex.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_cache_mutex.go @@ -17,7 +17,7 @@ type neighborCacheRWMutex struct { var neighborCachelockNames []string // lockNameIndex is used as an index passed to NestedLock and NestedUnlock, -// refering to an index within lockNames. +// referring to an index within lockNames. // Values are specified using the "consts" field of go_template_instance. type neighborCachelockNameIndex int diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_entry_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_entry_mutex.go index f8be1dae..c6b08eb8 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_entry_mutex.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_entry_mutex.go @@ -17,7 +17,7 @@ type neighborEntryRWMutex struct { var neighborEntrylockNames []string // lockNameIndex is used as an index passed to NestedLock and NestedUnlock, -// refering to an index within lockNames. +// referring to an index within lockNames. // Values are specified using the "consts" field of go_template_instance. type neighborEntrylockNameIndex int diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic.go index bafe633d..39028f9e 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic.go @@ -79,6 +79,13 @@ type nic struct { qDisc QueueingDiscipline gro groDispatcher + + // deliverLinkPackets specifies whether this NIC delivers packets to + // packet sockets. It is immutable. + // + // deliverLinkPackets is off by default because some users already + // deliver link packets by explicitly calling nic.DeliverLinkPackets. 
+ deliverLinkPackets bool } // makeNICStats initializes the NIC statistics and associates them to the global @@ -140,7 +147,7 @@ type delegatingQueueingDiscipline struct { func (*delegatingQueueingDiscipline) Close() {} // WritePacket passes the packet through to the underlying LinkWriter's WritePackets. -func (qDisc *delegatingQueueingDiscipline) WritePacket(pkt PacketBufferPtr) tcpip.Error { +func (qDisc *delegatingQueueingDiscipline) WritePacket(pkt *PacketBuffer) tcpip.Error { var pkts PacketBufferList pkts.PushBack(pkt) _, err := qDisc.LinkWriter.WritePackets(pkts) @@ -174,6 +181,7 @@ func newNIC(stack *Stack, id tcpip.NICID, ep LinkEndpoint, opts NICOptions) *nic linkAddrResolvers: make(map[tcpip.NetworkProtocolNumber]*linkResolver), duplicateAddressDetectors: make(map[tcpip.NetworkProtocolNumber]DuplicateAddressDetector), qDisc: qDisc, + deliverLinkPackets: opts.DeliverLinkPackets, } nic.linkResQueue.init(nic) @@ -339,7 +347,7 @@ func (n *nic) IsLoopback() bool { } // WritePacket implements NetworkEndpoint. -func (n *nic) WritePacket(r *Route, pkt PacketBufferPtr) tcpip.Error { +func (n *nic) WritePacket(r *Route, pkt *PacketBuffer) tcpip.Error { routeInfo, _, err := r.resolvedFields(nil) switch err.(type) { case nil: @@ -370,7 +378,7 @@ func (n *nic) WritePacket(r *Route, pkt PacketBufferPtr) tcpip.Error { } // WritePacketToRemote implements NetworkInterface. 
-func (n *nic) WritePacketToRemote(remoteLinkAddr tcpip.LinkAddress, pkt PacketBufferPtr) tcpip.Error { +func (n *nic) WritePacketToRemote(remoteLinkAddr tcpip.LinkAddress, pkt *PacketBuffer) tcpip.Error { pkt.EgressRoute = RouteInfo{ routeInfo: routeInfo{ NetProto: pkt.NetworkProtocolNumber, @@ -381,14 +389,26 @@ func (n *nic) WritePacketToRemote(remoteLinkAddr tcpip.LinkAddress, pkt PacketBu return n.writePacket(pkt) } -func (n *nic) writePacket(pkt PacketBufferPtr) tcpip.Error { +func (n *nic) writePacket(pkt *PacketBuffer) tcpip.Error { n.NetworkLinkEndpoint.AddHeader(pkt) return n.writeRawPacket(pkt) } -func (n *nic) writeRawPacket(pkt PacketBufferPtr) tcpip.Error { +func (n *nic) writeRawPacketWithLinkHeaderInPayload(pkt *PacketBuffer) tcpip.Error { + if !n.NetworkLinkEndpoint.ParseHeader(pkt) { + return &tcpip.ErrMalformedHeader{} + } + return n.writeRawPacket(pkt) +} + +func (n *nic) writeRawPacket(pkt *PacketBuffer) tcpip.Error { // Always an outgoing packet. pkt.PktType = tcpip.PacketOutgoing + + if n.deliverLinkPackets { + n.DeliverLinkPacket(pkt.NetworkProtocolNumber, pkt) + } + if err := n.qDisc.WritePacket(pkt); err != nil { if _, ok := err.(*tcpip.ErrNoBufferSpace); ok { n.stats.txPacketsDroppedNoBufferSpace.Increment() @@ -413,7 +433,7 @@ func (n *nic) Spoofing() bool { // primaryAddress returns an address that can be used to communicate with // remoteAddr. 
-func (n *nic) primaryEndpoint(protocol tcpip.NetworkProtocolNumber, remoteAddr tcpip.Address) AssignableAddressEndpoint { +func (n *nic) primaryEndpoint(protocol tcpip.NetworkProtocolNumber, remoteAddr, srcHint tcpip.Address) AssignableAddressEndpoint { ep := n.getNetworkEndpoint(protocol) if ep == nil { return nil @@ -424,7 +444,7 @@ func (n *nic) primaryEndpoint(protocol tcpip.NetworkProtocolNumber, remoteAddr t return nil } - return addressableEndpoint.AcquireOutgoingPrimaryAddress(remoteAddr, n.Spoofing()) + return addressableEndpoint.AcquireOutgoingPrimaryAddress(remoteAddr, srcHint, n.Spoofing()) } type getAddressBehaviour int @@ -708,7 +728,7 @@ func (n *nic) isInGroup(addr tcpip.Address) bool { // DeliverNetworkPacket finds the appropriate network protocol endpoint and // hands the packet over for further processing. This function is called when // the NIC receives a packet from the link endpoint. -func (n *nic) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr) { +func (n *nic) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt *PacketBuffer) { enabled := n.Enabled() // If the NIC is not yet enabled, don't receive any packets. if !enabled { @@ -728,19 +748,23 @@ func (n *nic) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt Pac pkt.RXChecksumValidated = n.NetworkLinkEndpoint.Capabilities()&CapabilityRXChecksumOffload != 0 + if n.deliverLinkPackets { + n.DeliverLinkPacket(protocol, pkt) + } + n.gro.dispatch(pkt, protocol, networkEndpoint) } -func (n *nic) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr) { +func (n *nic) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt *PacketBuffer) { // Deliver to interested packet endpoints without holding NIC lock. 
- var packetEPPkt PacketBufferPtr + var packetEPPkt *PacketBuffer defer func() { - if !packetEPPkt.IsNil() { + if packetEPPkt != nil { packetEPPkt.DecRef() } }() deliverPacketEPs := func(ep PacketEndpoint) { - if packetEPPkt.IsNil() { + if packetEPPkt == nil { // Packet endpoints hold the full packet. // // We perform a deep copy because higher-level endpoints may point to @@ -790,7 +814,7 @@ func (n *nic) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt Packet // DeliverTransportPacket delivers the packets to the appropriate transport // protocol endpoint. -func (n *nic) DeliverTransportPacket(protocol tcpip.TransportProtocolNumber, pkt PacketBufferPtr) TransportPacketDisposition { +func (n *nic) DeliverTransportPacket(protocol tcpip.TransportProtocolNumber, pkt *PacketBuffer) TransportPacketDisposition { state, ok := n.stack.transportProtocols[protocol] if !ok { n.stats.unknownL4ProtocolRcvdPacketCounts.Increment(uint64(protocol)) @@ -850,7 +874,7 @@ func (n *nic) DeliverTransportPacket(protocol tcpip.TransportProtocolNumber, pkt } // DeliverTransportError implements TransportDispatcher. -func (n *nic) DeliverTransportError(local, remote tcpip.Address, net tcpip.NetworkProtocolNumber, trans tcpip.TransportProtocolNumber, transErr TransportError, pkt PacketBufferPtr) { +func (n *nic) DeliverTransportError(local, remote tcpip.Address, net tcpip.NetworkProtocolNumber, trans tcpip.TransportProtocolNumber, transErr TransportError, pkt *PacketBuffer) { state, ok := n.stack.transportProtocols[trans] if !ok { return @@ -878,7 +902,7 @@ func (n *nic) DeliverTransportError(local, remote tcpip.Address, net tcpip.Netwo } // DeliverRawPacket implements TransportDispatcher. -func (n *nic) DeliverRawPacket(protocol tcpip.TransportProtocolNumber, pkt PacketBufferPtr) { +func (n *nic) DeliverRawPacket(protocol tcpip.TransportProtocolNumber, pkt *PacketBuffer) { // For ICMPv4 only we validate the header length for compatibility with // raw(7) ICMP_FILTER. 
The same check is made in Linux here: // https://github.com/torvalds/linux/blob/70585216/net/ipv4/raw.c#L189. @@ -921,7 +945,7 @@ func (n *nic) setNUDConfigs(protocol tcpip.NetworkProtocolNumber, c NUDConfigura return &tcpip.ErrNotSupported{} } -func (n *nic) registerPacketEndpoint(netProto tcpip.NetworkProtocolNumber, ep PacketEndpoint) tcpip.Error { +func (n *nic) registerPacketEndpoint(netProto tcpip.NetworkProtocolNumber, ep PacketEndpoint) { n.packetEPsMu.Lock() defer n.packetEPsMu.Unlock() @@ -931,8 +955,6 @@ func (n *nic) registerPacketEndpoint(netProto tcpip.NetworkProtocolNumber, ep Pa n.packetEPs[netProto] = eps } eps.add(ep) - - return nil } func (n *nic) unregisterPacketEndpoint(netProto tcpip.NetworkProtocolNumber, ep PacketEndpoint) { diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic_mutex.go index 95bfb301..e3b2332a 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic_mutex.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic_mutex.go @@ -17,7 +17,7 @@ type nicRWMutex struct { var niclockNames []string // lockNameIndex is used as an index passed to NestedLock and NestedUnlock, -// refering to an index within lockNames. +// referring to an index within lockNames. // Values are specified using the "consts" field of go_template_instance. type niclockNameIndex int diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer.go index 86b75695..24956e71 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer.go @@ -58,9 +58,6 @@ type PacketBufferOptions struct { OnRelease func() } -// PacketBufferPtr is a pointer to a PacketBuffer. -type PacketBufferPtr = *PacketBuffer - // A PacketBuffer contains all the data of a network packet. 
// // As a PacketBuffer traverses up the stack, it may be necessary to pass it to @@ -94,7 +91,7 @@ type PacketBufferPtr = *PacketBuffer // // Outgoing Packet: When a header is pushed, `pushed` gets incremented by the // pushed length, and the current value is stored for each header. PacketBuffer -// substracts this value from `reserved` to compute the starting offset of each +// subtracts this value from `reserved` to compute the starting offset of each // header in `buf`. // // Incoming Packet: When a header is consumed (a.k.a. parsed), the current @@ -172,7 +169,7 @@ type PacketBuffer struct { } // NewPacketBuffer creates a new PacketBuffer with opts. -func NewPacketBuffer(opts PacketBufferOptions) PacketBufferPtr { +func NewPacketBuffer(opts PacketBufferOptions) *PacketBuffer { pk := pkPool.Get().(*PacketBuffer) pk.reset() if opts.ReserveHeaderBytes != 0 { @@ -190,7 +187,7 @@ func NewPacketBuffer(opts PacketBufferOptions) PacketBufferPtr { } // IncRef increments the PacketBuffer's refcount. -func (pk PacketBufferPtr) IncRef() PacketBufferPtr { +func (pk *PacketBuffer) IncRef() *PacketBuffer { pk.packetBufferRefs.IncRef() return pk } @@ -198,7 +195,7 @@ func (pk PacketBufferPtr) IncRef() PacketBufferPtr { // DecRef decrements the PacketBuffer's refcount. If the refcount is // decremented to zero, the PacketBuffer is returned to the PacketBuffer // pool. -func (pk PacketBufferPtr) DecRef() { +func (pk *PacketBuffer) DecRef() { pk.packetBufferRefs.DecRef(func() { if pk.onRelease != nil { pk.onRelease() @@ -209,24 +206,24 @@ func (pk PacketBufferPtr) DecRef() { }) } -func (pk PacketBufferPtr) reset() { +func (pk *PacketBuffer) reset() { *pk = PacketBuffer{} } // ReservedHeaderBytes returns the number of bytes initially reserved for // headers. -func (pk PacketBufferPtr) ReservedHeaderBytes() int { +func (pk *PacketBuffer) ReservedHeaderBytes() int { return pk.reserved } // AvailableHeaderBytes returns the number of bytes currently available for // headers. 
This is relevant to PacketHeader.Push method only. -func (pk PacketBufferPtr) AvailableHeaderBytes() int { +func (pk *PacketBuffer) AvailableHeaderBytes() int { return pk.reserved - pk.pushed } // VirtioNetHeader returns the handle to virtio-layer header. -func (pk PacketBufferPtr) VirtioNetHeader() PacketHeader { +func (pk *PacketBuffer) VirtioNetHeader() PacketHeader { return PacketHeader{ pk: pk, typ: virtioNetHeader, @@ -234,7 +231,7 @@ func (pk PacketBufferPtr) VirtioNetHeader() PacketHeader { } // LinkHeader returns the handle to link-layer header. -func (pk PacketBufferPtr) LinkHeader() PacketHeader { +func (pk *PacketBuffer) LinkHeader() PacketHeader { return PacketHeader{ pk: pk, typ: linkHeader, @@ -242,7 +239,7 @@ func (pk PacketBufferPtr) LinkHeader() PacketHeader { } // NetworkHeader returns the handle to network-layer header. -func (pk PacketBufferPtr) NetworkHeader() PacketHeader { +func (pk *PacketBuffer) NetworkHeader() PacketHeader { return PacketHeader{ pk: pk, typ: networkHeader, @@ -250,7 +247,7 @@ func (pk PacketBufferPtr) NetworkHeader() PacketHeader { } // TransportHeader returns the handle to transport-layer header. -func (pk PacketBufferPtr) TransportHeader() PacketHeader { +func (pk *PacketBuffer) TransportHeader() PacketHeader { return PacketHeader{ pk: pk, typ: transportHeader, @@ -258,29 +255,33 @@ func (pk PacketBufferPtr) TransportHeader() PacketHeader { } // HeaderSize returns the total size of all headers in bytes. -func (pk PacketBufferPtr) HeaderSize() int { +func (pk *PacketBuffer) HeaderSize() int { return pk.pushed + pk.consumed } // Size returns the size of packet in bytes. -func (pk PacketBufferPtr) Size() int { +func (pk *PacketBuffer) Size() int { return int(pk.buf.Size()) - pk.headerOffset() } // MemSize returns the estimation size of the pk in memory, including backing // buffer data. 
-func (pk PacketBufferPtr) MemSize() int { +func (pk *PacketBuffer) MemSize() int { return int(pk.buf.Size()) + PacketBufferStructSize } // Data returns the handle to data portion of pk. -func (pk PacketBufferPtr) Data() PacketData { +func (pk *PacketBuffer) Data() PacketData { return PacketData{pk: pk} } // AsSlices returns the underlying storage of the whole packet. -func (pk PacketBufferPtr) AsSlices() [][]byte { - var views [][]byte +// +// Note that AsSlices can allocate a lot. In hot paths it may be preferable to +// iterate over a PacketBuffer's data via AsViewList. +func (pk *PacketBuffer) AsSlices() [][]byte { + vl := pk.buf.AsViewList() + views := make([][]byte, 0, vl.Len()) offset := pk.headerOffset() pk.buf.SubApply(offset, int(pk.buf.Size())-offset, func(v *buffer.View) { views = append(views, v.AsSlice()) @@ -288,9 +289,15 @@ func (pk PacketBufferPtr) AsSlices() [][]byte { return views } +// AsViewList returns the list of Views backing the PacketBuffer along with the +// header offset into them. Users may not save or modify the ViewList returned. +func (pk *PacketBuffer) AsViewList() (buffer.ViewList, int) { + return pk.buf.AsViewList(), pk.headerOffset() +} + // ToBuffer returns a caller-owned copy of the underlying storage of the whole // packet. -func (pk PacketBufferPtr) ToBuffer() buffer.Buffer { +func (pk *PacketBuffer) ToBuffer() buffer.Buffer { b := pk.buf.Clone() b.TrimFront(int64(pk.headerOffset())) return b @@ -298,7 +305,7 @@ func (pk PacketBufferPtr) ToBuffer() buffer.Buffer { // ToView returns a caller-owned copy of the underlying storage of the whole // packet as a view. 
-func (pk PacketBufferPtr) ToView() *buffer.View { +func (pk *PacketBuffer) ToView() *buffer.View { p := buffer.NewView(int(pk.buf.Size())) offset := pk.headerOffset() pk.buf.SubApply(offset, int(pk.buf.Size())-offset, func(v *buffer.View) { @@ -307,19 +314,19 @@ func (pk PacketBufferPtr) ToView() *buffer.View { return p } -func (pk PacketBufferPtr) headerOffset() int { +func (pk *PacketBuffer) headerOffset() int { return pk.reserved - pk.pushed } -func (pk PacketBufferPtr) headerOffsetOf(typ headerType) int { +func (pk *PacketBuffer) headerOffsetOf(typ headerType) int { return pk.reserved + pk.headers[typ].offset } -func (pk PacketBufferPtr) dataOffset() int { +func (pk *PacketBuffer) dataOffset() int { return pk.reserved + pk.consumed } -func (pk PacketBufferPtr) push(typ headerType, size int) []byte { +func (pk *PacketBuffer) push(typ headerType, size int) []byte { h := &pk.headers[typ] if h.length > 0 { panic(fmt.Sprintf("push(%s, %d) called after previous push", typ, size)) @@ -334,7 +341,7 @@ func (pk PacketBufferPtr) push(typ headerType, size int) []byte { return view.AsSlice() } -func (pk PacketBufferPtr) consume(typ headerType, size int) (v []byte, consumed bool) { +func (pk *PacketBuffer) consume(typ headerType, size int) (v []byte, consumed bool) { h := &pk.headers[typ] if h.length > 0 { panic(fmt.Sprintf("consume must not be called twice: type %s", typ)) @@ -349,7 +356,7 @@ func (pk PacketBufferPtr) consume(typ headerType, size int) (v []byte, consumed return view.AsSlice(), true } -func (pk PacketBufferPtr) headerView(typ headerType) buffer.View { +func (pk *PacketBuffer) headerView(typ headerType) buffer.View { h := &pk.headers[typ] if h.length == 0 { return buffer.View{} @@ -363,7 +370,7 @@ func (pk PacketBufferPtr) headerView(typ headerType) buffer.View { // Clone makes a semi-deep copy of pk. The underlying packet payload is // shared. Hence, no modifications is done to underlying packet payload. 
-func (pk PacketBufferPtr) Clone() PacketBufferPtr { +func (pk *PacketBuffer) Clone() *PacketBuffer { newPk := pkPool.Get().(*PacketBuffer) newPk.reset() newPk.buf = pk.buf.Clone() @@ -389,7 +396,7 @@ func (pk PacketBufferPtr) Clone() PacketBufferPtr { // ReserveHeaderBytes prepends reserved space for headers at the front // of the underlying buf. Can only be called once per packet. -func (pk PacketBufferPtr) ReserveHeaderBytes(reserved int) { +func (pk *PacketBuffer) ReserveHeaderBytes(reserved int) { if pk.reserved != 0 { panic(fmt.Sprintf("ReserveHeaderBytes(...) called on packet with reserved=%d, want reserved=0", pk.reserved)) } @@ -400,7 +407,7 @@ func (pk PacketBufferPtr) ReserveHeaderBytes(reserved int) { // Network returns the network header as a header.Network. // // Network should only be called when NetworkHeader has been set. -func (pk PacketBufferPtr) Network() header.Network { +func (pk *PacketBuffer) Network() header.Network { switch netProto := pk.NetworkProtocolNumber; netProto { case header.IPv4ProtocolNumber: return header.IPv4(pk.NetworkHeader().Slice()) @@ -416,7 +423,7 @@ func (pk PacketBufferPtr) Network() header.Network { // // See PacketBuffer.Data for details about how a packet buffer holds an inbound // packet. -func (pk PacketBufferPtr) CloneToInbound() PacketBufferPtr { +func (pk *PacketBuffer) CloneToInbound() *PacketBuffer { newPk := pkPool.Get().(*PacketBuffer) newPk.reset() newPk.buf = pk.buf.Clone() @@ -432,7 +439,7 @@ func (pk PacketBufferPtr) CloneToInbound() PacketBufferPtr { // // The returned packet buffer will have the network and transport headers // set if the original packet buffer did. 
-func (pk PacketBufferPtr) DeepCopyForForwarding(reservedHeaderBytes int) PacketBufferPtr { +func (pk *PacketBuffer) DeepCopyForForwarding(reservedHeaderBytes int) *PacketBuffer { payload := BufferSince(pk.NetworkHeader()) defer payload.Release() newPk := NewPacketBuffer(PacketBufferOptions{ @@ -462,11 +469,6 @@ func (pk PacketBufferPtr) DeepCopyForForwarding(reservedHeaderBytes int) PacketB return newPk } -// IsNil returns whether the pointer is logically nil. -func (pk PacketBufferPtr) IsNil() bool { - return pk == nil -} - // headerInfo stores metadata about a header in a packet. // // +stateify savable @@ -481,7 +483,7 @@ type headerInfo struct { // PacketHeader is a handle object to a header in the underlying packet. type PacketHeader struct { - pk PacketBufferPtr + pk *PacketBuffer typ headerType } @@ -523,7 +525,7 @@ func (h PacketHeader) Consume(size int) (v []byte, consumed bool) { // // +stateify savable type PacketData struct { - pk PacketBufferPtr + pk *PacketBuffer } // PullUp returns a contiguous slice of size bytes from the beginning of d. @@ -601,7 +603,7 @@ func (d PacketData) MergeBuffer(b *buffer.Buffer) { // MergeFragment appends the data portion of frag to dst. It modifies // frag and frag should not be used again. -func MergeFragment(dst, frag PacketBufferPtr) { +func MergeFragment(dst, frag *PacketBuffer) { frag.buf.TrimFront(int64(frag.dataOffset())) dst.buf.Merge(&frag.buf) } @@ -674,7 +676,7 @@ func (d PacketData) ChecksumAtOffset(offset int) uint16 { // Range represents a contiguous subportion of a PacketBuffer. 
type Range struct { - pk PacketBufferPtr + pk *PacketBuffer offset int length int } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_refs.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_refs.go index 8b226b73..a3a85693 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_refs.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_refs.go @@ -1,6 +1,7 @@ package stack import ( + "context" "fmt" "gvisor.dev/gvisor/pkg/atomicbitops" @@ -134,7 +135,7 @@ func (r *packetBufferRefs) DecRef(destroy func()) { } } -func (r *packetBufferRefs) afterLoad() { +func (r *packetBufferRefs) afterLoad(context.Context) { if r.ReadRefs() > 0 { refs.Register(r) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_unsafe.go index ddfb8004..9d1105b2 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_unsafe.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_unsafe.go @@ -21,8 +21,8 @@ const PacketBufferStructSize = int(unsafe.Sizeof(PacketBuffer{})) // ID returns a unique ID for the underlying storage of the packet. // -// Two PacketBufferPtrs have the same IDs if and only if they point to the same +// Two *PacketBuffers have the same IDs if and only if they point to the same // location in memory. 
-func (pk PacketBufferPtr) ID() uintptr { +func (pk *PacketBuffer) ID() uintptr { return uintptr(unsafe.Pointer(pk)) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_endpoint_list_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_endpoint_list_mutex.go index c7e6ef64..ad3e0b28 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_endpoint_list_mutex.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_endpoint_list_mutex.go @@ -17,7 +17,7 @@ type packetEndpointListRWMutex struct { var packetEndpointListlockNames []string // lockNameIndex is used as an index passed to NestedLock and NestedUnlock, -// refering to an index within lockNames. +// referring to an index within lockNames. // Values are specified using the "consts" field of go_template_instance. type packetEndpointListlockNameIndex int diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_eps_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_eps_mutex.go index 2c7d2d9d..4e9dda8b 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_eps_mutex.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_eps_mutex.go @@ -17,7 +17,7 @@ type packetEPsRWMutex struct { var packetEPslockNames []string // lockNameIndex is used as an index passed to NestedLock and NestedUnlock, -// refering to an index within lockNames. +// referring to an index within lockNames. // Values are specified using the "consts" field of go_template_instance. 
type packetEPslockNameIndex int diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packets_pending_link_resolution_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packets_pending_link_resolution_mutex.go index c5660882..ac47a79e 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packets_pending_link_resolution_mutex.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packets_pending_link_resolution_mutex.go @@ -19,7 +19,7 @@ var packetsPendingLinkResolutionprefixIndex *locking.MutexClass var packetsPendingLinkResolutionlockNames []string // lockNameIndex is used as an index passed to NestedLock and NestedUnlock, -// refering to an index within lockNames. +// referring to an index within lockNames. // Values are specified using the "consts" field of go_template_instance. type packetsPendingLinkResolutionlockNameIndex int diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/pending_packets.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/pending_packets.go index 0627fb81..a7452f89 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/pending_packets.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/pending_packets.go @@ -29,7 +29,7 @@ const ( type pendingPacket struct { routeInfo RouteInfo - pkt PacketBufferPtr + pkt *PacketBuffer } // packetsPendingLinkResolution is a queue of packets pending link resolution. 
@@ -54,7 +54,7 @@ type packetsPendingLinkResolution struct { } } -func (f *packetsPendingLinkResolution) incrementOutgoingPacketErrors(pkt PacketBufferPtr) { +func (f *packetsPendingLinkResolution) incrementOutgoingPacketErrors(pkt *PacketBuffer) { f.nic.stack.stats.IP.OutgoingPacketErrors.Increment() if ipEndpointStats, ok := f.nic.getNetworkEndpoint(pkt.NetworkProtocolNumber).Stats().(IPNetworkEndpointStats); ok { @@ -113,7 +113,7 @@ func (f *packetsPendingLinkResolution) dequeue(ch <-chan struct{}, linkAddr tcpi // If the maximum number of pending resolutions is reached, the packets // associated with the oldest link resolution will be dequeued as if they failed // link resolution. -func (f *packetsPendingLinkResolution) enqueue(r *Route, pkt PacketBufferPtr) tcpip.Error { +func (f *packetsPendingLinkResolution) enqueue(r *Route, pkt *PacketBuffer) tcpip.Error { f.mu.Lock() // Make sure we attempt resolution while holding f's lock so that we avoid // a race where link resolution completes before we enqueue the packets. diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/registration.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/registration.go index a76e5328..eb829b71 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/registration.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/registration.go @@ -120,12 +120,12 @@ type TransportEndpoint interface { // transport endpoint. It sets the packet buffer's transport header. // // HandlePacket may modify the packet. - HandlePacket(TransportEndpointID, PacketBufferPtr) + HandlePacket(TransportEndpointID, *PacketBuffer) // HandleError is called when the transport endpoint receives an error. // // HandleError takes may modify the packet buffer. - HandleError(TransportError, PacketBufferPtr) + HandleError(TransportError, *PacketBuffer) // Abort initiates an expedited endpoint teardown. It puts the endpoint // in a closed state and frees all resources associated with it. 
This @@ -153,7 +153,7 @@ type RawTransportEndpoint interface { // layer up. // // HandlePacket may modify the packet. - HandlePacket(PacketBufferPtr) + HandlePacket(*PacketBuffer) } // PacketEndpoint is the interface that needs to be implemented by packet @@ -171,7 +171,7 @@ type PacketEndpoint interface { // should construct its own ethernet header for applications. // // HandlePacket may modify pkt. - HandlePacket(nicID tcpip.NICID, netProto tcpip.NetworkProtocolNumber, pkt PacketBufferPtr) + HandlePacket(nicID tcpip.NICID, netProto tcpip.NetworkProtocolNumber, pkt *PacketBuffer) } // UnknownDestinationPacketDisposition enumerates the possible return values from @@ -221,7 +221,7 @@ type TransportProtocol interface { // // HandleUnknownDestinationPacket may modify the packet if it handles // the issue. - HandleUnknownDestinationPacket(TransportEndpointID, PacketBufferPtr) UnknownDestinationPacketDisposition + HandleUnknownDestinationPacket(TransportEndpointID, *PacketBuffer) UnknownDestinationPacketDisposition // SetOption allows enabling/disabling protocol specific features. // SetOption returns an error if the option is not supported or the @@ -250,7 +250,7 @@ type TransportProtocol interface { // Parse sets pkt.TransportHeader and trims pkt.Data appropriately. It does // neither and returns false if pkt.Data is too small, i.e. pkt.Data.Size() < // MinimumPacketSize() - Parse(pkt PacketBufferPtr) (ok bool) + Parse(pkt *PacketBuffer) (ok bool) } // TransportPacketDisposition is the result from attempting to deliver a packet @@ -282,18 +282,18 @@ type TransportDispatcher interface { // pkt.NetworkHeader must be set before calling DeliverTransportPacket. // // DeliverTransportPacket may modify the packet. 
- DeliverTransportPacket(tcpip.TransportProtocolNumber, PacketBufferPtr) TransportPacketDisposition + DeliverTransportPacket(tcpip.TransportProtocolNumber, *PacketBuffer) TransportPacketDisposition // DeliverTransportError delivers an error to the appropriate transport // endpoint. // // DeliverTransportError may modify the packet buffer. - DeliverTransportError(local, remote tcpip.Address, _ tcpip.NetworkProtocolNumber, _ tcpip.TransportProtocolNumber, _ TransportError, _ PacketBufferPtr) + DeliverTransportError(local, remote tcpip.Address, _ tcpip.NetworkProtocolNumber, _ tcpip.TransportProtocolNumber, _ TransportError, _ *PacketBuffer) // DeliverRawPacket delivers a packet to any subscribed raw sockets. // // DeliverRawPacket does NOT take ownership of the packet buffer. - DeliverRawPacket(tcpip.TransportProtocolNumber, PacketBufferPtr) + DeliverRawPacket(tcpip.TransportProtocolNumber, *PacketBuffer) } // PacketLooping specifies where an outbound packet should be sent. @@ -534,11 +534,11 @@ type AssignableAddressEndpoint interface { // to its NetworkEndpoint. IsAssigned(allowExpired bool) bool - // IncRef increments this endpoint's reference count. + // TryIncRef tries to increment this endpoint's reference count. // // Returns true if it was successfully incremented. If it returns false, then // the endpoint is considered expired and should no longer be used. - IncRef() bool + TryIncRef() bool // DecRef decrements this endpoint's reference count. DecRef() @@ -681,7 +681,7 @@ type AddressableEndpoint interface { // The returned endpoint's reference count is incremented. // // Returns nil if a primary address is not available. - AcquireOutgoingPrimaryAddress(remoteAddr tcpip.Address, allowExpired bool) AddressEndpoint + AcquireOutgoingPrimaryAddress(remoteAddr, srcHint tcpip.Address, allowExpired bool) AddressEndpoint // PrimaryAddresses returns the primary addresses. 
PrimaryAddresses() []tcpip.AddressWithPrefix @@ -740,13 +740,13 @@ type NetworkInterface interface { CheckLocalAddress(tcpip.NetworkProtocolNumber, tcpip.Address) bool // WritePacketToRemote writes the packet to the given remote link address. - WritePacketToRemote(tcpip.LinkAddress, PacketBufferPtr) tcpip.Error + WritePacketToRemote(tcpip.LinkAddress, *PacketBuffer) tcpip.Error // WritePacket writes a packet through the given route. // // WritePacket may modify the packet buffer. The packet buffer's // network and transport header must be set. - WritePacket(*Route, PacketBufferPtr) tcpip.Error + WritePacket(*Route, *PacketBuffer) tcpip.Error // HandleNeighborProbe processes an incoming neighbor probe (e.g. ARP // request or NDP Neighbor Solicitation). @@ -764,7 +764,7 @@ type NetworkInterface interface { type LinkResolvableNetworkEndpoint interface { // HandleLinkResolutionFailure is called when link resolution prevents the // argument from having been sent. - HandleLinkResolutionFailure(PacketBufferPtr) + HandleLinkResolutionFailure(*PacketBuffer) } // NetworkEndpoint is the interface that needs to be implemented by endpoints @@ -802,17 +802,17 @@ type NetworkEndpoint interface { // WritePacket writes a packet to the given destination address and // protocol. It may modify pkt. pkt.TransportHeader must have // already been set. - WritePacket(r *Route, params NetworkHeaderParams, pkt PacketBufferPtr) tcpip.Error + WritePacket(r *Route, params NetworkHeaderParams, pkt *PacketBuffer) tcpip.Error // WriteHeaderIncludedPacket writes a packet that includes a network // header to the given destination address. It may modify pkt. - WriteHeaderIncludedPacket(r *Route, pkt PacketBufferPtr) tcpip.Error + WriteHeaderIncludedPacket(r *Route, pkt *PacketBuffer) tcpip.Error // HandlePacket is called by the link layer when new packets arrive to // this network endpoint. It sets pkt.NetworkHeader. // // HandlePacket may modify pkt. 
- HandlePacket(pkt PacketBufferPtr) + HandlePacket(pkt *PacketBuffer) // Close is called when the endpoint is removed from a stack. Close() @@ -911,7 +911,7 @@ type NetworkProtocol interface { // - Whether there is an encapsulated transport protocol payload (e.g. ARP // does not encapsulate anything). // - Whether pkt.Data was large enough to parse and set pkt.NetworkHeader. - Parse(pkt PacketBufferPtr) (proto tcpip.TransportProtocolNumber, hasTransportHdr bool, ok bool) + Parse(pkt *PacketBuffer) (proto tcpip.TransportProtocolNumber, hasTransportHdr bool, ok bool) } // UnicastSourceAndMulticastDestination is a tuple that represents a unicast @@ -929,7 +929,7 @@ type MulticastRouteOutgoingInterface struct { // ID corresponds to the outgoing NIC. ID tcpip.NICID - // MinTTL represents the minumum TTL/HopLimit a multicast packet must have to + // MinTTL represents the minimum TTL/HopLimit a multicast packet must have to // be sent through the outgoing interface. // // Note: a value of 0 allows all packets to be forwarded. @@ -1027,14 +1027,14 @@ type NetworkDispatcher interface { // If the link-layer has a header, the packet's link header must be populated. // // DeliverNetworkPacket may modify pkt. - DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr) + DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt *PacketBuffer) // DeliverLinkPacket delivers a packet to any interested packet endpoints. // // This method should be called with both incoming and outgoing packets. // // If the link-layer has a header, the packet's link header must be populated. - DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr) + DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt *PacketBuffer) } // LinkEndpointCapabilities is the type associated with the capabilities @@ -1065,6 +1065,9 @@ type LinkWriter interface { // WritePackets writes packets. Must not be called with an empty list of // packet buffers. 
// + // Each packet must have the link-layer header set, if the link requires + // one. + // // WritePackets may modify the packet buffers, and takes ownership of the PacketBufferList. // it is not safe to use the PacketBufferList after a call to WritePackets. WritePackets(PacketBufferList) (int, tcpip.Error) @@ -1120,7 +1123,10 @@ type NetworkLinkEndpoint interface { ARPHardwareType() header.ARPHardwareType // AddHeader adds a link layer header to the packet if required. - AddHeader(PacketBufferPtr) + AddHeader(*PacketBuffer) + + // ParseHeader parses the link layer header to the packet. + ParseHeader(*PacketBuffer) bool } // QueueingDiscipline provides a queueing strategy for outgoing packets (e.g @@ -1134,7 +1140,7 @@ type QueueingDiscipline interface { // To participate in transparent bridging, a LinkEndpoint implementation // should call eth.Encode with header.EthernetFields.SrcAddr set to // pkg.EgressRoute.LocalLinkAddress if it is provided. - WritePacket(PacketBufferPtr) tcpip.Error + WritePacket(*PacketBuffer) tcpip.Error Close() } @@ -1155,7 +1161,7 @@ type InjectableLinkEndpoint interface { LinkEndpoint // InjectInbound injects an inbound packet. - InjectInbound(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr) + InjectInbound(protocol tcpip.NetworkProtocolNumber, pkt *PacketBuffer) // InjectOutbound writes a fully formed outbound packet directly to the // link. diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route.go index 9755362a..e386ded7 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route.go @@ -101,6 +101,11 @@ func (r *Route) Loop() PacketLooping { return r.routeInfo.Loop } +// OutgoingNIC returns the route's outgoing NIC. +func (r *Route) OutgoingNIC() tcpip.NICID { + return r.outgoingNIC.id +} + // RouteInfo contains all of Route's exported fields. 
// // +stateify savable @@ -487,7 +492,7 @@ func (r *Route) isValidForOutgoingRLocked() bool { } // WritePacket writes the packet through the given route. -func (r *Route) WritePacket(params NetworkHeaderParams, pkt PacketBufferPtr) tcpip.Error { +func (r *Route) WritePacket(params NetworkHeaderParams, pkt *PacketBuffer) tcpip.Error { if !r.isValidForOutgoing() { return &tcpip.ErrInvalidEndpointState{} } @@ -497,7 +502,7 @@ func (r *Route) WritePacket(params NetworkHeaderParams, pkt PacketBufferPtr) tcp // WriteHeaderIncludedPacket writes a packet already containing a network // header through the given route. -func (r *Route) WriteHeaderIncludedPacket(pkt PacketBufferPtr) tcpip.Error { +func (r *Route) WriteHeaderIncludedPacket(pkt *PacketBuffer) tcpip.Error { if !r.isValidForOutgoing() { return &tcpip.ErrInvalidEndpointState{} } @@ -537,7 +542,7 @@ func (r *Route) Acquire() { // +checklocksread:r.mu func (r *Route) acquireLocked() { if ep := r.localAddressEndpoint; ep != nil { - if !ep.IncRef() { + if !ep.TryIncRef() { panic(fmt.Sprintf("failed to increment reference count for local address endpoint = %s", r.LocalAddress())) } } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route_mutex.go index 0a5bdd4e..28a5e869 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route_mutex.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route_mutex.go @@ -17,7 +17,7 @@ type routeRWMutex struct { var routelockNames []string // lockNameIndex is used as an index passed to NestedLock and NestedUnlock, -// refering to an index within lockNames. +// referring to an index within lockNames. // Values are specified using the "consts" field of go_template_instance. 
type routelockNameIndex int diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route_stack_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route_stack_mutex.go index 1c7c9285..ec3796c3 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route_stack_mutex.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route_stack_mutex.go @@ -17,7 +17,7 @@ type routeStackRWMutex struct { var routeStacklockNames []string // lockNameIndex is used as an index passed to NestedLock and NestedUnlock, -// refering to an index within lockNames. +// referring to an index within lockNames. // Values are specified using the "consts" field of go_template_instance. type routeStacklockNameIndex int diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack.go index d0b4df7d..ee8bca2d 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack.go @@ -45,16 +45,22 @@ const ( type transportProtocolState struct { proto TransportProtocol - defaultHandler func(id TransportEndpointID, pkt PacketBufferPtr) bool + defaultHandler func(id TransportEndpointID, pkt *PacketBuffer) bool } -// ResumableEndpoint is an endpoint that needs to be resumed after restore. -type ResumableEndpoint interface { - // Resume resumes an endpoint after restore. This can be used to restart - // background workers such as protocol goroutines. This must be called after - // all indirect dependencies of the endpoint has been restored, which +// RestoredEndpoint is an endpoint that needs to be restored. +type RestoredEndpoint interface { + // Restore restores an endpoint. This can be used to restart background + // workers such as protocol goroutines. This must be called after all + // indirect dependencies of the endpoint has been restored, which // generally implies at the end of the restore process. 
- Resume(*Stack) + Restore(*Stack) +} + +// ResumableEndpoint is an endpoint that needs to be resumed after save. +type ResumableEndpoint interface { + // Resume resumes an endpoint. + Resume() } // uniqueIDGenerator is a default unique ID generator. @@ -115,8 +121,12 @@ type Stack struct { // TODO(gvisor.dev/issue/4595): S/R this field. tables *IPTables - // resumableEndpoints is a list of endpoints that need to be resumed if the + // restoredEndpoints is a list of endpoints that need to be restored if the // stack is being restored. + restoredEndpoints []RestoredEndpoint + + // resumableEndpoints is a list of endpoints that need to be resumed + // after save. resumableEndpoints []ResumableEndpoint // icmpRateLimiter is a global rate limiter for all ICMP messages generated @@ -139,11 +149,12 @@ type Stack struct { uniqueIDGenerator UniqueID // randomGenerator is an injectable pseudo random generator that can be - // used when a random number is required. - randomGenerator *rand.Rand + // used when a random number is required. It must not be used in + // security-sensitive contexts. + insecureRNG *rand.Rand // secureRNG is a cryptographically secure random number generator. - secureRNG io.Reader + secureRNG cryptorand.RNG // sendBufferSize holds the min/default/max send buffer sizes for // endpoints other than TCP. @@ -343,6 +354,7 @@ func New(opts Options) *Stack { if opts.SecureRNG == nil { opts.SecureRNG = cryptorand.Reader } + secureRNG := cryptorand.RNGFrom(opts.SecureRNG) randSrc := opts.RandSource if randSrc == nil { @@ -354,13 +366,13 @@ func New(opts Options) *Stack { // we wrap it in a simple thread-safe version. 
randSrc = &lockedRandomSource{src: rand.NewSource(v)} } - randomGenerator := rand.New(randSrc) + insecureRNG := rand.New(randSrc) if opts.IPTables == nil { if opts.DefaultIPTables == nil { opts.DefaultIPTables = DefaultTables } - opts.IPTables = opts.DefaultIPTables(clock, randomGenerator) + opts.IPTables = opts.DefaultIPTables(clock, insecureRNG) } opts.NUDConfigs.resetInvalidFields() @@ -378,12 +390,12 @@ func New(opts Options) *Stack { handleLocal: opts.HandleLocal, tables: opts.IPTables, icmpRateLimiter: NewICMPRateLimiter(clock), - seed: randomGenerator.Uint32(), + seed: secureRNG.Uint32(), nudConfigs: opts.NUDConfigs, uniqueIDGenerator: opts.UniqueID, nudDisp: opts.NUDDisp, - randomGenerator: randomGenerator, - secureRNG: opts.SecureRNG, + insecureRNG: insecureRNG, + secureRNG: secureRNG, sendBufferSize: tcpip.SendBufferSizeOption{ Min: MinBufferSize, Default: DefaultBufferSize, @@ -395,7 +407,7 @@ func New(opts Options) *Stack { Max: DefaultMaxBufferSize, }, tcpInvalidRateLimit: defaultTCPInvalidRateLimit, - tsOffsetSecret: randomGenerator.Uint32(), + tsOffsetSecret: secureRNG.Uint32(), } // Add specified network protocols. @@ -483,12 +495,22 @@ func (s *Stack) TransportProtocolOption(transport tcpip.TransportProtocolNumber, return transProtoState.proto.Option(option) } +// SendBufSizeProto is a protocol that can return its send buffer size. +type SendBufSizeProto interface { + SendBufferSize() tcpip.TCPSendBufferSizeRangeOption +} + +// TCPSendBufferLimits returns the TCP send buffer size limit. +func (s *Stack) TCPSendBufferLimits() tcpip.TCPSendBufferSizeRangeOption { + return s.transportProtocols[header.TCPProtocolNumber].proto.(SendBufSizeProto).SendBufferSize() +} + // SetTransportProtocolHandler sets the per-stack default handler for the given // protocol. // // It must be called only during initialization of the stack. Changing it as the // stack is operating is not supported. 
-func (s *Stack) SetTransportProtocolHandler(p tcpip.TransportProtocolNumber, h func(TransportEndpointID, PacketBufferPtr) bool) { +func (s *Stack) SetTransportProtocolHandler(p tcpip.TransportProtocolNumber, h func(TransportEndpointID, *PacketBuffer) bool) { state := s.transportProtocols[p] if state != nil { state.defaultHandler = h @@ -839,6 +861,10 @@ type NICOptions struct { // GROTimeout specifies the GRO timeout. Zero bypasses GRO. GROTimeout time.Duration + + // DeliverLinkPackets specifies whether the NIC is responsible for + // delivering raw packets to packet sockets. + DeliverLinkPackets bool } // CreateNICWithOptions creates a NIC with the provided id, LinkEndpoint, and @@ -962,14 +988,14 @@ func (s *Stack) removeNICLocked(id tcpip.NICID) tcpip.Error { // Remove routes in-place. n tracks the number of routes written. s.routeMu.Lock() n := 0 - for i, r := range s.routeTable { - s.routeTable[i] = tcpip.Route{} + for _, r := range s.routeTable { if r.NIC != id { // Keep this route. 
s.routeTable[n] = r n++ } } + clear(s.routeTable[n:]) s.routeTable = s.routeTable[:n] s.routeMu.Unlock() @@ -1166,9 +1192,9 @@ func (s *Stack) GetMainNICAddress(id tcpip.NICID, protocol tcpip.NetworkProtocol return nic.PrimaryAddress(protocol) } -func (s *Stack) getAddressEP(nic *nic, localAddr, remoteAddr tcpip.Address, netProto tcpip.NetworkProtocolNumber) AssignableAddressEndpoint { +func (s *Stack) getAddressEP(nic *nic, localAddr, remoteAddr, srcHint tcpip.Address, netProto tcpip.NetworkProtocolNumber) AssignableAddressEndpoint { if localAddr.BitLen() == 0 { - return nic.primaryEndpoint(netProto, remoteAddr) + return nic.primaryEndpoint(netProto, remoteAddr, srcHint) } return nic.findEndpoint(netProto, localAddr, CanBePrimaryEndpoint) } @@ -1186,7 +1212,7 @@ func (s *Stack) NewRouteForMulticast(nicID tcpip.NICID, remoteAddr tcpip.Address return nil } - if addressEndpoint := s.getAddressEP(nic, tcpip.Address{} /* localAddr */, remoteAddr, netProto); addressEndpoint != nil { + if addressEndpoint := s.getAddressEP(nic, tcpip.Address{} /* localAddr */, remoteAddr, tcpip.Address{} /* srcHint */, netProto); addressEndpoint != nil { return constructAndValidateRoute(netProto, addressEndpoint, nic, nic, tcpip.Address{} /* gateway */, tcpip.Address{} /* localAddr */, remoteAddr, s.handleLocal, false /* multicastLoop */) } return nil @@ -1290,6 +1316,28 @@ func isNICForwarding(nic *nic, proto tcpip.NetworkProtocolNumber) bool { } } +// findRouteWithLocalAddrFromAnyInterfaceRLocked returns a route to the given +// destination address, leaving through the given NIC. +// +// Rather than preferring to find a route that uses a local address assigned to +// the outgoing interface, it finds any NIC that holds a matching local address +// endpoint. 
+// +// +checklocksread:s.mu +func (s *Stack) findRouteWithLocalAddrFromAnyInterfaceRLocked(outgoingNIC *nic, localAddr, remoteAddr, srcHint, gateway tcpip.Address, netProto tcpip.NetworkProtocolNumber, multicastLoop bool) *Route { + for _, aNIC := range s.nics { + addressEndpoint := s.getAddressEP(aNIC, localAddr, remoteAddr, srcHint, netProto) + if addressEndpoint == nil { + continue + } + + if r := constructAndValidateRoute(netProto, addressEndpoint, aNIC /* localAddressNIC */, outgoingNIC, gateway, localAddr, remoteAddr, s.handleLocal, multicastLoop); r != nil { + return r + } + } + return nil +} + // FindRoute creates a route to the given destination address, leaving through // the given NIC and local address (if provided). // @@ -1299,12 +1347,17 @@ func isNICForwarding(nic *nic, proto tcpip.NetworkProtocolNumber) bool { // leave through any interface unless the route is link-local. // // If no local address is provided, the stack will select a local address. If no -// remote address is provided, the stack wil use a remote address equal to the +// remote address is provided, the stack will use a remote address equal to the // local address. func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, netProto tcpip.NetworkProtocolNumber, multicastLoop bool) (*Route, tcpip.Error) { s.mu.RLock() defer s.mu.RUnlock() + // Reject attempts to use unsupported protocols. + if !s.CheckNetworkProtocol(netProto) { + return nil, &tcpip.ErrUnknownProtocol{} + } + isLinkLocal := header.IsV6LinkLocalUnicastAddress(remoteAddr) || header.IsV6LinkLocalMulticastAddress(remoteAddr) isLocalBroadcast := remoteAddr == header.IPv4Broadcast isMulticast := header.IsV4MulticastAddress(remoteAddr) || header.IsV6MulticastAddress(remoteAddr) @@ -1321,7 +1374,7 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n // through the interface if the interface is valid and enabled. 
if id != 0 && !needRoute { if nic, ok := s.nics[id]; ok && nic.Enabled() { - if addressEndpoint := s.getAddressEP(nic, localAddr, remoteAddr, netProto); addressEndpoint != nil { + if addressEndpoint := s.getAddressEP(nic, localAddr, remoteAddr, tcpip.Address{} /* srcHint */, netProto); addressEndpoint != nil { return makeRoute( netProto, tcpip.Address{}, /* gateway */ @@ -1361,7 +1414,7 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n } if id == 0 || id == route.NIC { - if addressEndpoint := s.getAddressEP(nic, localAddr, remoteAddr, netProto); addressEndpoint != nil { + if addressEndpoint := s.getAddressEP(nic, localAddr, remoteAddr, route.SourceHint, netProto); addressEndpoint != nil { var gateway tcpip.Address if needRoute { gateway = route.Gateway @@ -1374,15 +1427,27 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n } } - // If the stack has forwarding enabled and we haven't found a valid route - // to the remote address yet, keep track of the first valid route. We - // keep iterating because we prefer routes that let us use a local - // address that is assigned to the outgoing interface. There is no - // requirement to do this from any RFC but simply a choice made to better - // follow a strong host model which the netstack follows at the time of - // writing. + // If the stack has forwarding enabled, we haven't found a valid route to + // the remote address yet, and we are routing locally generated traffic, + // keep track of the first valid route. We keep iterating because we + // prefer routes that let us use a local address that is assigned to the + // outgoing interface. There is no requirement to do this from any RFC + // but simply a choice made to better follow a strong host model which + // the netstack follows at the time of writing. 
+ // + // Note that for incoming traffic that we are forwarding (for which the + // NIC and local address are unspecified), we do not keep iterating, as + // there is no reason to prefer routes that let us use a local address + // when routing forwarded (as opposed to locally-generated) traffic. + locallyGenerated := (id != 0 || localAddr != tcpip.Address{}) if onlyGlobalAddresses && chosenRoute.Equal(tcpip.Route{}) && isNICForwarding(nic, netProto) { - chosenRoute = route + if locallyGenerated { + chosenRoute = route + continue + } + if r := s.findRouteWithLocalAddrFromAnyInterfaceRLocked(nic, localAddr, remoteAddr, route.SourceHint, route.Gateway, netProto, multicastLoop); r != nil { + return r + } } } @@ -1408,7 +1473,7 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n // Use the specified NIC to get the local address endpoint. if id != 0 { if aNIC, ok := s.nics[id]; ok { - if addressEndpoint := s.getAddressEP(aNIC, localAddr, remoteAddr, netProto); addressEndpoint != nil { + if addressEndpoint := s.getAddressEP(aNIC, localAddr, remoteAddr, chosenRoute.SourceHint, netProto); addressEndpoint != nil { if r := constructAndValidateRoute(netProto, addressEndpoint, aNIC /* localAddressNIC */, nic /* outgoingNIC */, gateway, localAddr, remoteAddr, s.handleLocal, multicastLoop); r != nil { return r, nil } @@ -1422,15 +1487,8 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n if id == 0 { // If an interface is not specified, try to find a NIC that holds the local // address endpoint to construct a route. 
- for _, aNIC := range s.nics { - addressEndpoint := s.getAddressEP(aNIC, localAddr, remoteAddr, netProto) - if addressEndpoint == nil { - continue - } - - if r := constructAndValidateRoute(netProto, addressEndpoint, aNIC /* localAddressNIC */, nic /* outgoingNIC */, gateway, localAddr, remoteAddr, s.handleLocal, multicastLoop); r != nil { - return r, nil - } + if r := s.findRouteWithLocalAddrFromAnyInterfaceRLocked(nic, localAddr, remoteAddr, chosenRoute.SourceHint, gateway, netProto, multicastLoop); r != nil { + return r, nil } } } @@ -1593,7 +1651,7 @@ func (s *Stack) AddStaticNeighbor(nicID tcpip.NICID, protocol tcpip.NetworkProto } // RemoveNeighbor removes an IP to MAC address association previously created -// either automically or by AddStaticNeighbor. Returns ErrBadAddress if there +// either automatically or by AddStaticNeighbor. Returns ErrBadAddress if there // is no association with the provided address. func (s *Stack) RemoveNeighbor(nicID tcpip.NICID, protocol tcpip.NetworkProtocolNumber, addr tcpip.Address) tcpip.Error { s.mu.RLock() @@ -1679,16 +1737,26 @@ func (s *Stack) UnregisterRawTransportEndpoint(netProto tcpip.NetworkProtocolNum // RegisterRestoredEndpoint records e as an endpoint that has been restored on // this stack. -func (s *Stack) RegisterRestoredEndpoint(e ResumableEndpoint) { +func (s *Stack) RegisterRestoredEndpoint(e RestoredEndpoint) { s.mu.Lock() + defer s.mu.Unlock() + + s.restoredEndpoints = append(s.restoredEndpoints, e) +} + +// RegisterResumableEndpoint records e as an endpoint that has to be resumed. +func (s *Stack) RegisterResumableEndpoint(e ResumableEndpoint) { + s.mu.Lock() + defer s.mu.Unlock() + s.resumableEndpoints = append(s.resumableEndpoints, e) - s.mu.Unlock() } // RegisteredEndpoints returns all endpoints which are currently registered. 
func (s *Stack) RegisteredEndpoints() []TransportEndpoint { s.mu.Lock() defer s.mu.Unlock() + var es []TransportEndpoint for _, e := range s.demux.protocol { es = append(es, e.transportEndpoints()...) @@ -1699,11 +1767,12 @@ func (s *Stack) RegisteredEndpoints() []TransportEndpoint { // CleanupEndpoints returns endpoints currently in the cleanup state. func (s *Stack) CleanupEndpoints() []TransportEndpoint { s.cleanupEndpointsMu.Lock() + defer s.cleanupEndpointsMu.Unlock() + es := make([]TransportEndpoint, 0, len(s.cleanupEndpoints)) for e := range s.cleanupEndpoints { es = append(es, e) } - s.cleanupEndpointsMu.Unlock() return es } @@ -1711,10 +1780,11 @@ func (s *Stack) CleanupEndpoints() []TransportEndpoint { // for restoring a stack after a save. func (s *Stack) RestoreCleanupEndpoints(es []TransportEndpoint) { s.cleanupEndpointsMu.Lock() + defer s.cleanupEndpointsMu.Unlock() + for _, e := range es { s.cleanupEndpoints[e] = struct{}{} } - s.cleanupEndpointsMu.Unlock() } // Close closes all currently registered transport endpoints. @@ -1777,17 +1847,32 @@ func (s *Stack) Pause() { } } -// Resume restarts the stack after a restore. This must be called after the +// Restore restarts the stack after a restore. This must be called after the // entire system has been restored. +func (s *Stack) Restore() { + // RestoredEndpoint.Restore() may call other methods on s, so we can't hold + // s.mu while restoring the endpoints. + s.mu.Lock() + eps := s.restoredEndpoints + s.restoredEndpoints = nil + s.mu.Unlock() + for _, e := range eps { + e.Restore(s) + } + // Now resume any protocol level background workers. + for _, p := range s.transportProtocols { + p.proto.Resume() + } +} + +// Resume resumes the stack after a save. func (s *Stack) Resume() { - // ResumableEndpoint.Resume() may call other methods on s, so we can't hold - // s.mu while resuming the endpoints. 
s.mu.Lock() eps := s.resumableEndpoints s.resumableEndpoints = nil s.mu.Unlock() for _, e := range eps { - e.Resume(s) + e.Resume() } // Now resume any protocol level background workers. for _, p := range s.transportProtocols { @@ -1806,10 +1891,7 @@ func (s *Stack) RegisterPacketEndpoint(nicID tcpip.NICID, netProto tcpip.Network if nicID == 0 { // Register with each NIC. for _, nic := range s.nics { - if err := nic.registerPacketEndpoint(netProto, ep); err != nil { - s.unregisterPacketEndpointLocked(0, netProto, ep) - return err - } + nic.registerPacketEndpoint(netProto, ep) } return nil } @@ -1819,9 +1901,7 @@ func (s *Stack) RegisterPacketEndpoint(nicID tcpip.NICID, netProto tcpip.Network if !ok { return &tcpip.ErrUnknownNICID{} } - if err := nic.registerPacketEndpoint(netProto, ep); err != nil { - return err - } + nic.registerPacketEndpoint(netProto, ep) return nil } @@ -1887,7 +1967,7 @@ func (s *Stack) WriteRawPacket(nicID tcpip.NICID, proto tcpip.NetworkProtocolNum }) defer pkt.DecRef() pkt.NetworkProtocolNumber = proto - return nic.writeRawPacket(pkt) + return nic.writeRawPacketWithLinkHeaderInPayload(pkt) } // NetworkProtocolInstance returns the protocol instance in the stack for the @@ -2064,15 +2144,16 @@ func (s *Stack) Seed() uint32 { return s.seed } -// Rand returns a reference to a pseudo random generator that can be used -// to generate random numbers as required. -func (s *Stack) Rand() *rand.Rand { - return s.randomGenerator +// InsecureRNG returns a reference to a pseudo random generator that can be used +// to generate random numbers as required. It is not cryptographically secure +// and should not be used for security sensitive work. +func (s *Stack) InsecureRNG() *rand.Rand { + return s.insecureRNG } // SecureRNG returns the stack's cryptographically secure random number // generator. 
-func (s *Stack) SecureRNG() io.Reader { +func (s *Stack) SecureRNG() cryptorand.RNG { return s.secureRNG } @@ -2106,7 +2187,7 @@ const ( // ParsePacketBufferTransport parses the provided packet buffer's transport // header. -func (s *Stack) ParsePacketBufferTransport(protocol tcpip.TransportProtocolNumber, pkt PacketBufferPtr) ParseResult { +func (s *Stack) ParsePacketBufferTransport(protocol tcpip.TransportProtocolNumber, pkt *PacketBuffer) ParseResult { pkt.TransportProtocolNumber = protocol // Parse the transport header if present. state, ok := s.transportProtocols[protocol] diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_mutex.go index 23e5d098..ef672873 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_mutex.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_mutex.go @@ -17,7 +17,7 @@ type stackRWMutex struct { var stacklockNames []string // lockNameIndex is used as an index passed to NestedLock and NestedUnlock, -// refering to an index within lockNames. +// referring to an index within lockNames. // Values are specified using the "consts" field of go_template_instance. 
type stacklockNameIndex int diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_state_autogen.go index cb566962..32b8ff63 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_state_autogen.go @@ -3,6 +3,8 @@ package stack import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -25,9 +27,9 @@ func (r *addressStateRefs) StateSave(stateSinkObject state.Sink) { } // +checklocksignore -func (r *addressStateRefs) StateLoad(stateSourceObject state.Source) { +func (r *addressStateRefs) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &r.refCount) - stateSourceObject.AfterLoad(r.afterLoad) + stateSourceObject.AfterLoad(func() { r.afterLoad(ctx) }) } func (t *tuple) StateTypeName() string { @@ -54,10 +56,10 @@ func (t *tuple) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(3, &t.tupleID) } -func (t *tuple) afterLoad() {} +func (t *tuple) afterLoad(context.Context) {} // +checklocksignore -func (t *tuple) StateLoad(stateSourceObject state.Source) { +func (t *tuple) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &t.tupleEntry) stateSourceObject.Load(1, &t.conn) stateSourceObject.Load(2, &t.reply) @@ -92,10 +94,10 @@ func (ti *tupleID) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(5, &ti.netProto) } -func (ti *tupleID) afterLoad() {} +func (ti *tupleID) afterLoad(context.Context) {} // +checklocksignore -func (ti *tupleID) StateLoad(stateSourceObject state.Source) { +func (ti *tupleID) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &ti.srcAddr) stateSourceObject.Load(1, &ti.srcPortOrEchoRequestIdent) stateSourceObject.Load(2, &ti.dstAddr) @@ -138,10 +140,10 @@ func (cn *conn) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(8, &cn.lastUsed) } -func (cn *conn) 
afterLoad() {} +func (cn *conn) afterLoad(context.Context) {} // +checklocksignore -func (cn *conn) StateLoad(stateSourceObject state.Source) { +func (cn *conn) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &cn.ct) stateSourceObject.Load(1, &cn.original) stateSourceObject.Load(2, &cn.reply) @@ -177,10 +179,10 @@ func (ct *ConnTrack) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(3, &ct.buckets) } -func (ct *ConnTrack) afterLoad() {} +func (ct *ConnTrack) afterLoad(context.Context) {} // +checklocksignore -func (ct *ConnTrack) StateLoad(stateSourceObject state.Source) { +func (ct *ConnTrack) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &ct.seed) stateSourceObject.Load(1, &ct.clock) stateSourceObject.Load(2, &ct.rand) @@ -205,10 +207,10 @@ func (bkt *bucket) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &bkt.tuples) } -func (bkt *bucket) afterLoad() {} +func (bkt *bucket) afterLoad(context.Context) {} // +checklocksignore -func (bkt *bucket) StateLoad(stateSourceObject state.Source) { +func (bkt *bucket) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &bkt.tuples) } @@ -232,10 +234,10 @@ func (l *groPacketList) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &l.tail) } -func (l *groPacketList) afterLoad() {} +func (l *groPacketList) afterLoad(context.Context) {} // +checklocksignore -func (l *groPacketList) StateLoad(stateSourceObject state.Source) { +func (l *groPacketList) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &l.head) stateSourceObject.Load(1, &l.tail) } @@ -260,10 +262,10 @@ func (e *groPacketEntry) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &e.prev) } -func (e *groPacketEntry) afterLoad() {} +func (e *groPacketEntry) afterLoad(context.Context) {} // +checklocksignore -func (e *groPacketEntry) 
StateLoad(stateSourceObject state.Source) { +func (e *groPacketEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &e.next) stateSourceObject.Load(1, &e.prev) } @@ -295,14 +297,14 @@ func (it *IPTables) StateSave(stateSinkObject state.Sink) { } // +checklocksignore -func (it *IPTables) StateLoad(stateSourceObject state.Source) { +func (it *IPTables) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &it.connections) stateSourceObject.Load(1, &it.reaper) stateSourceObject.Load(2, &it.mu) stateSourceObject.Load(3, &it.v4Tables) stateSourceObject.Load(4, &it.v6Tables) stateSourceObject.Load(5, &it.modified) - stateSourceObject.AfterLoad(it.afterLoad) + stateSourceObject.AfterLoad(func() { it.afterLoad(ctx) }) } func (table *Table) StateTypeName() string { @@ -327,10 +329,10 @@ func (table *Table) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(2, &table.Underflows) } -func (table *Table) afterLoad() {} +func (table *Table) afterLoad(context.Context) {} // +checklocksignore -func (table *Table) StateLoad(stateSourceObject state.Source) { +func (table *Table) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &table.Rules) stateSourceObject.Load(1, &table.BuiltinChains) stateSourceObject.Load(2, &table.Underflows) @@ -358,10 +360,10 @@ func (r *Rule) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(2, &r.Target) } -func (r *Rule) afterLoad() {} +func (r *Rule) afterLoad(context.Context) {} // +checklocksignore -func (r *Rule) StateLoad(stateSourceObject state.Source) { +func (r *Rule) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &r.Filter) stateSourceObject.Load(1, &r.Matchers) stateSourceObject.Load(2, &r.Target) @@ -411,10 +413,10 @@ func (fl *IPHeaderFilter) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(13, &fl.OutputInterfaceInvert) } -func (fl 
*IPHeaderFilter) afterLoad() {} +func (fl *IPHeaderFilter) afterLoad(context.Context) {} // +checklocksignore -func (fl *IPHeaderFilter) StateLoad(stateSourceObject state.Source) { +func (fl *IPHeaderFilter) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &fl.Protocol) stateSourceObject.Load(1, &fl.CheckProtocol) stateSourceObject.Load(2, &fl.Dst) @@ -451,10 +453,10 @@ func (l *neighborEntryList) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &l.tail) } -func (l *neighborEntryList) afterLoad() {} +func (l *neighborEntryList) afterLoad(context.Context) {} // +checklocksignore -func (l *neighborEntryList) StateLoad(stateSourceObject state.Source) { +func (l *neighborEntryList) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &l.head) stateSourceObject.Load(1, &l.tail) } @@ -479,19 +481,19 @@ func (e *neighborEntryEntry) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &e.prev) } -func (e *neighborEntryEntry) afterLoad() {} +func (e *neighborEntryEntry) afterLoad(context.Context) {} // +checklocksignore -func (e *neighborEntryEntry) StateLoad(stateSourceObject state.Source) { +func (e *neighborEntryEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &e.next) stateSourceObject.Load(1, &e.prev) } -func (p *PacketBuffer) StateTypeName() string { +func (pk *PacketBuffer) StateTypeName() string { return "pkg/tcpip/stack.PacketBuffer" } -func (p *PacketBuffer) StateFields() []string { +func (pk *PacketBuffer) StateFields() []string { return []string{ "packetBufferRefs", "buf", @@ -515,55 +517,55 @@ func (p *PacketBuffer) StateFields() []string { } } -func (p *PacketBuffer) beforeSave() {} +func (pk *PacketBuffer) beforeSave() {} // +checklocksignore -func (p *PacketBuffer) StateSave(stateSinkObject state.Sink) { - p.beforeSave() - stateSinkObject.Save(0, &p.packetBufferRefs) - stateSinkObject.Save(1, &p.buf) - 
stateSinkObject.Save(2, &p.reserved) - stateSinkObject.Save(3, &p.pushed) - stateSinkObject.Save(4, &p.consumed) - stateSinkObject.Save(5, &p.headers) - stateSinkObject.Save(6, &p.NetworkProtocolNumber) - stateSinkObject.Save(7, &p.TransportProtocolNumber) - stateSinkObject.Save(8, &p.Hash) - stateSinkObject.Save(9, &p.Owner) - stateSinkObject.Save(10, &p.EgressRoute) - stateSinkObject.Save(11, &p.GSOOptions) - stateSinkObject.Save(12, &p.snatDone) - stateSinkObject.Save(13, &p.dnatDone) - stateSinkObject.Save(14, &p.PktType) - stateSinkObject.Save(15, &p.NICID) - stateSinkObject.Save(16, &p.RXChecksumValidated) - stateSinkObject.Save(17, &p.NetworkPacketInfo) - stateSinkObject.Save(18, &p.tuple) +func (pk *PacketBuffer) StateSave(stateSinkObject state.Sink) { + pk.beforeSave() + stateSinkObject.Save(0, &pk.packetBufferRefs) + stateSinkObject.Save(1, &pk.buf) + stateSinkObject.Save(2, &pk.reserved) + stateSinkObject.Save(3, &pk.pushed) + stateSinkObject.Save(4, &pk.consumed) + stateSinkObject.Save(5, &pk.headers) + stateSinkObject.Save(6, &pk.NetworkProtocolNumber) + stateSinkObject.Save(7, &pk.TransportProtocolNumber) + stateSinkObject.Save(8, &pk.Hash) + stateSinkObject.Save(9, &pk.Owner) + stateSinkObject.Save(10, &pk.EgressRoute) + stateSinkObject.Save(11, &pk.GSOOptions) + stateSinkObject.Save(12, &pk.snatDone) + stateSinkObject.Save(13, &pk.dnatDone) + stateSinkObject.Save(14, &pk.PktType) + stateSinkObject.Save(15, &pk.NICID) + stateSinkObject.Save(16, &pk.RXChecksumValidated) + stateSinkObject.Save(17, &pk.NetworkPacketInfo) + stateSinkObject.Save(18, &pk.tuple) } -func (p *PacketBuffer) afterLoad() {} +func (pk *PacketBuffer) afterLoad(context.Context) {} // +checklocksignore -func (p *PacketBuffer) StateLoad(stateSourceObject state.Source) { - stateSourceObject.Load(0, &p.packetBufferRefs) - stateSourceObject.Load(1, &p.buf) - stateSourceObject.Load(2, &p.reserved) - stateSourceObject.Load(3, &p.pushed) - stateSourceObject.Load(4, &p.consumed) - 
stateSourceObject.Load(5, &p.headers) - stateSourceObject.Load(6, &p.NetworkProtocolNumber) - stateSourceObject.Load(7, &p.TransportProtocolNumber) - stateSourceObject.Load(8, &p.Hash) - stateSourceObject.Load(9, &p.Owner) - stateSourceObject.Load(10, &p.EgressRoute) - stateSourceObject.Load(11, &p.GSOOptions) - stateSourceObject.Load(12, &p.snatDone) - stateSourceObject.Load(13, &p.dnatDone) - stateSourceObject.Load(14, &p.PktType) - stateSourceObject.Load(15, &p.NICID) - stateSourceObject.Load(16, &p.RXChecksumValidated) - stateSourceObject.Load(17, &p.NetworkPacketInfo) - stateSourceObject.Load(18, &p.tuple) +func (pk *PacketBuffer) StateLoad(ctx context.Context, stateSourceObject state.Source) { + stateSourceObject.Load(0, &pk.packetBufferRefs) + stateSourceObject.Load(1, &pk.buf) + stateSourceObject.Load(2, &pk.reserved) + stateSourceObject.Load(3, &pk.pushed) + stateSourceObject.Load(4, &pk.consumed) + stateSourceObject.Load(5, &pk.headers) + stateSourceObject.Load(6, &pk.NetworkProtocolNumber) + stateSourceObject.Load(7, &pk.TransportProtocolNumber) + stateSourceObject.Load(8, &pk.Hash) + stateSourceObject.Load(9, &pk.Owner) + stateSourceObject.Load(10, &pk.EgressRoute) + stateSourceObject.Load(11, &pk.GSOOptions) + stateSourceObject.Load(12, &pk.snatDone) + stateSourceObject.Load(13, &pk.dnatDone) + stateSourceObject.Load(14, &pk.PktType) + stateSourceObject.Load(15, &pk.NICID) + stateSourceObject.Load(16, &pk.RXChecksumValidated) + stateSourceObject.Load(17, &pk.NetworkPacketInfo) + stateSourceObject.Load(18, &pk.tuple) } func (h *headerInfo) StateTypeName() string { @@ -586,10 +588,10 @@ func (h *headerInfo) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &h.length) } -func (h *headerInfo) afterLoad() {} +func (h *headerInfo) afterLoad(context.Context) {} // +checklocksignore -func (h *headerInfo) StateLoad(stateSourceObject state.Source) { +func (h *headerInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) { 
stateSourceObject.Load(0, &h.offset) stateSourceObject.Load(1, &h.length) } @@ -612,10 +614,10 @@ func (d *PacketData) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &d.pk) } -func (d *PacketData) afterLoad() {} +func (d *PacketData) afterLoad(context.Context) {} // +checklocksignore -func (d *PacketData) StateLoad(stateSourceObject state.Source) { +func (d *PacketData) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &d.pk) } @@ -637,10 +639,10 @@ func (pl *PacketBufferList) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &pl.pbs) } -func (pl *PacketBufferList) afterLoad() {} +func (pl *PacketBufferList) afterLoad(context.Context) {} // +checklocksignore -func (pl *PacketBufferList) StateLoad(stateSourceObject state.Source) { +func (pl *PacketBufferList) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &pl.pbs) } @@ -663,9 +665,9 @@ func (r *packetBufferRefs) StateSave(stateSinkObject state.Sink) { } // +checklocksignore -func (r *packetBufferRefs) StateLoad(stateSourceObject state.Source) { +func (r *packetBufferRefs) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &r.refCount) - stateSourceObject.AfterLoad(r.afterLoad) + stateSourceObject.AfterLoad(func() { r.afterLoad(ctx) }) } func (t *TransportEndpointID) StateTypeName() string { @@ -692,10 +694,10 @@ func (t *TransportEndpointID) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(3, &t.RemoteAddress) } -func (t *TransportEndpointID) afterLoad() {} +func (t *TransportEndpointID) afterLoad(context.Context) {} // +checklocksignore -func (t *TransportEndpointID) StateLoad(stateSourceObject state.Source) { +func (t *TransportEndpointID) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &t.LocalPort) stateSourceObject.Load(1, &t.LocalAddress) stateSourceObject.Load(2, &t.RemotePort) @@ -722,10 +724,10 @@ 
func (n *NetworkPacketInfo) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &n.IsForwardedPacket) } -func (n *NetworkPacketInfo) afterLoad() {} +func (n *NetworkPacketInfo) afterLoad(context.Context) {} // +checklocksignore -func (n *NetworkPacketInfo) StateLoad(stateSourceObject state.Source) { +func (n *NetworkPacketInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &n.LocalAddressBroadcast) stateSourceObject.Load(1, &n.IsForwardedPacket) } @@ -766,10 +768,10 @@ func (g *GSO) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(5, &g.MaxSize) } -func (g *GSO) afterLoad() {} +func (g *GSO) afterLoad(context.Context) {} // +checklocksignore -func (g *GSO) StateLoad(stateSourceObject state.Source) { +func (g *GSO) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &g.Type) stateSourceObject.Load(1, &g.NeedsCsum) stateSourceObject.Load(2, &g.CsumOffset) @@ -806,10 +808,10 @@ func (r *routeInfo) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(5, &r.Loop) } -func (r *routeInfo) afterLoad() {} +func (r *routeInfo) afterLoad(context.Context) {} // +checklocksignore -func (r *routeInfo) StateLoad(stateSourceObject state.Source) { +func (r *routeInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &r.RemoteAddress) stateSourceObject.Load(1, &r.LocalAddress) stateSourceObject.Load(2, &r.LocalLinkAddress) @@ -838,10 +840,10 @@ func (r *RouteInfo) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &r.RemoteLinkAddress) } -func (r *RouteInfo) afterLoad() {} +func (r *RouteInfo) afterLoad(context.Context) {} // +checklocksignore -func (r *RouteInfo) StateLoad(stateSourceObject state.Source) { +func (r *RouteInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &r.routeInfo) stateSourceObject.Load(1, &r.RemoteLinkAddress) } @@ -874,10 +876,10 @@ func (t 
*TransportEndpointInfo) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(5, &t.RegisterNICID) } -func (t *TransportEndpointInfo) afterLoad() {} +func (t *TransportEndpointInfo) afterLoad(context.Context) {} // +checklocksignore -func (t *TransportEndpointInfo) StateLoad(stateSourceObject state.Source) { +func (t *TransportEndpointInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &t.NetProto) stateSourceObject.Load(1, &t.TransProto) stateSourceObject.Load(2, &t.ID) @@ -920,10 +922,10 @@ func (t *TCPCubicState) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(8, &t.WEst) } -func (t *TCPCubicState) afterLoad() {} +func (t *TCPCubicState) afterLoad(context.Context) {} // +checklocksignore -func (t *TCPCubicState) StateLoad(stateSourceObject state.Source) { +func (t *TCPCubicState) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &t.WLastMax) stateSourceObject.Load(1, &t.WMax) stateSourceObject.Load(2, &t.T) @@ -971,10 +973,10 @@ func (t *TCPRACKState) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(9, &t.RTTSeq) } -func (t *TCPRACKState) afterLoad() {} +func (t *TCPRACKState) afterLoad(context.Context) {} // +checklocksignore -func (t *TCPRACKState) StateLoad(stateSourceObject state.Source) { +func (t *TCPRACKState) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &t.XmitTime) stateSourceObject.Load(1, &t.EndSequence) stateSourceObject.Load(2, &t.FACK) @@ -1011,10 +1013,10 @@ func (t *TCPEndpointID) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(3, &t.RemoteAddress) } -func (t *TCPEndpointID) afterLoad() {} +func (t *TCPEndpointID) afterLoad(context.Context) {} // +checklocksignore -func (t *TCPEndpointID) StateLoad(stateSourceObject state.Source) { +func (t *TCPEndpointID) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &t.LocalPort) 
stateSourceObject.Load(1, &t.LocalAddress) stateSourceObject.Load(2, &t.RemotePort) @@ -1049,10 +1051,10 @@ func (t *TCPFastRecoveryState) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(5, &t.RescueRxt) } -func (t *TCPFastRecoveryState) afterLoad() {} +func (t *TCPFastRecoveryState) afterLoad(context.Context) {} // +checklocksignore -func (t *TCPFastRecoveryState) StateLoad(stateSourceObject state.Source) { +func (t *TCPFastRecoveryState) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &t.Active) stateSourceObject.Load(1, &t.First) stateSourceObject.Load(2, &t.Last) @@ -1085,10 +1087,10 @@ func (t *TCPReceiverState) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(3, &t.PendingBufUsed) } -func (t *TCPReceiverState) afterLoad() {} +func (t *TCPReceiverState) afterLoad(context.Context) {} // +checklocksignore -func (t *TCPReceiverState) StateLoad(stateSourceObject state.Source) { +func (t *TCPReceiverState) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &t.RcvNxt) stateSourceObject.Load(1, &t.RcvAcc) stateSourceObject.Load(2, &t.RcvWndScale) @@ -1117,10 +1119,10 @@ func (t *TCPRTTState) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(2, &t.SRTTInited) } -func (t *TCPRTTState) afterLoad() {} +func (t *TCPRTTState) afterLoad(context.Context) {} // +checklocksignore -func (t *TCPRTTState) StateLoad(stateSourceObject state.Source) { +func (t *TCPRTTState) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &t.SRTT) stateSourceObject.Load(1, &t.RTTVar) stateSourceObject.Load(2, &t.SRTTInited) @@ -1188,10 +1190,10 @@ func (t *TCPSenderState) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(22, &t.SpuriousRecovery) } -func (t *TCPSenderState) afterLoad() {} +func (t *TCPSenderState) afterLoad(context.Context) {} // +checklocksignore -func (t *TCPSenderState) StateLoad(stateSourceObject state.Source) 
{ +func (t *TCPSenderState) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &t.LastSendTime) stateSourceObject.Load(1, &t.DupAckCount) stateSourceObject.Load(2, &t.SndCwnd) @@ -1239,10 +1241,10 @@ func (t *TCPSACKInfo) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(2, &t.MaxSACKED) } -func (t *TCPSACKInfo) afterLoad() {} +func (t *TCPSACKInfo) afterLoad(context.Context) {} // +checklocksignore -func (t *TCPSACKInfo) StateLoad(stateSourceObject state.Source) { +func (t *TCPSACKInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &t.Blocks) stateSourceObject.Load(1, &t.ReceivedBlocks) stateSourceObject.Load(2, &t.MaxSACKED) @@ -1282,10 +1284,10 @@ func (r *RcvBufAutoTuneParams) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(8, &r.Disabled) } -func (r *RcvBufAutoTuneParams) afterLoad() {} +func (r *RcvBufAutoTuneParams) afterLoad(context.Context) {} // +checklocksignore -func (r *RcvBufAutoTuneParams) StateLoad(stateSourceObject state.Source) { +func (r *RcvBufAutoTuneParams) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &r.MeasureTime) stateSourceObject.Load(1, &r.CopiedBytes) stateSourceObject.Load(2, &r.PrevCopiedBytes) @@ -1319,10 +1321,10 @@ func (t *TCPRcvBufState) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(2, &t.RcvClosed) } -func (t *TCPRcvBufState) afterLoad() {} +func (t *TCPRcvBufState) afterLoad(context.Context) {} // +checklocksignore -func (t *TCPRcvBufState) StateLoad(stateSourceObject state.Source) { +func (t *TCPRcvBufState) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &t.RcvBufUsed) stateSourceObject.Load(1, &t.RcvAutoParams) stateSourceObject.Load(2, &t.RcvClosed) @@ -1356,10 +1358,10 @@ func (t *TCPSndBufState) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(5, &t.AutoTuneSndBufDisabled) } -func (t *TCPSndBufState) 
afterLoad() {} +func (t *TCPSndBufState) afterLoad(context.Context) {} // +checklocksignore -func (t *TCPSndBufState) StateLoad(stateSourceObject state.Source) { +func (t *TCPSndBufState) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &t.SndBufSize) stateSourceObject.Load(1, &t.SndBufUsed) stateSourceObject.Load(2, &t.SndClosed) @@ -1392,10 +1394,10 @@ func (t *TCPEndpointStateInner) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(3, &t.RecentTS) } -func (t *TCPEndpointStateInner) afterLoad() {} +func (t *TCPEndpointStateInner) afterLoad(context.Context) {} // +checklocksignore -func (t *TCPEndpointStateInner) StateLoad(stateSourceObject state.Source) { +func (t *TCPEndpointStateInner) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &t.TSOffset) stateSourceObject.Load(1, &t.SACKPermitted) stateSourceObject.Load(2, &t.SendTSOk) @@ -1434,10 +1436,10 @@ func (t *TCPEndpointState) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(7, &t.Sender) } -func (t *TCPEndpointState) afterLoad() {} +func (t *TCPEndpointState) afterLoad(context.Context) {} // +checklocksignore -func (t *TCPEndpointState) StateLoad(stateSourceObject state.Source) { +func (t *TCPEndpointState) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &t.TCPEndpointStateInner) stateSourceObject.Load(1, &t.ID) stateSourceObject.Load(2, &t.SegTime) @@ -1474,10 +1476,10 @@ func (ep *multiPortEndpoint) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(4, &ep.endpoints) } -func (ep *multiPortEndpoint) afterLoad() {} +func (ep *multiPortEndpoint) afterLoad(context.Context) {} // +checklocksignore -func (ep *multiPortEndpoint) StateLoad(stateSourceObject state.Source) { +func (ep *multiPortEndpoint) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &ep.demux) stateSourceObject.Load(1, &ep.netProto) 
stateSourceObject.Load(2, &ep.transProto) @@ -1505,10 +1507,10 @@ func (l *tupleList) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &l.tail) } -func (l *tupleList) afterLoad() {} +func (l *tupleList) afterLoad(context.Context) {} // +checklocksignore -func (l *tupleList) StateLoad(stateSourceObject state.Source) { +func (l *tupleList) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &l.head) stateSourceObject.Load(1, &l.tail) } @@ -1533,10 +1535,10 @@ func (e *tupleEntry) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &e.prev) } -func (e *tupleEntry) afterLoad() {} +func (e *tupleEntry) afterLoad(context.Context) {} // +checklocksignore -func (e *tupleEntry) StateLoad(stateSourceObject state.Source) { +func (e *tupleEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &e.next) stateSourceObject.Load(1, &e.prev) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/state_conn_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/state_conn_mutex.go index f1593040..6f9075b5 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/state_conn_mutex.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/state_conn_mutex.go @@ -17,7 +17,7 @@ type stateConnRWMutex struct { var stateConnlockNames []string // lockNameIndex is used as an index passed to NestedLock and NestedUnlock, -// refering to an index within lockNames. +// referring to an index within lockNames. // Values are specified using the "consts" field of go_template_instance. 
type stateConnlockNameIndex int diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/tcp.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/tcp.go index 44b866db..3393e6b2 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/tcp.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/tcp.go @@ -15,6 +15,7 @@ package stack import ( + "context" "time" "gvisor.dev/gvisor/pkg/atomicbitops" @@ -24,6 +25,19 @@ import ( "gvisor.dev/gvisor/pkg/tcpip/seqnum" ) +// contextID is this package's type for context.Context.Value keys. +type contextID int + +const ( + // CtxRestoreStack is a Context.Value key for the stack to be used in restore. + CtxRestoreStack contextID = iota +) + +// RestoreStackFromContext returns the stack to be used during restore. +func RestoreStackFromContext(ctx context.Context) *Stack { + return ctx.Value(CtxRestoreStack).(*Stack) +} + // TCPProbeFunc is the expected function type for a TCP probe function to be // passed to stack.AddTCPProbe. type TCPProbeFunc func(s *TCPEndpointState) diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/transport_demuxer.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/transport_demuxer.go index 6d38b637..089f214b 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/transport_demuxer.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/transport_demuxer.go @@ -155,7 +155,7 @@ func (epsByNIC *endpointsByNIC) transportEndpoints() []TransportEndpoint { // handlePacket is called by the stack when new packets arrive to this transport // endpoint. It returns false if the packet could not be matched to any // transport endpoint, true otherwise. 
-func (epsByNIC *endpointsByNIC) handlePacket(id TransportEndpointID, pkt PacketBufferPtr) bool { +func (epsByNIC *endpointsByNIC) handlePacket(id TransportEndpointID, pkt *PacketBuffer) bool { epsByNIC.mu.RLock() mpep, ok := epsByNIC.endpoints[pkt.NICID] @@ -187,7 +187,7 @@ func (epsByNIC *endpointsByNIC) handlePacket(id TransportEndpointID, pkt PacketB } // handleError delivers an error to the transport endpoint identified by id. -func (epsByNIC *endpointsByNIC) handleError(n *nic, id TransportEndpointID, transErr TransportError, pkt PacketBufferPtr) { +func (epsByNIC *endpointsByNIC) handleError(n *nic, id TransportEndpointID, transErr TransportError, pkt *PacketBuffer) { epsByNIC.mu.RLock() mpep, ok := epsByNIC.endpoints[n.ID()] @@ -278,7 +278,7 @@ type transportDemuxer struct { // the dispatcher to delivery packets to the QueuePacket method instead of // calling HandlePacket directly on the endpoint. type queuedTransportProtocol interface { - QueuePacket(ep TransportEndpoint, id TransportEndpointID, pkt PacketBufferPtr) + QueuePacket(ep TransportEndpoint, id TransportEndpointID, pkt *PacketBuffer) } func newTransportDemuxer(stack *Stack) *transportDemuxer { @@ -400,7 +400,7 @@ func (ep *multiPortEndpoint) selectEndpoint(id TransportEndpointID, seed uint32) return ep.endpoints[idx] } -func (ep *multiPortEndpoint) handlePacketAll(id TransportEndpointID, pkt PacketBufferPtr) { +func (ep *multiPortEndpoint) handlePacketAll(id TransportEndpointID, pkt *PacketBuffer) { ep.mu.RLock() queuedProtocol, mustQueue := ep.demux.queuedProtocols[protocolIDs{ep.netProto, ep.transProto}] // HandlePacket may modify pkt, so each endpoint needs @@ -546,7 +546,7 @@ func (d *transportDemuxer) unregisterEndpoint(netProtos []tcpip.NetworkProtocolN // deliverPacket attempts to find one or more matching transport endpoints, and // then, if matches are found, delivers the packet to them. Returns true if // the packet no longer needs to be handled. 
-func (d *transportDemuxer) deliverPacket(protocol tcpip.TransportProtocolNumber, pkt PacketBufferPtr, id TransportEndpointID) bool { +func (d *transportDemuxer) deliverPacket(protocol tcpip.TransportProtocolNumber, pkt *PacketBuffer, id TransportEndpointID) bool { eps, ok := d.protocol[protocolIDs{pkt.NetworkProtocolNumber, protocol}] if !ok { return false @@ -599,7 +599,7 @@ func (d *transportDemuxer) deliverPacket(protocol tcpip.TransportProtocolNumber, // deliverRawPacket attempts to deliver the given packet and returns whether it // was delivered successfully. -func (d *transportDemuxer) deliverRawPacket(protocol tcpip.TransportProtocolNumber, pkt PacketBufferPtr) bool { +func (d *transportDemuxer) deliverRawPacket(protocol tcpip.TransportProtocolNumber, pkt *PacketBuffer) bool { eps, ok := d.protocol[protocolIDs{pkt.NetworkProtocolNumber, protocol}] if !ok { return false @@ -633,7 +633,7 @@ func (d *transportDemuxer) deliverRawPacket(protocol tcpip.TransportProtocolNumb // endpoint. // // Returns true if the error was delivered. 
-func (d *transportDemuxer) deliverError(n *nic, net tcpip.NetworkProtocolNumber, trans tcpip.TransportProtocolNumber, transErr TransportError, pkt PacketBufferPtr, id TransportEndpointID) bool { +func (d *transportDemuxer) deliverError(n *nic, net tcpip.NetworkProtocolNumber, trans tcpip.TransportProtocolNumber, transErr TransportError, pkt *PacketBuffer, id TransportEndpointID) bool { eps, ok := d.protocol[protocolIDs{net, trans}] if !ok { return false @@ -718,7 +718,7 @@ func (d *transportDemuxer) unregisterRawEndpoint(netProto tcpip.NetworkProtocolN eps.mu.Unlock() } -func isInboundMulticastOrBroadcast(pkt PacketBufferPtr, localAddr tcpip.Address) bool { +func isInboundMulticastOrBroadcast(pkt *PacketBuffer, localAddr tcpip.Address) bool { return pkt.NetworkPacketInfo.LocalAddressBroadcast || header.IsV4MulticastAddress(localAddr) || header.IsV6MulticastAddress(localAddr) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/transport_endpoints_mutex.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/transport_endpoints_mutex.go index 2098d77e..cb6f13d7 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/transport_endpoints_mutex.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/transport_endpoints_mutex.go @@ -17,7 +17,7 @@ type transportEndpointsRWMutex struct { var transportEndpointslockNames []string // lockNameIndex is used as an index passed to NestedLock and NestedUnlock, -// refering to an index within lockNames. +// referring to an index within lockNames. // Values are specified using the "consts" field of go_template_instance. type transportEndpointslockNameIndex int diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stdclock_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stdclock_state.go index 25be1755..530b46ec 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stdclock_state.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stdclock_state.go @@ -14,7 +14,10 @@ package tcpip -import "time" +import ( + "context" + "time" +) // beforeSave is invoked by stateify. 
func (s *stdClock) beforeSave() { @@ -22,6 +25,6 @@ func (s *stdClock) beforeSave() { } // afterLoad is invoked by stateify. -func (s *stdClock) afterLoad() { +func (s *stdClock) afterLoad(context.Context) { s.baseTime = time.Now() } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip.go index 92ac54db..7c21c43c 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip.go @@ -112,6 +112,11 @@ func (mt MonotonicTime) Sub(u MonotonicTime) time.Duration { return time.Unix(0, mt.nanoseconds).Sub(time.Unix(0, u.nanoseconds)) } +// Milliseconds returns the time in milliseconds. +func (mt MonotonicTime) Milliseconds() int64 { + return mt.nanoseconds / 1e6 +} + // A Clock provides the current time and schedules work for execution. // // Times returned by a Clock should always be used for application-visible @@ -316,17 +321,24 @@ func (a Address) MatchingPrefix(b Address) uint8 { // // +stateify savable type AddressMask struct { - mask string + mask [16]byte + length int } // MaskFrom returns a Mask based on str. +// +// MaskFrom may allocate, and so should not be in hot paths. func MaskFrom(str string) AddressMask { - return AddressMask{mask: str} + mask := AddressMask{length: len(str)} + copy(mask.mask[:], str) + return mask } // MaskFromBytes returns a Mask based on bs. func MaskFromBytes(bs []byte) AddressMask { - return AddressMask{mask: string(bs)} + mask := AddressMask{length: len(bs)} + copy(mask.mask[:], bs) + return mask } // String implements Stringer. @@ -337,23 +349,23 @@ func (m AddressMask) String() string { // AsSlice returns a as a byte slice. Callers should be careful as it can // return a window into existing memory. func (m *AddressMask) AsSlice() []byte { - return []byte(m.mask) + return []byte(m.mask[:m.length]) } // BitLen returns the length of the mask in bits. 
func (m AddressMask) BitLen() int { - return len(m.mask) * 8 + return m.length * 8 } // Len returns the length of the mask in bytes. func (m AddressMask) Len() int { - return len(m.mask) + return m.length } // Prefix returns the number of bits before the first host bit. func (m AddressMask) Prefix() int { p := 0 - for _, b := range []byte(m.mask) { + for _, b := range m.mask[:m.length] { p += bits.LeadingZeros8(^b) } return p @@ -890,7 +902,7 @@ type WriteOptions struct { // Atomic means that all data fetched from Payloader must be written to the // endpoint. If Atomic is false, then data fetched from the Payloader may be - // discarded if available endpoint buffer space is unsufficient. + // discarded if available endpoint buffer space is insufficient. Atomic bool // ControlMessages contains optional overrides used when writing a packet. @@ -1128,7 +1140,7 @@ type SettableSocketOption interface { isSettableSocketOption() } -// ICMPv6Filter specifes a filter for ICMPv6 types. +// ICMPv6Filter specifies a filter for ICMPv6 types. // // +stateify savable type ICMPv6Filter struct { @@ -1386,16 +1398,16 @@ func (*TCPTimeWaitReuseOption) isGettableTransportProtocolOption() {} func (*TCPTimeWaitReuseOption) isSettableTransportProtocolOption() {} const ( - // TCPTimeWaitReuseDisabled indicates reuse of port bound by endponts in TIME-WAIT cannot + // TCPTimeWaitReuseDisabled indicates reuse of port bound by endpoints in TIME-WAIT cannot // be reused for new connections. TCPTimeWaitReuseDisabled TCPTimeWaitReuseOption = iota - // TCPTimeWaitReuseGlobal indicates reuse of port bound by endponts in TIME-WAIT can + // TCPTimeWaitReuseGlobal indicates reuse of port bound by endpoints in TIME-WAIT can // be reused for new connections irrespective of the src/dest addresses. TCPTimeWaitReuseGlobal // TCPTimeWaitReuseLoopbackOnly indicates reuse of port bound by endpoint in TIME-WAIT can - // only be reused if the connection was a connection over loopback. 
i.e src/dest adddresses + // only be reused if the connection was a connection over loopback. i.e src/dest addresses // are loopback addresses. TCPTimeWaitReuseLoopbackOnly ) @@ -1494,6 +1506,10 @@ type Route struct { // NIC is the id of the nic to be used if this row is viable. NIC NICID + + // SourceHint indicates a preferred source address to use when NICs + // have multiple addresses. + SourceHint Address } // String implements the fmt.Stringer interface. @@ -1891,7 +1907,7 @@ type IPForwardingStats struct { UnknownOutputEndpoint *StatCounter // NoMulticastPendingQueueBufferSpace is the number of multicast packets that - // were dropped due to insufficent buffer space in the pending packet queue. + // were dropped due to insufficient buffer space in the pending packet queue. NoMulticastPendingQueueBufferSpace *StatCounter // OutgoingDeviceNoBufferSpace is the number of packets that were dropped due @@ -2157,6 +2173,11 @@ type TCPStats struct { // SpuriousRTORecovery is the number of spurious RTOs. SpuriousRTORecovery *StatCounter + + // ForwardMaxInFlightDrop is the number of connection requests that are + // dropped due to exceeding the maximum number of in-flight connection + // requests. + ForwardMaxInFlightDrop *StatCounter } // UDPStats collects UDP-specific stats. @@ -2297,11 +2318,11 @@ func (m *MultiIntegralStatCounterMap) Increment(key uint64) { type NICStats struct { // LINT.IfChange(NICStats) - // UnknownL3ProtocolRcvdPacketCounts records the number of packets recieved - // for each unknown or unsupported netowrk protocol number. + // UnknownL3ProtocolRcvdPacketCounts records the number of packets received + // for each unknown or unsupported network protocol number. UnknownL3ProtocolRcvdPacketCounts *IntegralStatCounterMap - // UnknownL4ProtocolRcvdPacketCounts records the number of packets recieved + // UnknownL4ProtocolRcvdPacketCounts records the number of packets received // for each unknown or unsupported transport protocol number. 
UnknownL4ProtocolRcvdPacketCounts *IntegralStatCounterMap @@ -2647,36 +2668,40 @@ func (a AddressWithPrefix) Subnet() Subnet { addrLen := a.Address.length if a.PrefixLen <= 0 { return Subnet{ - address: AddrFromSlice(bytes.Repeat([]byte{0}, addrLen)), - mask: MaskFromBytes(bytes.Repeat([]byte{0}, addrLen)), + address: Address{length: addrLen}, + mask: AddressMask{length: addrLen}, } } if a.PrefixLen >= addrLen*8 { - return Subnet{ + sub := Subnet{ address: a.Address, - mask: MaskFromBytes(bytes.Repeat([]byte{0xff}, addrLen)), + mask: AddressMask{length: addrLen}, } + for i := 0; i < addrLen; i++ { + sub.mask.mask[i] = 0xff + } + return sub } - sa := make([]byte, addrLen) - sm := make([]byte, addrLen) + sa := Address{length: addrLen} + sm := AddressMask{length: addrLen} n := uint(a.PrefixLen) for i := 0; i < addrLen; i++ { if n >= 8 { - sa[i] = a.Address.addr[i] - sm[i] = 0xff + sa.addr[i] = a.Address.addr[i] + sm.mask[i] = 0xff n -= 8 continue } - sm[i] = ^byte(0xff >> n) - sa[i] = a.Address.addr[i] & sm[i] + sm.mask[i] = ^byte(0xff >> n) + sa.addr[i] = a.Address.addr[i] & sm.mask[i] n = 0 } // For extra caution, call NewSubnet rather than directly creating the Subnet // value. If that fails it indicates a serious bug in this code, so panic is // in order. 
- s, err := NewSubnet(AddrFromSlice(sa), MaskFromBytes(sm)) + s, err := NewSubnet(sa, sm) if err != nil { panic("invalid subnet: " + err.Error()) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_state.go index 5181e355..0603ff04 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_state.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_state.go @@ -15,6 +15,7 @@ package tcpip import ( + "context" "time" ) @@ -22,6 +23,6 @@ func (c *ReceivableControlMessages) saveTimestamp() int64 { return c.Timestamp.UnixNano() } -func (c *ReceivableControlMessages) loadTimestamp(nsec int64) { +func (c *ReceivableControlMessages) loadTimestamp(_ context.Context, nsec int64) { c.Timestamp = time.Unix(0, nsec) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_state_autogen.go index 7fce7aeb..d49ae9b9 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_state_autogen.go @@ -3,6 +3,8 @@ package tcpip import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -21,10 +23,10 @@ func (e *ErrAborted) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrAborted) afterLoad() {} +func (e *ErrAborted) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrAborted) StateLoad(stateSourceObject state.Source) { +func (e *ErrAborted) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrAddressFamilyNotSupported) StateTypeName() string { @@ -42,10 +44,10 @@ func (e *ErrAddressFamilyNotSupported) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrAddressFamilyNotSupported) afterLoad() {} +func (e *ErrAddressFamilyNotSupported) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrAddressFamilyNotSupported) StateLoad(stateSourceObject state.Source) { +func (e *ErrAddressFamilyNotSupported) StateLoad(ctx context.Context, 
stateSourceObject state.Source) { } func (e *ErrAlreadyBound) StateTypeName() string { @@ -63,10 +65,10 @@ func (e *ErrAlreadyBound) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrAlreadyBound) afterLoad() {} +func (e *ErrAlreadyBound) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrAlreadyBound) StateLoad(stateSourceObject state.Source) { +func (e *ErrAlreadyBound) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrAlreadyConnected) StateTypeName() string { @@ -84,10 +86,10 @@ func (e *ErrAlreadyConnected) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrAlreadyConnected) afterLoad() {} +func (e *ErrAlreadyConnected) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrAlreadyConnected) StateLoad(stateSourceObject state.Source) { +func (e *ErrAlreadyConnected) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrAlreadyConnecting) StateTypeName() string { @@ -105,10 +107,10 @@ func (e *ErrAlreadyConnecting) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrAlreadyConnecting) afterLoad() {} +func (e *ErrAlreadyConnecting) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrAlreadyConnecting) StateLoad(stateSourceObject state.Source) { +func (e *ErrAlreadyConnecting) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrBadAddress) StateTypeName() string { @@ -126,10 +128,10 @@ func (e *ErrBadAddress) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrBadAddress) afterLoad() {} +func (e *ErrBadAddress) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrBadAddress) StateLoad(stateSourceObject state.Source) { +func (e *ErrBadAddress) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrBadBuffer) StateTypeName() string { @@ -147,10 +149,10 @@ func (e *ErrBadBuffer) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e 
*ErrBadBuffer) afterLoad() {} +func (e *ErrBadBuffer) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrBadBuffer) StateLoad(stateSourceObject state.Source) { +func (e *ErrBadBuffer) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrBadLocalAddress) StateTypeName() string { @@ -168,10 +170,10 @@ func (e *ErrBadLocalAddress) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrBadLocalAddress) afterLoad() {} +func (e *ErrBadLocalAddress) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrBadLocalAddress) StateLoad(stateSourceObject state.Source) { +func (e *ErrBadLocalAddress) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrBroadcastDisabled) StateTypeName() string { @@ -189,10 +191,10 @@ func (e *ErrBroadcastDisabled) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrBroadcastDisabled) afterLoad() {} +func (e *ErrBroadcastDisabled) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrBroadcastDisabled) StateLoad(stateSourceObject state.Source) { +func (e *ErrBroadcastDisabled) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrClosedForReceive) StateTypeName() string { @@ -210,10 +212,10 @@ func (e *ErrClosedForReceive) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrClosedForReceive) afterLoad() {} +func (e *ErrClosedForReceive) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrClosedForReceive) StateLoad(stateSourceObject state.Source) { +func (e *ErrClosedForReceive) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrClosedForSend) StateTypeName() string { @@ -231,10 +233,10 @@ func (e *ErrClosedForSend) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrClosedForSend) afterLoad() {} +func (e *ErrClosedForSend) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrClosedForSend) StateLoad(stateSourceObject 
state.Source) { +func (e *ErrClosedForSend) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrConnectStarted) StateTypeName() string { @@ -252,10 +254,10 @@ func (e *ErrConnectStarted) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrConnectStarted) afterLoad() {} +func (e *ErrConnectStarted) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrConnectStarted) StateLoad(stateSourceObject state.Source) { +func (e *ErrConnectStarted) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrConnectionAborted) StateTypeName() string { @@ -273,10 +275,10 @@ func (e *ErrConnectionAborted) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrConnectionAborted) afterLoad() {} +func (e *ErrConnectionAborted) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrConnectionAborted) StateLoad(stateSourceObject state.Source) { +func (e *ErrConnectionAborted) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrConnectionRefused) StateTypeName() string { @@ -294,10 +296,10 @@ func (e *ErrConnectionRefused) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrConnectionRefused) afterLoad() {} +func (e *ErrConnectionRefused) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrConnectionRefused) StateLoad(stateSourceObject state.Source) { +func (e *ErrConnectionRefused) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrConnectionReset) StateTypeName() string { @@ -315,10 +317,10 @@ func (e *ErrConnectionReset) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrConnectionReset) afterLoad() {} +func (e *ErrConnectionReset) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrConnectionReset) StateLoad(stateSourceObject state.Source) { +func (e *ErrConnectionReset) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrDestinationRequired) 
StateTypeName() string { @@ -336,10 +338,10 @@ func (e *ErrDestinationRequired) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrDestinationRequired) afterLoad() {} +func (e *ErrDestinationRequired) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrDestinationRequired) StateLoad(stateSourceObject state.Source) { +func (e *ErrDestinationRequired) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrDuplicateAddress) StateTypeName() string { @@ -357,10 +359,10 @@ func (e *ErrDuplicateAddress) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrDuplicateAddress) afterLoad() {} +func (e *ErrDuplicateAddress) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrDuplicateAddress) StateLoad(stateSourceObject state.Source) { +func (e *ErrDuplicateAddress) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrDuplicateNICID) StateTypeName() string { @@ -378,10 +380,10 @@ func (e *ErrDuplicateNICID) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrDuplicateNICID) afterLoad() {} +func (e *ErrDuplicateNICID) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrDuplicateNICID) StateLoad(stateSourceObject state.Source) { +func (e *ErrDuplicateNICID) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrInvalidEndpointState) StateTypeName() string { @@ -399,10 +401,10 @@ func (e *ErrInvalidEndpointState) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrInvalidEndpointState) afterLoad() {} +func (e *ErrInvalidEndpointState) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrInvalidEndpointState) StateLoad(stateSourceObject state.Source) { +func (e *ErrInvalidEndpointState) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrInvalidOptionValue) StateTypeName() string { @@ -420,10 +422,10 @@ func (e *ErrInvalidOptionValue) StateSave(stateSinkObject 
state.Sink) { e.beforeSave() } -func (e *ErrInvalidOptionValue) afterLoad() {} +func (e *ErrInvalidOptionValue) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrInvalidOptionValue) StateLoad(stateSourceObject state.Source) { +func (e *ErrInvalidOptionValue) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrInvalidPortRange) StateTypeName() string { @@ -441,10 +443,10 @@ func (e *ErrInvalidPortRange) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrInvalidPortRange) afterLoad() {} +func (e *ErrInvalidPortRange) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrInvalidPortRange) StateLoad(stateSourceObject state.Source) { +func (e *ErrInvalidPortRange) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrMalformedHeader) StateTypeName() string { @@ -462,10 +464,10 @@ func (e *ErrMalformedHeader) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrMalformedHeader) afterLoad() {} +func (e *ErrMalformedHeader) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrMalformedHeader) StateLoad(stateSourceObject state.Source) { +func (e *ErrMalformedHeader) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrMessageTooLong) StateTypeName() string { @@ -483,10 +485,10 @@ func (e *ErrMessageTooLong) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrMessageTooLong) afterLoad() {} +func (e *ErrMessageTooLong) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrMessageTooLong) StateLoad(stateSourceObject state.Source) { +func (e *ErrMessageTooLong) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrNetworkUnreachable) StateTypeName() string { @@ -504,10 +506,10 @@ func (e *ErrNetworkUnreachable) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrNetworkUnreachable) afterLoad() {} +func (e *ErrNetworkUnreachable) afterLoad(context.Context) {} // 
+checklocksignore -func (e *ErrNetworkUnreachable) StateLoad(stateSourceObject state.Source) { +func (e *ErrNetworkUnreachable) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrNoBufferSpace) StateTypeName() string { @@ -525,10 +527,10 @@ func (e *ErrNoBufferSpace) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrNoBufferSpace) afterLoad() {} +func (e *ErrNoBufferSpace) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrNoBufferSpace) StateLoad(stateSourceObject state.Source) { +func (e *ErrNoBufferSpace) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrNoPortAvailable) StateTypeName() string { @@ -546,10 +548,10 @@ func (e *ErrNoPortAvailable) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrNoPortAvailable) afterLoad() {} +func (e *ErrNoPortAvailable) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrNoPortAvailable) StateLoad(stateSourceObject state.Source) { +func (e *ErrNoPortAvailable) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrHostUnreachable) StateTypeName() string { @@ -567,10 +569,10 @@ func (e *ErrHostUnreachable) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrHostUnreachable) afterLoad() {} +func (e *ErrHostUnreachable) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrHostUnreachable) StateLoad(stateSourceObject state.Source) { +func (e *ErrHostUnreachable) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrHostDown) StateTypeName() string { @@ -588,10 +590,10 @@ func (e *ErrHostDown) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrHostDown) afterLoad() {} +func (e *ErrHostDown) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrHostDown) StateLoad(stateSourceObject state.Source) { +func (e *ErrHostDown) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrNoNet) 
StateTypeName() string { @@ -609,10 +611,10 @@ func (e *ErrNoNet) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrNoNet) afterLoad() {} +func (e *ErrNoNet) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrNoNet) StateLoad(stateSourceObject state.Source) { +func (e *ErrNoNet) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrNoSuchFile) StateTypeName() string { @@ -630,10 +632,10 @@ func (e *ErrNoSuchFile) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrNoSuchFile) afterLoad() {} +func (e *ErrNoSuchFile) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrNoSuchFile) StateLoad(stateSourceObject state.Source) { +func (e *ErrNoSuchFile) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrNotConnected) StateTypeName() string { @@ -651,10 +653,10 @@ func (e *ErrNotConnected) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrNotConnected) afterLoad() {} +func (e *ErrNotConnected) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrNotConnected) StateLoad(stateSourceObject state.Source) { +func (e *ErrNotConnected) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrNotPermitted) StateTypeName() string { @@ -672,10 +674,10 @@ func (e *ErrNotPermitted) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrNotPermitted) afterLoad() {} +func (e *ErrNotPermitted) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrNotPermitted) StateLoad(stateSourceObject state.Source) { +func (e *ErrNotPermitted) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrNotSupported) StateTypeName() string { @@ -693,10 +695,10 @@ func (e *ErrNotSupported) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrNotSupported) afterLoad() {} +func (e *ErrNotSupported) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrNotSupported) 
StateLoad(stateSourceObject state.Source) { +func (e *ErrNotSupported) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrPortInUse) StateTypeName() string { @@ -714,10 +716,10 @@ func (e *ErrPortInUse) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrPortInUse) afterLoad() {} +func (e *ErrPortInUse) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrPortInUse) StateLoad(stateSourceObject state.Source) { +func (e *ErrPortInUse) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrQueueSizeNotSupported) StateTypeName() string { @@ -735,10 +737,10 @@ func (e *ErrQueueSizeNotSupported) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrQueueSizeNotSupported) afterLoad() {} +func (e *ErrQueueSizeNotSupported) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrQueueSizeNotSupported) StateLoad(stateSourceObject state.Source) { +func (e *ErrQueueSizeNotSupported) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrTimeout) StateTypeName() string { @@ -756,10 +758,10 @@ func (e *ErrTimeout) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrTimeout) afterLoad() {} +func (e *ErrTimeout) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrTimeout) StateLoad(stateSourceObject state.Source) { +func (e *ErrTimeout) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrUnknownDevice) StateTypeName() string { @@ -777,10 +779,10 @@ func (e *ErrUnknownDevice) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrUnknownDevice) afterLoad() {} +func (e *ErrUnknownDevice) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrUnknownDevice) StateLoad(stateSourceObject state.Source) { +func (e *ErrUnknownDevice) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrUnknownNICID) StateTypeName() string { @@ -798,10 +800,10 @@ func (e 
*ErrUnknownNICID) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrUnknownNICID) afterLoad() {} +func (e *ErrUnknownNICID) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrUnknownNICID) StateLoad(stateSourceObject state.Source) { +func (e *ErrUnknownNICID) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrUnknownProtocol) StateTypeName() string { @@ -819,10 +821,10 @@ func (e *ErrUnknownProtocol) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrUnknownProtocol) afterLoad() {} +func (e *ErrUnknownProtocol) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrUnknownProtocol) StateLoad(stateSourceObject state.Source) { +func (e *ErrUnknownProtocol) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrUnknownProtocolOption) StateTypeName() string { @@ -840,10 +842,10 @@ func (e *ErrUnknownProtocolOption) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrUnknownProtocolOption) afterLoad() {} +func (e *ErrUnknownProtocolOption) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrUnknownProtocolOption) StateLoad(stateSourceObject state.Source) { +func (e *ErrUnknownProtocolOption) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrWouldBlock) StateTypeName() string { @@ -861,10 +863,10 @@ func (e *ErrWouldBlock) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrWouldBlock) afterLoad() {} +func (e *ErrWouldBlock) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrWouldBlock) StateLoad(stateSourceObject state.Source) { +func (e *ErrWouldBlock) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrMissingRequiredFields) StateTypeName() string { @@ -882,10 +884,10 @@ func (e *ErrMissingRequiredFields) StateSave(stateSinkObject state.Sink) { e.beforeSave() } -func (e *ErrMissingRequiredFields) afterLoad() {} +func (e 
*ErrMissingRequiredFields) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrMissingRequiredFields) StateLoad(stateSourceObject state.Source) { +func (e *ErrMissingRequiredFields) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (e *ErrMulticastInputCannotBeOutput) StateTypeName() string { @@ -903,10 +905,10 @@ func (e *ErrMulticastInputCannotBeOutput) StateSave(stateSinkObject state.Sink) e.beforeSave() } -func (e *ErrMulticastInputCannotBeOutput) afterLoad() {} +func (e *ErrMulticastInputCannotBeOutput) afterLoad(context.Context) {} // +checklocksignore -func (e *ErrMulticastInputCannotBeOutput) StateLoad(stateSourceObject state.Source) { +func (e *ErrMulticastInputCannotBeOutput) StateLoad(ctx context.Context, stateSourceObject state.Source) { } func (l *sockErrorList) StateTypeName() string { @@ -929,10 +931,10 @@ func (l *sockErrorList) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &l.tail) } -func (l *sockErrorList) afterLoad() {} +func (l *sockErrorList) afterLoad(context.Context) {} // +checklocksignore -func (l *sockErrorList) StateLoad(stateSourceObject state.Source) { +func (l *sockErrorList) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &l.head) stateSourceObject.Load(1, &l.tail) } @@ -957,10 +959,10 @@ func (e *sockErrorEntry) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &e.prev) } -func (e *sockErrorEntry) afterLoad() {} +func (e *sockErrorEntry) afterLoad(context.Context) {} // +checklocksignore -func (e *sockErrorEntry) StateLoad(stateSourceObject state.Source) { +func (e *sockErrorEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &e.next) stateSourceObject.Load(1, &e.prev) } @@ -1037,10 +1039,10 @@ func (so *SocketOptions) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(27, &so.rcvlowat) } -func (so *SocketOptions) afterLoad() {} +func (so *SocketOptions) 
afterLoad(context.Context) {} // +checklocksignore -func (so *SocketOptions) StateLoad(stateSourceObject state.Source) { +func (so *SocketOptions) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &so.handler) stateSourceObject.Load(1, &so.broadcastEnabled) stateSourceObject.Load(2, &so.passCredEnabled) @@ -1089,10 +1091,10 @@ func (l *LocalSockError) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &l.info) } -func (l *LocalSockError) afterLoad() {} +func (l *LocalSockError) afterLoad(context.Context) {} // +checklocksignore -func (l *LocalSockError) StateLoad(stateSourceObject state.Source) { +func (l *LocalSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &l.info) } @@ -1126,10 +1128,10 @@ func (s *SockError) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(6, &s.NetProto) } -func (s *SockError) afterLoad() {} +func (s *SockError) afterLoad(context.Context) {} // +checklocksignore -func (s *SockError) StateLoad(stateSourceObject state.Source) { +func (s *SockError) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &s.sockErrorEntry) stateSourceObject.Load(1, &s.Err) stateSourceObject.Load(2, &s.Cause) @@ -1156,9 +1158,9 @@ func (s *stdClock) StateSave(stateSinkObject state.Sink) { } // +checklocksignore -func (s *stdClock) StateLoad(stateSourceObject state.Source) { +func (s *stdClock) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &s.monotonicOffset) - stateSourceObject.AfterLoad(s.afterLoad) + stateSourceObject.AfterLoad(func() { s.afterLoad(ctx) }) } func (mt *MonotonicTime) StateTypeName() string { @@ -1179,10 +1181,10 @@ func (mt *MonotonicTime) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &mt.nanoseconds) } -func (mt *MonotonicTime) afterLoad() {} +func (mt *MonotonicTime) afterLoad(context.Context) {} // +checklocksignore -func (mt 
*MonotonicTime) StateLoad(stateSourceObject state.Source) { +func (mt *MonotonicTime) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &mt.nanoseconds) } @@ -1206,10 +1208,10 @@ func (a *Address) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &a.length) } -func (a *Address) afterLoad() {} +func (a *Address) afterLoad(context.Context) {} // +checklocksignore -func (a *Address) StateLoad(stateSourceObject state.Source) { +func (a *Address) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &a.addr) stateSourceObject.Load(1, &a.length) } @@ -1221,6 +1223,7 @@ func (m *AddressMask) StateTypeName() string { func (m *AddressMask) StateFields() []string { return []string{ "mask", + "length", } } @@ -1230,13 +1233,15 @@ func (m *AddressMask) beforeSave() {} func (m *AddressMask) StateSave(stateSinkObject state.Sink) { m.beforeSave() stateSinkObject.Save(0, &m.mask) + stateSinkObject.Save(1, &m.length) } -func (m *AddressMask) afterLoad() {} +func (m *AddressMask) afterLoad(context.Context) {} // +checklocksignore -func (m *AddressMask) StateLoad(stateSourceObject state.Source) { +func (m *AddressMask) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &m.mask) + stateSourceObject.Load(1, &m.length) } func (f *FullAddress) StateTypeName() string { @@ -1263,10 +1268,10 @@ func (f *FullAddress) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(3, &f.LinkAddr) } -func (f *FullAddress) afterLoad() {} +func (f *FullAddress) afterLoad(context.Context) {} // +checklocksignore -func (f *FullAddress) StateLoad(stateSourceObject state.Source) { +func (f *FullAddress) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &f.NIC) stateSourceObject.Load(1, &f.Addr) stateSourceObject.Load(2, &f.Port) @@ -1301,10 +1306,10 @@ func (s *SendableControlMessages) StateSave(stateSinkObject state.Sink) { 
stateSinkObject.Save(5, &s.IPv6PacketInfo) } -func (s *SendableControlMessages) afterLoad() {} +func (s *SendableControlMessages) afterLoad(context.Context) {} // +checklocksignore -func (s *SendableControlMessages) StateLoad(stateSourceObject state.Source) { +func (s *SendableControlMessages) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &s.HasTTL) stateSourceObject.Load(1, &s.TTL) stateSourceObject.Load(2, &s.HasHopLimit) @@ -1369,10 +1374,10 @@ func (c *ReceivableControlMessages) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(18, &c.SockErr) } -func (c *ReceivableControlMessages) afterLoad() {} +func (c *ReceivableControlMessages) afterLoad(context.Context) {} // +checklocksignore -func (c *ReceivableControlMessages) StateLoad(stateSourceObject state.Source) { +func (c *ReceivableControlMessages) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(1, &c.HasInq) stateSourceObject.Load(2, &c.Inq) stateSourceObject.Load(3, &c.HasTOS) @@ -1391,7 +1396,7 @@ func (c *ReceivableControlMessages) StateLoad(stateSourceObject state.Source) { stateSourceObject.Load(16, &c.HasOriginalDstAddress) stateSourceObject.Load(17, &c.OriginalDstAddress) stateSourceObject.Load(18, &c.SockErr) - stateSourceObject.LoadValue(0, new(int64), func(y any) { c.loadTimestamp(y.(int64)) }) + stateSourceObject.LoadValue(0, new(int64), func(y any) { c.loadTimestamp(ctx, y.(int64)) }) } func (l *LinkPacketInfo) StateTypeName() string { @@ -1414,10 +1419,10 @@ func (l *LinkPacketInfo) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &l.PktType) } -func (l *LinkPacketInfo) afterLoad() {} +func (l *LinkPacketInfo) afterLoad(context.Context) {} // +checklocksignore -func (l *LinkPacketInfo) StateLoad(stateSourceObject state.Source) { +func (l *LinkPacketInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &l.Protocol) stateSourceObject.Load(1, 
&l.PktType) } @@ -1440,10 +1445,10 @@ func (f *ICMPv6Filter) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &f.DenyType) } -func (f *ICMPv6Filter) afterLoad() {} +func (f *ICMPv6Filter) afterLoad(context.Context) {} // +checklocksignore -func (f *ICMPv6Filter) StateLoad(stateSourceObject state.Source) { +func (f *ICMPv6Filter) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &f.DenyType) } @@ -1467,10 +1472,10 @@ func (l *LingerOption) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &l.Timeout) } -func (l *LingerOption) afterLoad() {} +func (l *LingerOption) afterLoad(context.Context) {} // +checklocksignore -func (l *LingerOption) StateLoad(stateSourceObject state.Source) { +func (l *LingerOption) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &l.Enabled) stateSourceObject.Load(1, &l.Timeout) } @@ -1497,10 +1502,10 @@ func (i *IPPacketInfo) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(2, &i.DestinationAddr) } -func (i *IPPacketInfo) afterLoad() {} +func (i *IPPacketInfo) afterLoad(context.Context) {} // +checklocksignore -func (i *IPPacketInfo) StateLoad(stateSourceObject state.Source) { +func (i *IPPacketInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &i.NIC) stateSourceObject.Load(1, &i.LocalAddr) stateSourceObject.Load(2, &i.DestinationAddr) @@ -1526,10 +1531,10 @@ func (i *IPv6PacketInfo) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &i.NIC) } -func (i *IPv6PacketInfo) afterLoad() {} +func (i *IPv6PacketInfo) afterLoad(context.Context) {} // +checklocksignore -func (i *IPv6PacketInfo) StateLoad(stateSourceObject state.Source) { +func (i *IPv6PacketInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &i.Addr) stateSourceObject.Load(1, &i.NIC) } @@ -1552,10 +1557,10 @@ func (s *StatCounter) StateSave(stateSinkObject 
state.Sink) { stateSinkObject.Save(0, &s.count) } -func (s *StatCounter) afterLoad() {} +func (s *StatCounter) afterLoad(context.Context) {} // +checklocksignore -func (s *StatCounter) StateLoad(stateSourceObject state.Source) { +func (s *StatCounter) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &s.count) } @@ -1583,10 +1588,10 @@ func (r *ReceiveErrors) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(3, &r.ChecksumErrors) } -func (r *ReceiveErrors) afterLoad() {} +func (r *ReceiveErrors) afterLoad(context.Context) {} // +checklocksignore -func (r *ReceiveErrors) StateLoad(stateSourceObject state.Source) { +func (r *ReceiveErrors) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &r.ReceiveBufferOverflow) stateSourceObject.Load(1, &r.MalformedPacketsReceived) stateSourceObject.Load(2, &r.ClosedReceiver) @@ -1613,10 +1618,10 @@ func (s *SendErrors) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &s.NoRoute) } -func (s *SendErrors) afterLoad() {} +func (s *SendErrors) afterLoad(context.Context) {} // +checklocksignore -func (s *SendErrors) StateLoad(stateSourceObject state.Source) { +func (s *SendErrors) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &s.SendToNetworkFailed) stateSourceObject.Load(1, &s.NoRoute) } @@ -1643,10 +1648,10 @@ func (r *ReadErrors) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(2, &r.NotConnected) } -func (r *ReadErrors) afterLoad() {} +func (r *ReadErrors) afterLoad(context.Context) {} // +checklocksignore -func (r *ReadErrors) StateLoad(stateSourceObject state.Source) { +func (r *ReadErrors) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &r.ReadClosed) stateSourceObject.Load(1, &r.InvalidEndpointState) stateSourceObject.Load(2, &r.NotConnected) @@ -1674,10 +1679,10 @@ func (w *WriteErrors) StateSave(stateSinkObject 
state.Sink) { stateSinkObject.Save(2, &w.InvalidArgs) } -func (w *WriteErrors) afterLoad() {} +func (w *WriteErrors) afterLoad(context.Context) {} // +checklocksignore -func (w *WriteErrors) StateLoad(stateSourceObject state.Source) { +func (w *WriteErrors) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &w.WriteClosed) stateSourceObject.Load(1, &w.InvalidEndpointState) stateSourceObject.Load(2, &w.InvalidArgs) @@ -1711,10 +1716,10 @@ func (src *TransportEndpointStats) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(5, &src.WriteErrors) } -func (src *TransportEndpointStats) afterLoad() {} +func (src *TransportEndpointStats) afterLoad(context.Context) {} // +checklocksignore -func (src *TransportEndpointStats) StateLoad(stateSourceObject state.Source) { +func (src *TransportEndpointStats) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &src.PacketsReceived) stateSourceObject.Load(1, &src.PacketsSent) stateSourceObject.Load(2, &src.ReceiveErrors) @@ -1743,10 +1748,10 @@ func (a *AddressWithPrefix) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &a.PrefixLen) } -func (a *AddressWithPrefix) afterLoad() {} +func (a *AddressWithPrefix) afterLoad(context.Context) {} // +checklocksignore -func (a *AddressWithPrefix) StateLoad(stateSourceObject state.Source) { +func (a *AddressWithPrefix) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &a.Address) stateSourceObject.Load(1, &a.PrefixLen) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/endpoint.go index 880099f1..9d61e881 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/endpoint.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/endpoint.go @@ -265,7 +265,7 @@ func (c *WriteContext) PacketInfo() WritePacketInfo { // // If 
this method returns nil, the caller should wait for the endpoint to become // writable. -func (c *WriteContext) TryNewPacketBuffer(reserveHdrBytes int, data buffer.Buffer) stack.PacketBufferPtr { +func (c *WriteContext) TryNewPacketBuffer(reserveHdrBytes int, data buffer.Buffer) *stack.PacketBuffer { e := c.e e.sendBufferSizeInUseMu.Lock() @@ -308,7 +308,7 @@ func (c *WriteContext) TryNewPacketBuffer(reserveHdrBytes int, data buffer.Buffe } // WritePacket attempts to write the packet. -func (c *WriteContext) WritePacket(pkt stack.PacketBufferPtr, headerIncluded bool) tcpip.Error { +func (c *WriteContext) WritePacket(pkt *stack.PacketBuffer, headerIncluded bool) tcpip.Error { c.e.mu.RLock() pkt.Owner = c.e.owner c.e.mu.RUnlock() @@ -411,7 +411,7 @@ func (e *Endpoint) AcquireContextForWrite(opts tcpip.WriteOptions) (WriteContext // interface/address used to send the packet so we need to construct // a new route instead of using the connected route. // - // Contruct a destination matching the remote the endpoint is connected + // Construct a destination matching the remote the endpoint is connected // to. to = &tcpip.FullAddress{ // RegisterNICID is set when the endpoint is connected. 
It is usually diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/network_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/network_state_autogen.go index 053e1d5d..f3e38fc8 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/network_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/network_state_autogen.go @@ -3,6 +3,8 @@ package network import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -58,10 +60,10 @@ func (e *Endpoint) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(17, &e.state) } -func (e *Endpoint) afterLoad() {} +func (e *Endpoint) afterLoad(context.Context) {} // +checklocksignore -func (e *Endpoint) StateLoad(stateSourceObject state.Source) { +func (e *Endpoint) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &e.ops) stateSourceObject.Load(1, &e.netProto) stateSourceObject.Load(2, &e.transProto) @@ -102,10 +104,10 @@ func (m *multicastMembership) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &m.multicastAddr) } -func (m *multicastMembership) afterLoad() {} +func (m *multicastMembership) afterLoad(context.Context) {} // +checklocksignore -func (m *multicastMembership) StateLoad(stateSourceObject state.Source) { +func (m *multicastMembership) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &m.nicID) stateSourceObject.Load(1, &m.multicastAddr) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop/endpoint.go index 3e9c4c4b..be2adae1 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop/endpoint.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop/endpoint.go @@ -137,7 +137,7 @@ func (*endpoint) GetSockOptInt(tcpip.SockOptInt) (int, tcpip.Error) { } // HandlePacket implements 
stack.RawTransportEndpoint.HandlePacket. -func (*endpoint) HandlePacket(pkt stack.PacketBufferPtr) { +func (*endpoint) HandlePacket(pkt *stack.PacketBuffer) { panic(fmt.Sprintf("unreachable: noop.endpoint should never be registered, but got packet: %+v", pkt)) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop/noop_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop/noop_state_autogen.go index 27d6064a..ac5a8611 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop/noop_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop/noop_state_autogen.go @@ -3,6 +3,8 @@ package noop import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -26,10 +28,10 @@ func (ep *endpoint) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &ep.ops) } -func (ep *endpoint) afterLoad() {} +func (ep *endpoint) afterLoad(context.Context) {} // +checklocksignore -func (ep *endpoint) StateLoad(stateSourceObject state.Source) { +func (ep *endpoint) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &ep.DefaultSocketOptionsHandler) stateSourceObject.Load(1, &ep.ops) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/endpoint.go index 46a7f82a..9166bca6 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/endpoint.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/endpoint.go @@ -40,7 +40,7 @@ import ( type packet struct { packetEntry // data holds the actual packet data, including any headers and payload. - data stack.PacketBufferPtr + data *stack.PacketBuffer receivedAt time.Time `state:".(int64)"` // senderAddr is the network address of the sender. senderAddr tcpip.FullAddress @@ -94,7 +94,7 @@ type endpoint struct { } // NewEndpoint returns a new packet endpoint. 
-func NewEndpoint(s *stack.Stack, cooked bool, netProto tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) (tcpip.Endpoint, tcpip.Error) { +func NewEndpoint(s *stack.Stack, cooked bool, netProto tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) tcpip.Endpoint { ep := &endpoint{ stack: s, cooked: cooked, @@ -115,10 +115,9 @@ func NewEndpoint(s *stack.Stack, cooked bool, netProto tcpip.NetworkProtocolNumb ep.ops.SetReceiveBufferSize(int64(rs.Default), false /* notify */) } - if err := s.RegisterPacketEndpoint(0, netProto, ep); err != nil { - return nil, err - } - return ep, nil + s.RegisterPacketEndpoint(0, netProto, ep) + + return ep } // Abort implements stack.TransportEndpoint.Abort. @@ -263,7 +262,7 @@ func (*endpoint) Disconnect() tcpip.Error { } // Connect implements tcpip.Endpoint.Connect. Packet sockets cannot be -// connected, and this function always returnes *tcpip.ErrNotSupported. +// connected, and this function always returns *tcpip.ErrNotSupported. func (*endpoint) Connect(tcpip.FullAddress) tcpip.Error { return &tcpip.ErrNotSupported{} } @@ -417,7 +416,7 @@ func (ep *endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, tcpip.Error) { } // HandlePacket implements stack.PacketEndpoint.HandlePacket. -func (ep *endpoint) HandlePacket(nicID tcpip.NICID, netProto tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) { +func (ep *endpoint) HandlePacket(nicID tcpip.NICID, netProto tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) { ep.rcvMu.Lock() // Drop the packet if our buffer is currently full. 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/endpoint_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/endpoint_state.go index 74203fa8..16be7d6b 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/endpoint_state.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/endpoint_state.go @@ -15,6 +15,7 @@ package packet import ( + "context" "fmt" "time" @@ -28,7 +29,7 @@ func (p *packet) saveReceivedAt() int64 { } // loadReceivedAt is invoked by stateify. -func (p *packet) loadReceivedAt(nsec int64) { +func (p *packet) loadReceivedAt(_ context.Context, nsec int64) { p.receivedAt = time.Unix(0, nsec) } @@ -37,14 +38,15 @@ func (ep *endpoint) beforeSave() { ep.rcvMu.Lock() defer ep.rcvMu.Unlock() ep.rcvDisabled = true + ep.stack.RegisterResumableEndpoint(ep) } // afterLoad is invoked by stateify. -func (ep *endpoint) afterLoad() { +func (ep *endpoint) afterLoad(ctx context.Context) { ep.mu.Lock() defer ep.mu.Unlock() - ep.stack = stack.StackFromEnv + ep.stack = stack.RestoreStackFromContext(ctx) ep.ops.InitHandler(ep, ep.stack, tcpip.GetStackSendBufferLimits, tcpip.GetStackReceiveBufferLimits) if err := ep.stack.RegisterPacketEndpoint(ep.boundNIC, ep.boundNetProto, ep); err != nil { @@ -55,3 +57,10 @@ func (ep *endpoint) afterLoad() { ep.rcvDisabled = false ep.rcvMu.Unlock() } + +// Resume implements tcpip.ResumableEndpoint.Resume. 
+func (ep *endpoint) Resume() { + ep.rcvMu.Lock() + defer ep.rcvMu.Unlock() + ep.rcvDisabled = false +} diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/packet_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/packet_state_autogen.go index 5e344c8d..7e2f7fda 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/packet_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/packet_state_autogen.go @@ -3,6 +3,8 @@ package packet import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -34,15 +36,15 @@ func (p *packet) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(4, &p.packetInfo) } -func (p *packet) afterLoad() {} +func (p *packet) afterLoad(context.Context) {} // +checklocksignore -func (p *packet) StateLoad(stateSourceObject state.Source) { +func (p *packet) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &p.packetEntry) stateSourceObject.Load(1, &p.data) stateSourceObject.Load(3, &p.senderAddr) stateSourceObject.Load(4, &p.packetInfo) - stateSourceObject.LoadValue(2, new(int64), func(y any) { p.loadReceivedAt(y.(int64)) }) + stateSourceObject.LoadValue(2, new(int64), func(y any) { p.loadReceivedAt(ctx, y.(int64)) }) } func (ep *endpoint) StateTypeName() string { @@ -86,7 +88,7 @@ func (ep *endpoint) StateSave(stateSinkObject state.Sink) { } // +checklocksignore -func (ep *endpoint) StateLoad(stateSourceObject state.Source) { +func (ep *endpoint) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &ep.DefaultSocketOptionsHandler) stateSourceObject.Load(1, &ep.waiterQueue) stateSourceObject.Load(2, &ep.cooked) @@ -100,7 +102,7 @@ func (ep *endpoint) StateLoad(stateSourceObject state.Source) { stateSourceObject.Load(10, &ep.boundNetProto) stateSourceObject.Load(11, &ep.boundNIC) stateSourceObject.Load(12, &ep.lastError) - stateSourceObject.AfterLoad(ep.afterLoad) + stateSourceObject.AfterLoad(func() { 
ep.afterLoad(ctx) }) } func (l *packetList) StateTypeName() string { @@ -123,10 +125,10 @@ func (l *packetList) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &l.tail) } -func (l *packetList) afterLoad() {} +func (l *packetList) afterLoad(context.Context) {} // +checklocksignore -func (l *packetList) StateLoad(stateSourceObject state.Source) { +func (l *packetList) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &l.head) stateSourceObject.Load(1, &l.tail) } @@ -151,10 +153,10 @@ func (e *packetEntry) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &e.prev) } -func (e *packetEntry) afterLoad() {} +func (e *packetEntry) afterLoad(context.Context) {} // +checklocksignore -func (e *packetEntry) StateLoad(stateSourceObject state.Source) { +func (e *packetEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &e.next) stateSourceObject.Load(1, &e.prev) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/endpoint.go index 476932d2..1eaedc19 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/endpoint.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/endpoint.go @@ -46,7 +46,7 @@ type rawPacket struct { rawPacketEntry // data holds the actual packet data, including any headers and // payload. - data stack.PacketBufferPtr + data *stack.PacketBuffer receivedAt time.Time `state:".(int64)"` // senderAddr is the network address of the sender. 
senderAddr tcpip.FullAddress @@ -377,7 +377,7 @@ func (e *endpoint) write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcp } pkt := ctx.TryNewPacketBuffer(int(ctx.PacketInfo().MaxHeaderLength), payload.Clone()) - if pkt.IsNil() { + if pkt == nil { return 0, &tcpip.ErrWouldBlock{} } defer pkt.DecRef() @@ -586,7 +586,7 @@ func (e *endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, tcpip.Error) { } // HandlePacket implements stack.RawTransportEndpoint.HandlePacket. -func (e *endpoint) HandlePacket(pkt stack.PacketBufferPtr) { +func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) { notifyReadableEvents := func() bool { e.mu.RLock() defer e.mu.RUnlock() diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/endpoint_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/endpoint_state.go index 1bda0b8b..d915ade2 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/endpoint_state.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/endpoint_state.go @@ -15,6 +15,7 @@ package raw import ( + "context" "fmt" "time" @@ -28,22 +29,23 @@ func (p *rawPacket) saveReceivedAt() int64 { } // loadReceivedAt is invoked by stateify. -func (p *rawPacket) loadReceivedAt(nsec int64) { +func (p *rawPacket) loadReceivedAt(_ context.Context, nsec int64) { p.receivedAt = time.Unix(0, nsec) } // afterLoad is invoked by stateify. -func (e *endpoint) afterLoad() { - stack.StackFromEnv.RegisterRestoredEndpoint(e) +func (e *endpoint) afterLoad(ctx context.Context) { + stack.RestoreStackFromContext(ctx).RegisterRestoredEndpoint(e) } // beforeSave is invoked by stateify. func (e *endpoint) beforeSave() { e.setReceiveDisabled(true) + e.stack.RegisterResumableEndpoint(e) } -// Resume implements tcpip.ResumableEndpoint.Resume. -func (e *endpoint) Resume(s *stack.Stack) { +// Restore implements tcpip.RestoredEndpoint.Restore. 
+func (e *endpoint) Restore(s *stack.Stack) { e.net.Resume(s) e.setReceiveDisabled(false) @@ -57,3 +59,8 @@ func (e *endpoint) Resume(s *stack.Stack) { } } } + +// Resume implements tcpip.ResumableEndpoint.Resume. +func (e *endpoint) Resume() { + e.setReceiveDisabled(false) +} diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/protocol.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/protocol.go index 624e2dbe..63122b1c 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/protocol.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/protocol.go @@ -32,7 +32,7 @@ func (EndpointFactory) NewUnassociatedEndpoint(stack *stack.Stack, netProto tcpi // NewPacketEndpoint implements stack.RawFactory.NewPacketEndpoint. func (EndpointFactory) NewPacketEndpoint(stack *stack.Stack, cooked bool, netProto tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) (tcpip.Endpoint, tcpip.Error) { - return packet.NewEndpoint(stack, cooked, netProto, waiterQueue) + return packet.NewEndpoint(stack, cooked, netProto, waiterQueue), nil } // CreateOnlyFactory implements stack.RawFactory. 
It allows creation of raw diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/raw_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/raw_state_autogen.go index 1c9bbdbb..1dc91f84 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/raw_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/raw_state_autogen.go @@ -3,6 +3,8 @@ package raw import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -38,17 +40,17 @@ func (p *rawPacket) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(6, &p.ttlOrHopLimit) } -func (p *rawPacket) afterLoad() {} +func (p *rawPacket) afterLoad(context.Context) {} // +checklocksignore -func (p *rawPacket) StateLoad(stateSourceObject state.Source) { +func (p *rawPacket) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &p.rawPacketEntry) stateSourceObject.Load(1, &p.data) stateSourceObject.Load(3, &p.senderAddr) stateSourceObject.Load(4, &p.packetInfo) stateSourceObject.Load(5, &p.tosOrTClass) stateSourceObject.Load(6, &p.ttlOrHopLimit) - stateSourceObject.LoadValue(2, new(int64), func(y any) { p.loadReceivedAt(y.(int64)) }) + stateSourceObject.LoadValue(2, new(int64), func(y any) { p.loadReceivedAt(ctx, y.(int64)) }) } func (e *endpoint) StateTypeName() string { @@ -92,7 +94,7 @@ func (e *endpoint) StateSave(stateSinkObject state.Sink) { } // +checklocksignore -func (e *endpoint) StateLoad(stateSourceObject state.Source) { +func (e *endpoint) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &e.DefaultSocketOptionsHandler) stateSourceObject.Load(1, &e.transProto) stateSourceObject.Load(2, &e.waiterQueue) @@ -106,7 +108,7 @@ func (e *endpoint) StateLoad(stateSourceObject state.Source) { stateSourceObject.Load(10, &e.rcvDisabled) stateSourceObject.Load(11, &e.ipv6ChecksumOffset) stateSourceObject.Load(12, &e.icmpv6Filter) - stateSourceObject.AfterLoad(e.afterLoad) + stateSourceObject.AfterLoad(func() 
{ e.afterLoad(ctx) }) } func (l *rawPacketList) StateTypeName() string { @@ -129,10 +131,10 @@ func (l *rawPacketList) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &l.tail) } -func (l *rawPacketList) afterLoad() {} +func (l *rawPacketList) afterLoad(context.Context) {} // +checklocksignore -func (l *rawPacketList) StateLoad(stateSourceObject state.Source) { +func (l *rawPacketList) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &l.head) stateSourceObject.Load(1, &l.tail) } @@ -157,10 +159,10 @@ func (e *rawPacketEntry) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &e.prev) } -func (e *rawPacketEntry) afterLoad() {} +func (e *rawPacketEntry) afterLoad(context.Context) {} // +checklocksignore -func (e *rawPacketEntry) StateLoad(stateSourceObject state.Source) { +func (e *rawPacketEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &e.next) stateSourceObject.Load(1, &e.prev) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/accept.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/accept.go index 0006f250..adcfdcfd 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/accept.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/accept.go @@ -85,7 +85,7 @@ type listenContext struct { // listenEP is a reference to the listening endpoint associated with // this context. Can be nil if the context is created by the forwarder. - listenEP *endpoint + listenEP *Endpoint // hasherMu protects hasher. hasherMu sync.Mutex @@ -107,7 +107,7 @@ func timeStamp(clock tcpip.Clock) uint32 { } // newListenContext creates a new listen context. 
-func newListenContext(stk *stack.Stack, protocol *protocol, listenEP *endpoint, rcvWnd seqnum.Size, v6Only bool, netProto tcpip.NetworkProtocolNumber) *listenContext { +func newListenContext(stk *stack.Stack, protocol *protocol, listenEP *Endpoint, rcvWnd seqnum.Size, v6Only bool, netProto tcpip.NetworkProtocolNumber) *listenContext { l := &listenContext{ stack: stk, protocol: protocol, @@ -119,7 +119,7 @@ func newListenContext(stk *stack.Stack, protocol *protocol, listenEP *endpoint, } for i := range l.nonce { - if _, err := io.ReadFull(stk.SecureRNG(), l.nonce[i][:]); err != nil { + if _, err := io.ReadFull(stk.SecureRNG().Reader, l.nonce[i][:]); err != nil { panic(err) } } @@ -183,7 +183,7 @@ func (l *listenContext) isCookieValid(id stack.TransportEndpointID, cookie seqnu // the connection parameters given by the arguments. The newly created endpoint // will be locked. // +checklocksacquire:n.mu -func (l *listenContext) createConnectingEndpoint(s *segment, rcvdSynOpts header.TCPSynOptions, queue *waiter.Queue) (n *endpoint, _ tcpip.Error) { +func (l *listenContext) createConnectingEndpoint(s *segment, rcvdSynOpts header.TCPSynOptions, queue *waiter.Queue) (n *Endpoint, _ tcpip.Error) { // Create a new endpoint. netProto := l.netProto if netProto == 0 { @@ -302,7 +302,7 @@ func (l *listenContext) startHandshake(s *segment, opts header.TCPSynOptions, qu // established endpoint is returned. // // Precondition: if l.listenEP != nil, l.listenEP.mu must be locked. 
-func (l *listenContext) performHandshake(s *segment, opts header.TCPSynOptions, queue *waiter.Queue, owner tcpip.PacketOwner) (*endpoint, tcpip.Error) { +func (l *listenContext) performHandshake(s *segment, opts header.TCPSynOptions, queue *waiter.Queue, owner tcpip.PacketOwner) (*Endpoint, tcpip.Error) { waitEntry, notifyCh := waiter.NewChannelEntry(waiter.WritableEvents) queue.EventRegister(&waitEntry) defer queue.EventUnregister(&waitEntry) @@ -357,7 +357,7 @@ func (l *listenContext) performHandshake(s *segment, opts header.TCPSynOptions, // // +checklocks:e.mu // +checklocks:n.mu -func (e *endpoint) propagateInheritableOptionsLocked(n *endpoint) { +func (e *Endpoint) propagateInheritableOptionsLocked(n *Endpoint) { n.userTimeout = e.userTimeout n.portFlags = e.portFlags n.boundBindToDevice = e.boundBindToDevice @@ -370,7 +370,7 @@ func (e *endpoint) propagateInheritableOptionsLocked(n *endpoint) { // Precondition: e.propagateInheritableOptionsLocked has been called. // // +checklocks:e.mu -func (e *endpoint) reserveTupleLocked() bool { +func (e *Endpoint) reserveTupleLocked() bool { dest := tcpip.FullAddress{ Addr: e.TransportEndpointInfo.ID.RemoteAddress, Port: e.TransportEndpointInfo.ID.RemotePort, @@ -400,11 +400,11 @@ func (e *endpoint) reserveTupleLocked() bool { // This is strictly not required normally as a socket that was never accepted // can't really have any registered waiters except when stack.Wait() is called // which waits for all registered endpoints to stop and expects an EventHUp. 
-func (e *endpoint) notifyAborted() { +func (e *Endpoint) notifyAborted() { e.waiterQueue.Notify(waiter.EventHUp | waiter.EventErr | waiter.ReadableEvents | waiter.WritableEvents) } -func (e *endpoint) acceptQueueIsFull() bool { +func (e *Endpoint) acceptQueueIsFull() bool { e.acceptMu.Lock() full := e.acceptQueue.isFull() e.acceptMu.Unlock() @@ -416,11 +416,11 @@ type acceptQueue struct { // NB: this could be an endpointList, but ilist only permits endpoints to // belong to one list at a time, and endpoints are already stored in the // dispatcher's list. - endpoints list.List `state:".([]*endpoint)"` + endpoints list.List `state:".([]*Endpoint)"` // pendingEndpoints is a set of all endpoints for which a handshake is // in progress. - pendingEndpoints map[*endpoint]struct{} + pendingEndpoints map[*Endpoint]struct{} // capacity is the maximum number of endpoints that can be in endpoints. capacity int @@ -434,7 +434,7 @@ func (a *acceptQueue) isFull() bool { // and needs to handle it. // // +checklocks:e.mu -func (e *endpoint) handleListenSegment(ctx *listenContext, s *segment) tcpip.Error { +func (e *Endpoint) handleListenSegment(ctx *listenContext, s *segment) tcpip.Error { e.rcvQueueMu.Lock() rcvClosed := e.RcvClosed e.rcvQueueMu.Unlock() @@ -544,6 +544,39 @@ func (e *endpoint) handleListenSegment(ctx *listenContext, s *segment) tcpip.Err iss := s.ackNumber - 1 irs := s.sequenceNumber - 1 + // As an edge case when SYN-COOKIES are in use and we receive a + // segment that has data and is valid we should check if it + // already matches a created endpoint and redirect the segment + // rather than try and create a new endpoint. This can happen + // where the final ACK for the handshake and other data packets + // arrive at the same time and are queued to the listening + // endpoint before the listening endpoint has had time to + // process the first ACK and create the endpoint that matches + // the incoming packet's full 5 tuple. 
+ netProtos := []tcpip.NetworkProtocolNumber{s.pkt.NetworkProtocolNumber} + // If the local address is an IPv4 Address then also look for IPv6 + // dual stack endpoints. + if s.id.LocalAddress.To4() != (tcpip.Address{}) { + netProtos = []tcpip.NetworkProtocolNumber{header.IPv4ProtocolNumber, header.IPv6ProtocolNumber} + } + for _, netProto := range netProtos { + if newEP := e.stack.FindTransportEndpoint(netProto, ProtocolNumber, s.id, s.pkt.NICID); newEP != nil && newEP != e { + tcpEP := newEP.(*Endpoint) + if !tcpEP.EndpointState().connected() { + continue + } + if !tcpEP.enqueueSegment(s) { + // Just silently drop the segment as we failed + // to queue, we don't want to generate a RST + // further below or try and create a new + // endpoint etc. + return nil + } + tcpEP.notifyProcessor() + return nil + } + } + // Since SYN cookies are in use this is potentially an ACK to a // SYN-ACK we sent but don't have a half open connection state // as cookies are being used to protect against a potential SYN @@ -577,39 +610,6 @@ func (e *endpoint) handleListenSegment(ctx *listenContext, s *segment) tcpip.Err return replyWithReset(e.stack, s, e.sendTOS, e.ipv4TTL, e.ipv6HopLimit) } - // As an edge case when SYN-COOKIES are in use and we receive a - // segment that has data and is valid we should check if it - // already matches a created endpoint and redirect the segment - // rather than try and create a new endpoint. This can happen - // where the final ACK for the handshake and other data packets - // arrive at the same time and are queued to the listening - // endpoint before the listening endpoint has had time to - // process the first ACK and create the endpoint that matches - // the incoming packet's full 5 tuple. - netProtos := []tcpip.NetworkProtocolNumber{s.pkt.NetworkProtocolNumber} - // If the local address is an IPv4 Address then also look for IPv6 - // dual stack endpoints. 
- if s.id.LocalAddress.To4() != (tcpip.Address{}) { - netProtos = []tcpip.NetworkProtocolNumber{header.IPv4ProtocolNumber, header.IPv6ProtocolNumber} - } - for _, netProto := range netProtos { - if newEP := e.stack.FindTransportEndpoint(netProto, ProtocolNumber, s.id, s.pkt.NICID); newEP != nil && newEP != e { - tcpEP := newEP.(*endpoint) - if !tcpEP.EndpointState().connected() { - continue - } - if !tcpEP.enqueueSegment(s) { - // Just silently drop the segment as we failed - // to queue, we don't want to generate a RST - // further below or try and create a new - // endpoint etc. - return nil - } - tcpEP.notifyProcessor() - return nil - } - } - // Keep hold of acceptMu until the new endpoint is in the accept queue (or // if there is an error), to guarantee that we will keep our spot in the // queue even if another handshake from the syn queue completes. diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/connect.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/connect.go index 0f900174..b9e2b639 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/connect.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/connect.go @@ -15,6 +15,7 @@ package tcp import ( + "crypto/sha256" "encoding/binary" "fmt" "math" @@ -23,7 +24,6 @@ import ( "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/checksum" - "gvisor.dev/gvisor/pkg/tcpip/hash/jenkins" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/seqnum" "gvisor.dev/gvisor/pkg/tcpip/stack" @@ -63,8 +63,8 @@ const ( // // +stateify savable type handshake struct { - ep *endpoint - listenEP *endpoint + ep *Endpoint + listenEP *Endpoint state handshakeState active bool flags header.TCPFlags @@ -115,15 +115,14 @@ type handshake struct { retransmitTimer *backoffTimer `state:"nosave"` } -// maybeFailTimerHandler takes a handler function for a timer that may fail and -// returns a function that will invoke the provided handler with the endpoint -// mutex 
held. In addition the returned function will perform any cleanup that -// maybe required if the timer handler returns an error and in case of no errors -// will notify the processor if there are pending segments that need to be -// processed. - +// timerHandler takes a handler function for a timer and returns a function that +// will invoke the provided handler with the endpoint mutex held. In addition +// the returned function will perform any cleanup that may be required if the +// timer handler returns an error. In the case of no errors it will notify the +// processor if there are pending segments that need to be processed. +// // NOTE: e.mu is held for the duration of the call to f(). -func maybeFailTimerHandler(e *endpoint, f func() tcpip.Error) func() { +func timerHandler(e *Endpoint, f func() tcpip.Error) func() { return func() { e.mu.Lock() if err := f(); err != nil { @@ -154,30 +153,9 @@ func maybeFailTimerHandler(e *endpoint, f func() tcpip.Error) func() { } } -// timerHandler takes a handler function for a timer that never results in a -// connection being aborted and returns a function that will invoke the provided -// handler with the endpoint mutex held. In addition the returned function will -// notify the processor if there are pending segments that need to be processed -// once the handler function completes. -// -// NOTE: e.mu is held for the duration of the call to f() -func timerHandler(e *endpoint, f func()) func() { - return func() { - e.mu.Lock() - f() - processor := e.protocol.dispatcher.selectProcessor(e.ID) - e.mu.Unlock() - // notify processor if there are pending segments to be - // processed. 
- if !e.segmentQueue.empty() { - processor.queueEndpoint(e) - } - } -} - // +checklocks:e.mu // +checklocksacquire:h.ep.mu -func (e *endpoint) newHandshake() (h *handshake) { +func (e *Endpoint) newHandshake() (h *handshake) { h = &handshake{ ep: e, active: true, @@ -190,7 +168,7 @@ func (e *endpoint) newHandshake() (h *handshake) { e.h = h // By the time handshake is created, e.ID is already initialized. e.TSOffset = e.protocol.tsOffset(e.ID.LocalAddress, e.ID.RemoteAddress) - timer, err := newBackoffTimer(h.ep.stack.Clock(), InitialRTO, MaxRTO, maybeFailTimerHandler(e, h.retransmitHandlerLocked)) + timer, err := newBackoffTimer(h.ep.stack.Clock(), InitialRTO, MaxRTO, timerHandler(e, h.retransmitHandlerLocked)) if err != nil { panic(fmt.Sprintf("newBackOffTimer(_, %s, %s, _) failed: %s", InitialRTO, MaxRTO, err)) } @@ -200,7 +178,7 @@ func (e *endpoint) newHandshake() (h *handshake) { // +checklocks:e.mu // +checklocksacquire:h.ep.mu -func (e *endpoint) newPassiveHandshake(isn, irs seqnum.Value, opts header.TCPSynOptions, deferAccept time.Duration) (h *handshake) { +func (e *Endpoint) newPassiveHandshake(isn, irs seqnum.Value, opts header.TCPSynOptions, deferAccept time.Duration) (h *handshake) { h = e.newHandshake() h.resetToSynRcvd(isn, irs, opts, deferAccept) return h @@ -235,11 +213,13 @@ func (h *handshake) resetState() { // generateSecureISN generates a secure Initial Sequence number based on the // recommendation here https://tools.ietf.org/html/rfc6528#page-3. -func generateSecureISN(id stack.TransportEndpointID, clock tcpip.Clock, seed uint32) seqnum.Value { - isnHasher := jenkins.Sum32(seed) +func generateSecureISN(id stack.TransportEndpointID, clock tcpip.Clock, seed [16]byte) seqnum.Value { + isnHasher := sha256.New() + // Per hash.Hash.Writer: // // It never returns an error. 
+ _, _ = isnHasher.Write(seed[:]) _, _ = isnHasher.Write(id.LocalAddress.AsSlice()) _, _ = isnHasher.Write(id.RemoteAddress.AsSlice()) portBuf := make([]byte, 2) @@ -257,7 +237,8 @@ func generateSecureISN(id stack.TransportEndpointID, clock tcpip.Clock, seed uin // // Which sort of guarantees that we won't reuse the ISN for a new // connection for the same tuple for at least 274s. - isn := isnHasher.Sum32() + uint32(clock.NowMonotonic().Sub(tcpip.MonotonicTime{}).Nanoseconds()>>6) + hash := binary.LittleEndian.Uint32(isnHasher.Sum(nil)[:4]) + isn := hash + uint32(clock.NowMonotonic().Sub(tcpip.MonotonicTime{}).Nanoseconds()>>6) return seqnum.Value(isn) } @@ -287,19 +268,9 @@ func (h *handshake) resetToSynRcvd(iss seqnum.Value, irs seqnum.Value, opts head } // checkAck checks if the ACK number, if present, of a segment received during -// a TCP 3-way handshake is valid. If it's not, a RST segment is sent back in -// response. +// a TCP 3-way handshake is valid. func (h *handshake) checkAck(s *segment) bool { - if s.flags.Contains(header.TCPFlagAck) && s.ackNumber != h.iss+1 { - // RFC 793, page 72 (https://datatracker.ietf.org/doc/html/rfc793#page-72): - // If the segment acknowledgment is not acceptable, form a reset segment, - // - // and send it. - h.ep.sendEmptyRaw(header.TCPFlagRst, s.ackNumber, 0, 0) - return false - } - - return true + return !(s.flags.Contains(header.TCPFlagAck) && s.ackNumber != h.iss+1) } // synSentState handles a segment received when the TCP 3-way handshake is in @@ -321,6 +292,11 @@ func (h *handshake) synSentState(s *segment) tcpip.Error { } if !h.checkAck(s) { + // RFC 793, page 72 (https://datatracker.ietf.org/doc/html/rfc793#page-72): + // If the segment acknowledgment is not acceptable, form a reset segment, + // + // and send it. 
+ h.ep.sendEmptyRaw(header.TCPFlagRst, s.ackNumber, 0, 0) return nil } @@ -402,8 +378,30 @@ func (h *handshake) synRcvdState(s *segment) tcpip.Error { return nil } - if !h.checkAck(s) { - return nil + // It's possible that s is an ACK of a SYN cookie. This can happen if: + // + // - We receive a SYN while under load and issue a SYN/ACK with + // cookie S. + // - We receive a retransmitted SYN while space exists in the SYN + // queue, and issue a SYN/ACK with seqnum S'. + // - We receive the ACK based on S. + // + // If we receive a SYN cookie ACK, just use the cookie seqnum. + if !h.checkAck(s) && h.listenEP != nil { + iss := s.ackNumber - 1 + data, ok := h.listenEP.listenCtx.isCookieValid(s.id, iss, s.sequenceNumber-1) + if !ok || int(data) >= len(mssTable) { + // This isn't a valid cookie. + // RFC 793, page 72 (https://datatracker.ietf.org/doc/html/rfc793#page-72): + // If the segment acknowledgment is not acceptable, form a reset segment, + // + // and send it. + h.ep.sendEmptyRaw(header.TCPFlagRst, s.ackNumber, 0, 0) + return nil + } + // This is a cookie that snuck its way in after we stopped using them. + h.mss = mssTable[data] + h.iss = iss } // RFC 793, Section 3.9, page 69, states that in the SYN-RCVD state, a @@ -798,7 +796,7 @@ type tcpFields struct { txHash uint32 } -func (e *endpoint) sendSynTCP(r *stack.Route, tf tcpFields, opts header.TCPSynOptions) tcpip.Error { +func (e *Endpoint) sendSynTCP(r *stack.Route, tf tcpFields, opts header.TCPSynOptions) tcpip.Error { tf.opts = makeSynOptions(opts) // We ignore SYN send errors and let the callers re-attempt send. p := stack.NewPacketBuffer(stack.PacketBufferOptions{ReserveHeaderBytes: header.TCPMinimumSize + int(r.MaxHeaderLength()) + len(tf.opts)}) @@ -811,7 +809,7 @@ func (e *endpoint) sendSynTCP(r *stack.Route, tf tcpFields, opts header.TCPSynOp } // This method takes ownership of pkt. 
-func (e *endpoint) sendTCP(r *stack.Route, tf tcpFields, pkt stack.PacketBufferPtr, gso stack.GSO) tcpip.Error { +func (e *Endpoint) sendTCP(r *stack.Route, tf tcpFields, pkt *stack.PacketBuffer, gso stack.GSO) tcpip.Error { tf.txHash = e.txHash if err := sendTCP(r, tf, pkt, gso, e.owner); err != nil { e.stats.SendErrors.SegmentSendToNetworkFailed.Increment() @@ -821,7 +819,7 @@ func (e *endpoint) sendTCP(r *stack.Route, tf tcpFields, pkt stack.PacketBufferP return nil } -func buildTCPHdr(r *stack.Route, tf tcpFields, pkt stack.PacketBufferPtr, gso stack.GSO) { +func buildTCPHdr(r *stack.Route, tf tcpFields, pkt *stack.PacketBuffer, gso stack.GSO) { optLen := len(tf.opts) tcp := header.TCP(pkt.TransportHeader().Push(header.TCPMinimumSize + optLen)) pkt.TransportProtocolNumber = header.TCPProtocolNumber @@ -850,7 +848,7 @@ func buildTCPHdr(r *stack.Route, tf tcpFields, pkt stack.PacketBufferPtr, gso st } } -func sendTCPBatch(r *stack.Route, tf tcpFields, pkt stack.PacketBufferPtr, gso stack.GSO, owner tcpip.PacketOwner) tcpip.Error { +func sendTCPBatch(r *stack.Route, tf tcpFields, pkt *stack.PacketBuffer, gso stack.GSO, owner tcpip.PacketOwner) tcpip.Error { optLen := len(tf.opts) if tf.rcvWnd > math.MaxUint16 { tf.rcvWnd = math.MaxUint16 @@ -901,7 +899,7 @@ func sendTCPBatch(r *stack.Route, tf tcpFields, pkt stack.PacketBufferPtr, gso s // sendTCP sends a TCP segment with the provided options via the provided // network endpoint and under the provided identity. This method takes // ownership of pkt. -func sendTCP(r *stack.Route, tf tcpFields, pkt stack.PacketBufferPtr, gso stack.GSO, owner tcpip.PacketOwner) tcpip.Error { +func sendTCP(r *stack.Route, tf tcpFields, pkt *stack.PacketBuffer, gso stack.GSO, owner tcpip.PacketOwner) tcpip.Error { if tf.rcvWnd > math.MaxUint16 { tf.rcvWnd = math.MaxUint16 } @@ -927,7 +925,7 @@ func sendTCP(r *stack.Route, tf tcpFields, pkt stack.PacketBufferPtr, gso stack. } // makeOptions makes an options slice. 
-func (e *endpoint) makeOptions(sackBlocks []header.SACKBlock) []byte { +func (e *Endpoint) makeOptions(sackBlocks []header.SACKBlock) []byte { options := getOptions() offset := 0 @@ -966,7 +964,7 @@ func (e *endpoint) makeOptions(sackBlocks []header.SACKBlock) []byte { } // sendEmptyRaw sends a TCP segment with no payload to the endpoint's peer. -func (e *endpoint) sendEmptyRaw(flags header.TCPFlags, seq, ack seqnum.Value, rcvWnd seqnum.Size) tcpip.Error { +func (e *Endpoint) sendEmptyRaw(flags header.TCPFlags, seq, ack seqnum.Value, rcvWnd seqnum.Size) tcpip.Error { pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{}) defer pkt.DecRef() return e.sendRaw(pkt, flags, seq, ack, rcvWnd) @@ -974,7 +972,7 @@ func (e *endpoint) sendEmptyRaw(flags header.TCPFlags, seq, ack seqnum.Value, rc // sendRaw sends a TCP segment to the endpoint's peer. This method takes // ownership of pkt. pkt must not have any headers set. -func (e *endpoint) sendRaw(pkt stack.PacketBufferPtr, flags header.TCPFlags, seq, ack seqnum.Value, rcvWnd seqnum.Size) tcpip.Error { +func (e *Endpoint) sendRaw(pkt *stack.PacketBuffer, flags header.TCPFlags, seq, ack seqnum.Value, rcvWnd seqnum.Size) tcpip.Error { var sackBlocks []header.SACKBlock if e.EndpointState() == StateEstablished && e.rcv.pendingRcvdSegments.Len() > 0 && (flags&header.TCPFlagAck != 0) { sackBlocks = e.sack.Blocks[:e.sack.NumBlocks] @@ -996,7 +994,7 @@ func (e *endpoint) sendRaw(pkt stack.PacketBufferPtr, flags header.TCPFlags, seq // +checklocks:e.mu // +checklocksalias:e.snd.ep.mu=e.mu -func (e *endpoint) sendData(next *segment) { +func (e *Endpoint) sendData(next *segment) { // Initialize the next segment to write if it's currently nil. if e.snd.writeNext == nil { if next == nil { @@ -1014,7 +1012,7 @@ func (e *endpoint) sendData(next *segment) { // indicating that the connection is being reset due to receiving a RST. This // method must only be called from the protocol goroutine. 
// +checklocks:e.mu -func (e *endpoint) resetConnectionLocked(err tcpip.Error) { +func (e *Endpoint) resetConnectionLocked(err tcpip.Error) { // Only send a reset if the connection is being aborted for a reason // other than receiving a reset. e.hardError = err @@ -1049,7 +1047,7 @@ func (e *endpoint) resetConnectionLocked(err tcpip.Error) { // delivered to this endpoint from the demuxer when the endpoint // is transitioned to StateClose. // +checklocks:e.mu -func (e *endpoint) transitionToStateCloseLocked() { +func (e *Endpoint) transitionToStateCloseLocked() { s := e.EndpointState() if s == StateClose { return @@ -1068,7 +1066,7 @@ func (e *endpoint) transitionToStateCloseLocked() { // segment to any other endpoint other than the current one. This is called // only when the endpoint is in StateClose and we want to deliver the segment // to any other listening endpoint. We reply with RST if we cannot find one. -func (e *endpoint) tryDeliverSegmentFromClosedEndpoint(s *segment) { +func (e *Endpoint) tryDeliverSegmentFromClosedEndpoint(s *segment) { ep := e.stack.FindTransportEndpoint(e.NetProto, e.TransProto, e.TransportEndpointInfo.ID, s.pkt.NICID) if ep == nil && e.NetProto == header.IPv6ProtocolNumber && e.TransportEndpointInfo.ID.LocalAddress.To4() != (tcpip.Address{}) { // Dual-stack socket, try IPv4. 
@@ -1087,10 +1085,10 @@ func (e *endpoint) tryDeliverSegmentFromClosedEndpoint(s *segment) { } if e == ep { - panic(fmt.Sprintf("current endpoint not removed from demuxer, enqueing segments to itself, endpoint in state %v", e.EndpointState())) + panic(fmt.Sprintf("current endpoint not removed from demuxer, enqueuing segments to itself, endpoint in state %v", e.EndpointState())) } - if ep := ep.(*endpoint); ep.enqueueSegment(s) { + if ep := ep.(*Endpoint); ep.enqueueSegment(s) { ep.notifyProcessor() } } @@ -1098,7 +1096,7 @@ func (e *endpoint) tryDeliverSegmentFromClosedEndpoint(s *segment) { // Drain segment queue from the endpoint and try to re-match the segment to a // different endpoint. This is used when the current endpoint is transitioned to // StateClose and has been unregistered from the transport demuxer. -func (e *endpoint) drainClosingSegmentQueue() { +func (e *Endpoint) drainClosingSegmentQueue() { for { s := e.segmentQueue.dequeue() if s == nil { @@ -1111,7 +1109,7 @@ func (e *endpoint) drainClosingSegmentQueue() { } // +checklocks:e.mu -func (e *endpoint) handleReset(s *segment) (ok bool, err tcpip.Error) { +func (e *Endpoint) handleReset(s *segment) (ok bool, err tcpip.Error) { if e.rcv.acceptable(s.sequenceNumber, 0) { // RFC 793, page 37 states that "in all states // except SYN-SENT, all reset (RST) segments are @@ -1160,7 +1158,7 @@ func (e *endpoint) handleReset(s *segment) (ok bool, err tcpip.Error) { // // +checklocks:e.mu // +checklocksalias:e.snd.ep.mu=e.mu -func (e *endpoint) handleSegmentsLocked() tcpip.Error { +func (e *Endpoint) handleSegmentsLocked() tcpip.Error { sndUna := e.snd.SndUna for i := 0; i < maxSegmentsPerWake; i++ { if state := e.EndpointState(); state.closed() || state == StateTimeWait || state == StateError { @@ -1202,7 +1200,7 @@ func (e *endpoint) handleSegmentsLocked() tcpip.Error { } // +checklocks:e.mu -func (e *endpoint) probeSegmentLocked() { +func (e *Endpoint) probeSegmentLocked() { if fn := e.probe; fn != nil { 
var state stack.TCPEndpointState e.completeStateLocked(&state) @@ -1216,7 +1214,7 @@ func (e *endpoint) probeSegmentLocked() { // +checklocks:e.mu // +checklocksalias:e.rcv.ep.mu=e.mu // +checklocksalias:e.snd.ep.mu=e.mu -func (e *endpoint) handleSegmentLocked(s *segment) (cont bool, err tcpip.Error) { +func (e *Endpoint) handleSegmentLocked(s *segment) (cont bool, err tcpip.Error) { // Invoke the tcp probe if installed. The tcp probe function will update // the TCPEndpointState after the segment is processed. defer e.probeSegmentLocked() @@ -1291,11 +1289,16 @@ func (e *endpoint) handleSegmentLocked(s *segment) (cont bool, err tcpip.Error) // from the other side after a number of tries, we terminate the connection. // +checklocks:e.mu // +checklocksalias:e.snd.ep.mu=e.mu -func (e *endpoint) keepaliveTimerExpired() tcpip.Error { +func (e *Endpoint) keepaliveTimerExpired() tcpip.Error { userTimeout := e.userTimeout + // If the route is not ready or already cleaned up, then we don't need to + // send keepalives. + if e.route == nil { + return nil + } e.keepalive.Lock() - if !e.SocketOptions().GetKeepAlive() || e.keepalive.timer.isZero() || !e.keepalive.timer.checkExpiration() { + if !e.SocketOptions().GetKeepAlive() || e.keepalive.timer.isUninitialized() || !e.keepalive.timer.checkExpiration() { e.keepalive.Unlock() return nil } @@ -1325,10 +1328,10 @@ func (e *endpoint) keepaliveTimerExpired() tcpip.Error { // resetKeepaliveTimer restarts or stops the keepalive timer, depending on // whether it is enabled for this endpoint. 
-func (e *endpoint) resetKeepaliveTimer(receivedData bool) { +func (e *Endpoint) resetKeepaliveTimer(receivedData bool) { e.keepalive.Lock() defer e.keepalive.Unlock() - if e.keepalive.timer.isZero() { + if e.keepalive.timer.isUninitialized() { if state := e.EndpointState(); !state.closed() { panic(fmt.Sprintf("Unexpected state when the keepalive time is cleaned up, got %s, want %s or %s", state, StateClose, StateError)) } @@ -1351,7 +1354,7 @@ func (e *endpoint) resetKeepaliveTimer(receivedData bool) { } // disableKeepaliveTimer stops the keepalive timer. -func (e *endpoint) disableKeepaliveTimer() { +func (e *Endpoint) disableKeepaliveTimer() { e.keepalive.Lock() e.keepalive.timer.disable() e.keepalive.Unlock() @@ -1359,7 +1362,7 @@ func (e *endpoint) disableKeepaliveTimer() { // finWait2TimerExpired is called when the FIN-WAIT-2 timeout is hit // and the peer hasn't sent us a FIN. -func (e *endpoint) finWait2TimerExpired() { +func (e *Endpoint) finWait2TimerExpired() { e.mu.Lock() e.transitionToStateCloseLocked() e.mu.Unlock() @@ -1368,7 +1371,7 @@ func (e *endpoint) finWait2TimerExpired() { } // +checklocks:e.mu -func (e *endpoint) handshakeFailed(err tcpip.Error) { +func (e *Endpoint) handshakeFailed(err tcpip.Error) { e.lastErrorMu.Lock() e.lastError = err e.lastErrorMu.Unlock() @@ -1388,7 +1391,7 @@ func (e *endpoint) handshakeFailed(err tcpip.Error) { // state. 
// +checklocks:e.mu // +checklocksalias:e.rcv.ep.mu=e.mu -func (e *endpoint) handleTimeWaitSegments() (extendTimeWait bool, reuseTW func()) { +func (e *Endpoint) handleTimeWaitSegments() (extendTimeWait bool, reuseTW func()) { for i := 0; i < maxSegmentsPerWake; i++ { s := e.segmentQueue.dequeue() if s == nil { @@ -1409,7 +1412,7 @@ func (e *endpoint) handleTimeWaitSegments() (extendTimeWait bool, reuseTW func() } for _, netProto := range netProtos { if listenEP := e.stack.FindTransportEndpoint(netProto, info.TransProto, newID, s.pkt.NICID); listenEP != nil { - tcpEP := listenEP.(*endpoint) + tcpEP := listenEP.(*Endpoint) if EndpointState(tcpEP.State()) == StateListen { reuseTW = func() { if !tcpEP.enqueueSegment(s) { @@ -1434,7 +1437,7 @@ func (e *endpoint) handleTimeWaitSegments() (extendTimeWait bool, reuseTW func() } // +checklocks:e.mu -func (e *endpoint) getTimeWaitDuration() time.Duration { +func (e *Endpoint) getTimeWaitDuration() time.Duration { timeWaitDuration := DefaultTCPTimeWaitTimeout // Get the stack wide configuration. @@ -1448,7 +1451,7 @@ func (e *endpoint) getTimeWaitDuration() time.Duration { // timeWaitTimerExpired is called when an endpoint completes the required time // (typically 2 * MSL unless configured to something else at a stack level) in // TIME-WAIT state. -func (e *endpoint) timeWaitTimerExpired() { +func (e *Endpoint) timeWaitTimerExpired() { e.mu.Lock() if e.EndpointState() != StateTimeWait { e.mu.Unlock() @@ -1461,7 +1464,7 @@ func (e *endpoint) timeWaitTimerExpired() { } // notifyProcessor queues this endpoint for processing to its TCP processor. 
-func (e *endpoint) notifyProcessor() { +func (e *Endpoint) notifyProcessor() { // We use TryLock here to avoid deadlocks in cases where a listening endpoint that is being // closed tries to abort half completed connections which in turn try to queue any segments // queued to that endpoint back to the same listening endpoint (because it may have got diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/cubic.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/cubic.go index 6985194b..ff33a71b 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/cubic.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/cubic.go @@ -192,7 +192,7 @@ func (c *cubicState) fastConvergence() { c.K = math.Cbrt(c.WMax * (1 - c.Beta) / c.C) } -// PostRecovery implemements congestionControl.PostRecovery. +// PostRecovery implements congestionControl.PostRecovery. func (c *cubicState) PostRecovery() { c.T = c.s.ep.stack.Clock().NowMonotonic() } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/dispatcher.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/dispatcher.go index b647b781..043b8410 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/dispatcher.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/dispatcher.go @@ -35,7 +35,7 @@ type epQueue struct { } // enqueue adds e to the queue if the endpoint is not already on the queue. -func (q *epQueue) enqueue(e *endpoint) { +func (q *epQueue) enqueue(e *Endpoint) { q.mu.Lock() defer q.mu.Unlock() e.pendingProcessingMu.Lock() @@ -50,7 +50,7 @@ func (q *epQueue) enqueue(e *endpoint) { // dequeue removes and returns the first element from the queue if available, // returns nil otherwise. 
-func (q *epQueue) dequeue() *endpoint { +func (q *epQueue) dequeue() *Endpoint { q.mu.Lock() if e := q.list.Front(); e != nil { q.list.Remove(e) @@ -87,7 +87,7 @@ func (p *processor) close() { p.closeWaker.Assert() } -func (p *processor) queueEndpoint(ep *endpoint) { +func (p *processor) queueEndpoint(ep *Endpoint) { // Queue an endpoint for processing by the processor goroutine. p.epQ.enqueue(ep) p.newEndpointWaker.Assert() @@ -97,7 +97,7 @@ func (p *processor) queueEndpoint(ep *endpoint) { // of its associated listening endpoint. // // +checklocks:ep.mu -func deliverAccepted(ep *endpoint) bool { +func deliverAccepted(ep *Endpoint) bool { lEP := ep.h.listenEP lEP.acceptMu.Lock() @@ -129,7 +129,7 @@ func deliverAccepted(ep *endpoint) bool { // handleConnecting is responsible for TCP processing for an endpoint in one of // the connecting states. -func (p *processor) handleConnecting(ep *endpoint) { +func handleConnecting(ep *Endpoint) { if !ep.TryLock() { return } @@ -172,7 +172,7 @@ func (p *processor) handleConnecting(ep *endpoint) { // handleConnected is responsible for TCP processing for an endpoint in one of // the connected states(StateEstablished, StateFinWait1 etc.) -func (p *processor) handleConnected(ep *endpoint) { +func handleConnected(ep *Endpoint) { if !ep.TryLock() { return } @@ -200,7 +200,7 @@ func (p *processor) handleConnected(ep *endpoint) { ep.waiterQueue.Notify(waiter.EventHUp | waiter.EventErr | waiter.ReadableEvents | waiter.WritableEvents) return case ep.EndpointState() == StateTimeWait: - p.startTimeWait(ep) + startTimeWait(ep) } ep.mu.Unlock() } @@ -208,7 +208,7 @@ func (p *processor) handleConnected(ep *endpoint) { // startTimeWait starts a new goroutine to handle TIME-WAIT. // // +checklocks:ep.mu -func (p *processor) startTimeWait(ep *endpoint) { +func startTimeWait(ep *Endpoint) { // Disable close timer as we are now entering real TIME_WAIT. 
if ep.finWait2Timer != nil { ep.finWait2Timer.Stop() @@ -221,7 +221,7 @@ func (p *processor) startTimeWait(ep *endpoint) { // handleTimeWait is responsible for TCP processing for an endpoint in TIME-WAIT // state. -func (p *processor) handleTimeWait(ep *endpoint) { +func handleTimeWait(ep *Endpoint) { if !ep.TryLock() { return } @@ -251,7 +251,7 @@ func (p *processor) handleTimeWait(ep *endpoint) { // handleListen is responsible for TCP processing for an endpoint in LISTEN // state. -func (p *processor) handleListen(ep *endpoint) { +func handleListen(ep *Endpoint) { if !ep.TryLock() { return } @@ -307,13 +307,13 @@ func (p *processor) start(wg *sync.WaitGroup) { } switch state := ep.EndpointState(); { case state.connecting(): - p.handleConnecting(ep) + handleConnecting(ep) case state.connected() && state != StateTimeWait: - p.handleConnected(ep) + handleConnected(ep) case state == StateTimeWait: - p.handleTimeWait(ep) + handleTimeWait(ep) case state == StateListen: - p.handleListen(ep) + handleListen(ep) case state == StateError || state == StateClose: // Try to redeliver any still queued // packets to another endpoint or send a @@ -409,7 +409,7 @@ func (d *dispatcher) wait() { // queuePacket queues an incoming packet to the matching tcp endpoint and // also queues the endpoint to a processor queue for processing. 
-func (d *dispatcher) queuePacket(stackEP stack.TransportEndpoint, id stack.TransportEndpointID, clock tcpip.Clock, pkt stack.PacketBufferPtr) { +func (d *dispatcher) queuePacket(stackEP stack.TransportEndpoint, id stack.TransportEndpointID, clock tcpip.Clock, pkt *stack.PacketBuffer) { d.mu.Lock() closed := d.closed d.mu.Unlock() @@ -418,7 +418,7 @@ func (d *dispatcher) queuePacket(stackEP stack.TransportEndpoint, id stack.Trans return } - ep := stackEP.(*endpoint) + ep := stackEP.(*Endpoint) s, err := newIncomingSegment(id, clock, pkt) if err != nil { diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint.go index 6dcf557e..cf0b0093 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint.go @@ -16,7 +16,6 @@ package tcp import ( "container/heap" - "encoding/binary" "fmt" "io" "math" @@ -29,7 +28,6 @@ import ( "gvisor.dev/gvisor/pkg/sleep" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/tcpip" - "gvisor.dev/gvisor/pkg/tcpip/hash/jenkins" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/ports" "gvisor.dev/gvisor/pkg/tcpip/seqnum" @@ -77,6 +75,17 @@ const ( SegOverheadFactor = 2 ) +type connDirectionState uint32 + +// Connection direction states used for directionState checks in endpoint struct +// to detect half-closed connection and deliver POLLRDHUP +const ( + connDirectionStateOpen connDirectionState = 0 + connDirectionStateRcvClosed connDirectionState = 1 + connDirectionStateSndClosed connDirectionState = 2 + connDirectionStateAll connDirectionState = connDirectionStateOpen | connDirectionStateRcvClosed | connDirectionStateSndClosed +) + // connected returns true when s is one of the states representing an // endpoint connected to a peer. 
func (s EndpointState) connected() bool { @@ -294,7 +303,7 @@ func (sq *sndQueueInfo) CloneState(other *stack.TCPSndBufState) { other.AutoTuneSndBufDisabled = atomicbitops.FromUint32(sq.AutoTuneSndBufDisabled.RacyLoad()) } -// endpoint represents a TCP endpoint. This struct serves as the interface +// Endpoint represents a TCP endpoint. This struct serves as the interface // between users of the endpoint and the protocol implementation; it is legal to // have concurrent goroutines make calls into the endpoint, they are properly // synchronized. The protocol implementation, however, runs in a single @@ -334,12 +343,12 @@ func (sq *sndQueueInfo) CloneState(other *stack.TCPSndBufState) { // e.LockUser/e.UnlockUser methods. // // +stateify savable -type endpoint struct { +type Endpoint struct { stack.TCPEndpointStateInner stack.TransportEndpointInfo tcpip.DefaultSocketOptionsHandler - // endpointEntry is used to queue endpoints for processing to the + // EndpointEntry is used to queue endpoints for processing to the // a given tcp processor goroutine. // // Precondition: epQueue.mu must be held to read/write this field.. @@ -399,6 +408,10 @@ type endpoint struct { // methods. state atomicbitops.Uint32 `state:".(EndpointState)"` + // connectionDirectionState holds current state of send and receive, + // accessed atomically + connectionDirectionState atomicbitops.Uint32 + // origEndpointState is only used during a restore phase to save the // endpoint state at restore time as the socket is moved to it's correct // state. @@ -582,10 +595,15 @@ type endpoint struct { // listenCtx is used by listening endpoints to store state used while listening for // connections. Nil otherwise. listenCtx *listenContext `state:"nosave"` + + // limRdr is reused to avoid allocations. + // + // +checklocks:mu + limRdr *io.LimitedReader `state:"nosave"` } // UniqueID implements stack.TransportEndpoint.UniqueID. 
-func (e *endpoint) UniqueID() uint64 { +func (e *Endpoint) UniqueID() uint64 { return e.uniqueID } @@ -607,7 +625,7 @@ func calculateAdvertisedMSS(userMSS uint16, r *stack.Route) uint16 { // isOwnedByUser() returns true if the endpoint lock is currently // held by a user(syscall) goroutine. -func (e *endpoint) isOwnedByUser() bool { +func (e *Endpoint) isOwnedByUser() bool { return e.ownedByUser.Load() == 1 } @@ -621,7 +639,7 @@ func (e *endpoint) isOwnedByUser() bool { // should not be holding the lock for long and spinning reduces latency as we // avoid an expensive sleep/wakeup of the syscall goroutine). // +checklocksacquire:e.mu -func (e *endpoint) LockUser() { +func (e *Endpoint) LockUser() { const iterations = 5 for i := 0; i < iterations; i++ { // Try first if the sock is locked then check if it's owned @@ -676,7 +694,7 @@ func (e *endpoint) LockUser() { // // Precondition: e.LockUser() must have been called before calling e.UnlockUser() // +checklocksrelease:e.mu -func (e *endpoint) UnlockUser() { +func (e *Endpoint) UnlockUser() { // Lock segment queue before checking so that we avoid a race where // segments can be queued between the time we check if queue is empty // and actually unlock the endpoint mutex. @@ -709,13 +727,13 @@ func (e *endpoint) UnlockUser() { // StopWork halts packet processing. Only to be used in tests. // +checklocksacquire:e.mu -func (e *endpoint) StopWork() { +func (e *Endpoint) StopWork() { e.mu.Lock() } // ResumeWork resumes packet processing. Only to be used in tests. // +checklocksrelease:e.mu -func (e *endpoint) ResumeWork() { +func (e *Endpoint) ResumeWork() { e.mu.Unlock() } @@ -728,7 +746,7 @@ func (e *endpoint) ResumeWork() { // variable locks. 
// +checklocks:locked.mu // +checklocksacquire:e.mu -func (e *endpoint) AssertLockHeld(locked *endpoint) { +func (e *Endpoint) AssertLockHeld(locked *Endpoint) { if e != locked { panic("AssertLockHeld failed: locked endpoint != asserting endpoint") } @@ -738,7 +756,7 @@ func (e *endpoint) AssertLockHeld(locked *endpoint) { // adds the necessary checklocks annotations. // TODO(b/226403629): Remove this once checklocks understands TryLock. // +checklocksacquire:e.mu -func (e *endpoint) TryLock() bool { +func (e *Endpoint) TryLock() bool { if e.mu.TryLock() { return true // +checklocksforce } @@ -750,7 +768,7 @@ func (e *endpoint) TryLock() bool { // package but we allow the state to be read freely without holding e.mu. // // +checklocks:e.mu -func (e *endpoint) setEndpointState(state EndpointState) { +func (e *Endpoint) setEndpointState(state EndpointState) { oldstate := EndpointState(e.state.Swap(uint32(state))) switch state { case StateEstablished: @@ -774,18 +792,18 @@ func (e *endpoint) setEndpointState(state EndpointState) { } // EndpointState returns the current state of the endpoint. -func (e *endpoint) EndpointState() EndpointState { +func (e *Endpoint) EndpointState() EndpointState { return EndpointState(e.state.Load()) } // setRecentTimestamp sets the recentTS field to the provided value. -func (e *endpoint) setRecentTimestamp(recentTS uint32) { +func (e *Endpoint) setRecentTimestamp(recentTS uint32) { e.RecentTS = recentTS e.recentTSTime = e.stack.Clock().NowMonotonic() } // recentTimestamp returns the value of the recentTS field. 
-func (e *endpoint) recentTimestamp() uint32 { +func (e *Endpoint) recentTimestamp() uint32 { return e.RecentTS } @@ -823,8 +841,8 @@ type keepalive struct { waker sleep.Waker `state:"nosave"` } -func newEndpoint(s *stack.Stack, protocol *protocol, netProto tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) *endpoint { - e := &endpoint{ +func newEndpoint(s *stack.Stack, protocol *protocol, netProto tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) *Endpoint { + e := &Endpoint{ stack: s, protocol: protocol, TransportEndpointInfo: stack.TransportEndpointInfo{ @@ -843,12 +861,15 @@ func newEndpoint(s *stack.Stack, protocol *protocol, netProto tcpip.NetworkProto interval: DefaultKeepaliveInterval, count: DefaultKeepaliveCount, }, - uniqueID: s.UniqueID(), - ipv4TTL: tcpip.UseDefaultIPv4TTL, - ipv6HopLimit: tcpip.UseDefaultIPv6HopLimit, - txHash: s.Rand().Uint32(), + uniqueID: s.UniqueID(), + ipv4TTL: tcpip.UseDefaultIPv4TTL, + ipv6HopLimit: tcpip.UseDefaultIPv6HopLimit, + // txHash only determines which outgoing queue to use, so + // InsecureRNG is fine. + txHash: s.InsecureRNG().Uint32(), windowClamp: DefaultReceiveBufferSize, maxSynRetries: DefaultSynRetries, + limRdr: &io.LimitedReader{}, } e.ops.InitHandler(e, e.stack, GetTCPSendBufferLimits, GetTCPReceiveBufferLimits) e.ops.SetMulticastLoop(true) @@ -899,14 +920,14 @@ func newEndpoint(s *stack.Stack, protocol *protocol, netProto tcpip.NetworkProto // TODO(https://gvisor.dev/issues/7493): Defer creating the timer until TCP connection becomes // established. - e.keepalive.timer.init(e.stack.Clock(), maybeFailTimerHandler(e, e.keepaliveTimerExpired)) + e.keepalive.timer.init(e.stack.Clock(), timerHandler(e, e.keepaliveTimerExpired)) return e } // Readiness returns the current readiness of the endpoint. For example, if // waiter.EventIn is set, the endpoint is immediately readable. 
-func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask { +func (e *Endpoint) Readiness(mask waiter.EventMask) waiter.EventMask { result := waiter.EventMask(0) switch e.EndpointState() { @@ -940,6 +961,9 @@ func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask { if e.sndQueueInfo.SndClosed || e.sndQueueInfo.SndBufUsed < sndBufSize { result |= waiter.WritableEvents } + if e.sndQueueInfo.SndClosed { + e.updateConnDirectionState(connDirectionStateSndClosed) + } e.sndQueueInfo.sndQueueMu.Unlock() } @@ -949,15 +973,23 @@ func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask { if e.RcvBufUsed > 0 || e.RcvClosed { result |= waiter.ReadableEvents } + if e.RcvClosed { + e.updateConnDirectionState(connDirectionStateRcvClosed) + } e.rcvQueueMu.Unlock() } } + // Determine whether endpoint is half-closed with rcv shutdown + if e.connDirectionState() == connDirectionStateRcvClosed { + result |= waiter.EventRdHUp + } + return result } // Purging pending rcv segments is only necessary on RST. -func (e *endpoint) purgePendingRcvQueue() { +func (e *Endpoint) purgePendingRcvQueue() { if e.rcv != nil { for e.rcv.pendingRcvdSegments.Len() > 0 { s := heap.Pop(&e.rcv.pendingRcvdSegments).(*segment) @@ -967,7 +999,7 @@ func (e *endpoint) purgePendingRcvQueue() { } // +checklocks:e.mu -func (e *endpoint) purgeReadQueue() { +func (e *Endpoint) purgeReadQueue() { if e.rcv != nil { e.rcvQueueMu.Lock() defer e.rcvQueueMu.Unlock() @@ -984,7 +1016,7 @@ func (e *endpoint) purgeReadQueue() { } // +checklocks:e.mu -func (e *endpoint) purgeWriteQueue() { +func (e *Endpoint) purgeWriteQueue() { if e.snd != nil { e.sndQueueInfo.sndQueueMu.Lock() defer e.sndQueueInfo.sndQueueMu.Unlock() @@ -1003,7 +1035,7 @@ func (e *endpoint) purgeWriteQueue() { } // Abort implements stack.TransportEndpoint.Abort. 
-func (e *endpoint) Abort() { +func (e *Endpoint) Abort() { defer e.drainClosingSegmentQueue() e.LockUser() defer e.UnlockUser() @@ -1021,7 +1053,7 @@ func (e *endpoint) Abort() { // Close puts the endpoint in a closed state and frees all resources associated // with it. It must be called only once and with no other concurrent calls to // the endpoint. -func (e *endpoint) Close() { +func (e *Endpoint) Close() { e.LockUser() if e.closed { e.UnlockUser() @@ -1045,7 +1077,7 @@ func (e *endpoint) Close() { } // +checklocks:e.mu -func (e *endpoint) closeLocked() { +func (e *Endpoint) closeLocked() { linger := e.SocketOptions().GetLinger() if linger.Enabled && linger.Timeout == 0 { s := e.EndpointState() @@ -1066,7 +1098,7 @@ func (e *endpoint) closeLocked() { // closeNoShutdown closes the endpoint without doing a full shutdown. // +checklocks:e.mu -func (e *endpoint) closeNoShutdownLocked() { +func (e *Endpoint) closeNoShutdownLocked() { // For listening sockets, we always release ports inline so that they // are immediately available for reuse after Close() is called. If also // registered, we unregister as well otherwise the next user would fail @@ -1126,15 +1158,15 @@ func (e *endpoint) closeNoShutdownLocked() { // closePendingAcceptableConnections closes all connections that have completed // handshake but not yet been delivered to the application. 
-func (e *endpoint) closePendingAcceptableConnectionsLocked() { +func (e *Endpoint) closePendingAcceptableConnectionsLocked() { e.acceptMu.Lock() pendingEndpoints := e.acceptQueue.pendingEndpoints e.acceptQueue.pendingEndpoints = nil - completedEndpoints := make([]*endpoint, 0, e.acceptQueue.endpoints.Len()) + completedEndpoints := make([]*Endpoint, 0, e.acceptQueue.endpoints.Len()) for n := e.acceptQueue.endpoints.Front(); n != nil; n = n.Next() { - completedEndpoints = append(completedEndpoints, n.Value.(*endpoint)) + completedEndpoints = append(completedEndpoints, n.Value.(*Endpoint)) } e.acceptQueue.endpoints.Init() e.acceptQueue.capacity = 0 @@ -1153,11 +1185,12 @@ func (e *endpoint) closePendingAcceptableConnectionsLocked() { // cleanupLocked frees all resources associated with the endpoint. // +checklocks:e.mu -func (e *endpoint) cleanupLocked() { +func (e *Endpoint) cleanupLocked() { if e.snd != nil { e.snd.resendTimer.cleanup() e.snd.probeTimer.cleanup() e.snd.reorderTimer.cleanup() + e.snd.corkTimer.cleanup() } if e.finWait2Timer != nil { @@ -1218,7 +1251,7 @@ func wndFromSpace(space int) int { // initialReceiveWindow returns the initial receive window to advertise in the // SYN/SYN-ACK. -func (e *endpoint) initialReceiveWindow() int { +func (e *Endpoint) initialReceiveWindow() int { rcvWnd := wndFromSpace(e.receiveBufferAvailable()) if rcvWnd > math.MaxUint16 { rcvWnd = math.MaxUint16 @@ -1247,7 +1280,7 @@ func (e *endpoint) initialReceiveWindow() int { // ModerateRecvBuf adjusts the receive buffer and the advertised window // based on the number of bytes copied to userspace. -func (e *endpoint) ModerateRecvBuf(copied int) { +func (e *Endpoint) ModerateRecvBuf(copied int) { e.LockUser() defer e.UnlockUser() @@ -1325,19 +1358,19 @@ func (e *endpoint) ModerateRecvBuf(copied int) { } // SetOwner implements tcpip.Endpoint.SetOwner. 
-func (e *endpoint) SetOwner(owner tcpip.PacketOwner) { +func (e *Endpoint) SetOwner(owner tcpip.PacketOwner) { e.owner = owner } // +checklocks:e.mu -func (e *endpoint) hardErrorLocked() tcpip.Error { +func (e *Endpoint) hardErrorLocked() tcpip.Error { err := e.hardError e.hardError = nil return err } // +checklocks:e.mu -func (e *endpoint) lastErrorLocked() tcpip.Error { +func (e *Endpoint) lastErrorLocked() tcpip.Error { e.lastErrorMu.Lock() defer e.lastErrorMu.Unlock() err := e.lastError @@ -1346,7 +1379,7 @@ func (e *endpoint) lastErrorLocked() tcpip.Error { } // LastError implements tcpip.Endpoint.LastError. -func (e *endpoint) LastError() tcpip.Error { +func (e *Endpoint) LastError() tcpip.Error { e.LockUser() defer e.UnlockUser() if err := e.hardErrorLocked(); err != nil { @@ -1358,12 +1391,12 @@ func (e *endpoint) LastError() tcpip.Error { // LastErrorLocked reads and clears lastError. // Only to be used in tests. // +checklocks:e.mu -func (e *endpoint) LastErrorLocked() tcpip.Error { +func (e *Endpoint) LastErrorLocked() tcpip.Error { return e.lastErrorLocked() } // UpdateLastError implements tcpip.SocketOptionsHandler.UpdateLastError. -func (e *endpoint) UpdateLastError(err tcpip.Error) { +func (e *Endpoint) UpdateLastError(err tcpip.Error) { e.LockUser() e.lastErrorMu.Lock() e.lastError = err @@ -1372,7 +1405,7 @@ func (e *endpoint) UpdateLastError(err tcpip.Error) { } // Read implements tcpip.Endpoint.Read. -func (e *endpoint) Read(dst io.Writer, opts tcpip.ReadOptions) (tcpip.ReadResult, tcpip.Error) { +func (e *Endpoint) Read(dst io.Writer, opts tcpip.ReadOptions) (tcpip.ReadResult, tcpip.Error) { e.LockUser() defer e.UnlockUser() @@ -1450,7 +1483,7 @@ func (e *endpoint) Read(dst io.Writer, opts tcpip.ReadOptions) (tcpip.ReadResult // checkRead checks that endpoint is in a readable state. 
// // +checklocks:e.mu -func (e *endpoint) checkReadLocked() tcpip.Error { +func (e *Endpoint) checkReadLocked() tcpip.Error { e.rcvQueueMu.Lock() defer e.rcvQueueMu.Unlock() // When in SYN-SENT state, let the caller block on the receive. @@ -1493,7 +1526,7 @@ func (e *endpoint) checkReadLocked() tcpip.Error { // indicating the reason why it's not writable. // +checklocks:e.mu // +checklocks:e.sndQueueInfo.sndQueueMu -func (e *endpoint) isEndpointWritableLocked() (int, tcpip.Error) { +func (e *Endpoint) isEndpointWritableLocked() (int, tcpip.Error) { // The endpoint cannot be written to if it's not connected. switch s := e.EndpointState(); { case s == StateError: @@ -1527,13 +1560,19 @@ func (e *endpoint) isEndpointWritableLocked() (int, tcpip.Error) { // readFromPayloader reads a slice from the Payloader. // +checklocks:e.mu // +checklocks:e.sndQueueInfo.sndQueueMu -func (e *endpoint) readFromPayloader(p tcpip.Payloader, opts tcpip.WriteOptions, avail int) (buffer.Buffer, tcpip.Error) { +func (e *Endpoint) readFromPayloader(p tcpip.Payloader, opts tcpip.WriteOptions, avail int) (buffer.Buffer, tcpip.Error) { // We can release locks while copying data. // // This is not possible if atomic is set, because we can't allow the // available buffer space to be consumed by some other caller while we // are copying data in. 
+ limRdr := e.limRdr if !opts.Atomic { + defer func() { + e.limRdr = limRdr + }() + e.limRdr = nil + e.sndQueueInfo.sndQueueMu.Unlock() defer e.sndQueueInfo.sndQueueMu.Lock() @@ -1549,7 +1588,7 @@ func (e *endpoint) readFromPayloader(p tcpip.Payloader, opts tcpip.WriteOptions, if avail == 0 { return payload, nil } - if _, err := payload.WriteFromReader(p, int64(avail)); err != nil { + if _, err := payload.WriteFromReaderAndLimitedReader(p, int64(avail), limRdr); err != nil { payload.Release() return buffer.Buffer{}, &tcpip.ErrBadBuffer{} } @@ -1558,7 +1597,7 @@ func (e *endpoint) readFromPayloader(p tcpip.Payloader, opts tcpip.WriteOptions, // queueSegment reads data from the payloader and returns a segment to be sent. // +checklocks:e.mu -func (e *endpoint) queueSegment(p tcpip.Payloader, opts tcpip.WriteOptions) (*segment, int, tcpip.Error) { +func (e *Endpoint) queueSegment(p tcpip.Payloader, opts tcpip.WriteOptions) (*segment, int, tcpip.Error) { e.sndQueueInfo.sndQueueMu.Lock() defer e.sndQueueInfo.sndQueueMu.Unlock() @@ -1606,7 +1645,7 @@ func (e *endpoint) queueSegment(p tcpip.Payloader, opts tcpip.WriteOptions) (*se } // Write writes data to the endpoint's peer. -func (e *endpoint) Write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcpip.Error) { +func (e *Endpoint) Write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcpip.Error) { // Linux completely ignores any address passed to sendto(2) for TCP sockets // (without the MSG_FASTOPEN flag). Corking is unimplemented, so opts.More // and opts.EndOfRecord are also ignored. @@ -1629,7 +1668,7 @@ func (e *endpoint) Write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcp // applied. 
// +checklocks:e.mu // +checklocks:e.rcvQueueMu -func (e *endpoint) selectWindowLocked(rcvBufSize int) (wnd seqnum.Size) { +func (e *Endpoint) selectWindowLocked(rcvBufSize int) (wnd seqnum.Size) { wndFromAvailable := wndFromSpace(e.receiveBufferAvailableLocked(rcvBufSize)) maxWindow := wndFromSpace(rcvBufSize) wndFromUsedBytes := maxWindow - e.RcvBufUsed @@ -1652,7 +1691,7 @@ func (e *endpoint) selectWindowLocked(rcvBufSize int) (wnd seqnum.Size) { // selectWindow invokes selectWindowLocked after acquiring e.rcvQueueMu. // +checklocks:e.mu -func (e *endpoint) selectWindow() (wnd seqnum.Size) { +func (e *Endpoint) selectWindow() (wnd seqnum.Size) { e.rcvQueueMu.Lock() wnd = e.selectWindowLocked(int(e.ops.GetReceiveBufferSize())) e.rcvQueueMu.Unlock() @@ -1675,7 +1714,7 @@ func (e *endpoint) selectWindow() (wnd seqnum.Size) { // // +checklocks:e.mu // +checklocks:e.rcvQueueMu -func (e *endpoint) windowCrossedACKThresholdLocked(deltaBefore int, rcvBufSize int) (crossed bool, above bool) { +func (e *Endpoint) windowCrossedACKThresholdLocked(deltaBefore int, rcvBufSize int) (crossed bool, above bool) { newAvail := int(e.selectWindowLocked(rcvBufSize)) oldAvail := newAvail - deltaBefore if oldAvail < 0 { @@ -1699,28 +1738,28 @@ func (e *endpoint) windowCrossedACKThresholdLocked(deltaBefore int, rcvBufSize i } // OnReuseAddressSet implements tcpip.SocketOptionsHandler.OnReuseAddressSet. -func (e *endpoint) OnReuseAddressSet(v bool) { +func (e *Endpoint) OnReuseAddressSet(v bool) { e.LockUser() e.portFlags.TupleOnly = v e.UnlockUser() } // OnReusePortSet implements tcpip.SocketOptionsHandler.OnReusePortSet. -func (e *endpoint) OnReusePortSet(v bool) { +func (e *Endpoint) OnReusePortSet(v bool) { e.LockUser() e.portFlags.LoadBalanced = v e.UnlockUser() } // OnKeepAliveSet implements tcpip.SocketOptionsHandler.OnKeepAliveSet. 
-func (e *endpoint) OnKeepAliveSet(bool) { +func (e *Endpoint) OnKeepAliveSet(bool) { e.LockUser() e.resetKeepaliveTimer(true /* receivedData */) e.UnlockUser() } // OnDelayOptionSet implements tcpip.SocketOptionsHandler.OnDelayOptionSet. -func (e *endpoint) OnDelayOptionSet(v bool) { +func (e *Endpoint) OnDelayOptionSet(v bool) { if !v { e.LockUser() defer e.UnlockUser() @@ -1732,10 +1771,13 @@ func (e *endpoint) OnDelayOptionSet(v bool) { } // OnCorkOptionSet implements tcpip.SocketOptionsHandler.OnCorkOptionSet. -func (e *endpoint) OnCorkOptionSet(v bool) { +func (e *Endpoint) OnCorkOptionSet(v bool) { if !v { e.LockUser() defer e.UnlockUser() + if e.snd != nil { + e.snd.corkTimer.disable() + } // Handle the corked data. if e.EndpointState().connected() { e.sendData(nil /* next */) @@ -1743,12 +1785,12 @@ func (e *endpoint) OnCorkOptionSet(v bool) { } } -func (e *endpoint) getSendBufferSize() int { +func (e *Endpoint) getSendBufferSize() int { return int(e.ops.GetSendBufferSize()) } // OnSetReceiveBufferSize implements tcpip.SocketOptionsHandler.OnSetReceiveBufferSize. -func (e *endpoint) OnSetReceiveBufferSize(rcvBufSz, oldSz int64) (newSz int64, postSet func()) { +func (e *Endpoint) OnSetReceiveBufferSize(rcvBufSz, oldSz int64) (newSz int64, postSet func()) { e.LockUser() sendNonZeroWindowUpdate := false @@ -1790,13 +1832,13 @@ func (e *endpoint) OnSetReceiveBufferSize(rcvBufSz, oldSz int64) (newSz int64, p } // OnSetSendBufferSize implements tcpip.SocketOptionsHandler.OnSetSendBufferSize. -func (e *endpoint) OnSetSendBufferSize(sz int64) int64 { +func (e *Endpoint) OnSetSendBufferSize(sz int64) int64 { e.sndQueueInfo.TCPSndBufState.AutoTuneSndBufDisabled.Store(1) return sz } // WakeupWriters implements tcpip.SocketOptionsHandler.WakeupWriters. -func (e *endpoint) WakeupWriters() { +func (e *Endpoint) WakeupWriters() { e.LockUser() defer e.UnlockUser() @@ -1811,7 +1853,7 @@ func (e *endpoint) WakeupWriters() { } // SetSockOptInt sets a socket option. 
-func (e *endpoint) SetSockOptInt(opt tcpip.SockOptInt, v int) tcpip.Error { +func (e *Endpoint) SetSockOptInt(opt tcpip.SockOptInt, v int) tcpip.Error { // Lower 2 bits represents ECN bits. RFC 3168, section 23.1 const inetECNMask = 3 @@ -1898,12 +1940,13 @@ func (e *endpoint) SetSockOptInt(opt tcpip.SockOptInt, v int) tcpip.Error { return nil } -func (e *endpoint) HasNIC(id int32) bool { +// HasNIC returns true if the NICID is defined in the stack or id is 0. +func (e *Endpoint) HasNIC(id int32) bool { return id == 0 || e.stack.HasNIC(tcpip.NICID(id)) } // SetSockOpt sets a socket option. -func (e *endpoint) SetSockOpt(opt tcpip.SettableSocketOption) tcpip.Error { +func (e *Endpoint) SetSockOpt(opt tcpip.SettableSocketOption) tcpip.Error { switch v := opt.(type) { case *tcpip.KeepaliveIdleOption: e.LockUser() @@ -1996,7 +2039,7 @@ func (e *endpoint) SetSockOpt(opt tcpip.SettableSocketOption) tcpip.Error { } // readyReceiveSize returns the number of bytes ready to be received. -func (e *endpoint) readyReceiveSize() (int, tcpip.Error) { +func (e *Endpoint) readyReceiveSize() (int, tcpip.Error) { e.LockUser() defer e.UnlockUser() @@ -2012,7 +2055,7 @@ func (e *endpoint) readyReceiveSize() (int, tcpip.Error) { } // GetSockOptInt implements tcpip.Endpoint.GetSockOptInt. -func (e *endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, tcpip.Error) { +func (e *Endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, tcpip.Error) { switch opt { case tcpip.KeepaliveCountOption: e.keepalive.Lock() @@ -2033,11 +2076,16 @@ func (e *endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, tcpip.Error) { return v, nil case tcpip.MaxSegOption: - // This is just stubbed out. Linux never returns the user_mss - // value as it either returns the defaultMSS or returns the - // actual current MSS. Netstack just returns the defaultMSS - // always for now. + // Linux only returns user_mss value if user_mss is set and the socket is + // unconnected. Otherwise Linux returns the actual current MSS. 
Netstack + // mimics the user_mss behavior, but otherwise just returns the defaultMSS + // for now. v := header.TCPDefaultMSS + e.LockUser() + if state := e.EndpointState(); e.userMSS > 0 && (state.internal() || state == StateClose || state == StateListen) { + v = int(e.userMSS) + } + e.UnlockUser() return v, nil case tcpip.MTUDiscoverOption: @@ -2080,7 +2128,7 @@ func (e *endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, tcpip.Error) { } } -func (e *endpoint) getTCPInfo() tcpip.TCPInfoOption { +func (e *Endpoint) getTCPInfo() tcpip.TCPInfoOption { info := tcpip.TCPInfoOption{} e.LockUser() if state := e.EndpointState(); state.internal() { @@ -2109,7 +2157,7 @@ func (e *endpoint) getTCPInfo() tcpip.TCPInfoOption { } // GetSockOpt implements tcpip.Endpoint.GetSockOpt. -func (e *endpoint) GetSockOpt(opt tcpip.GettableSocketOption) tcpip.Error { +func (e *Endpoint) GetSockOpt(opt tcpip.GettableSocketOption) tcpip.Error { switch o := opt.(type) { case *tcpip.TCPInfoOption: *o = e.getTCPInfo() @@ -2166,7 +2214,7 @@ func (e *endpoint) GetSockOpt(opt tcpip.GettableSocketOption) tcpip.Error { // checkV4MappedLocked determines the effective network protocol and converts // addr to its canonical form. // +checklocks:e.mu -func (e *endpoint) checkV4MappedLocked(addr tcpip.FullAddress) (tcpip.FullAddress, tcpip.NetworkProtocolNumber, tcpip.Error) { +func (e *Endpoint) checkV4MappedLocked(addr tcpip.FullAddress) (tcpip.FullAddress, tcpip.NetworkProtocolNumber, tcpip.Error) { unwrapped, netProto, err := e.TransportEndpointInfo.AddrNetProtoLocked(addr, e.ops.GetV6Only()) if err != nil { return tcpip.FullAddress{}, 0, err @@ -2175,12 +2223,12 @@ func (e *endpoint) checkV4MappedLocked(addr tcpip.FullAddress) (tcpip.FullAddres } // Disconnect implements tcpip.Endpoint.Disconnect. -func (*endpoint) Disconnect() tcpip.Error { +func (*Endpoint) Disconnect() tcpip.Error { return &tcpip.ErrNotSupported{} } // Connect connects the endpoint to its peer. 
-func (e *endpoint) Connect(addr tcpip.FullAddress) tcpip.Error { +func (e *Endpoint) Connect(addr tcpip.FullAddress) tcpip.Error { e.LockUser() defer e.UnlockUser() err := e.connect(addr, true) @@ -2198,7 +2246,7 @@ func (e *endpoint) Connect(addr tcpip.FullAddress) tcpip.Error { // registerEndpoint registers the endpoint with the provided address. // // +checklocks:e.mu -func (e *endpoint) registerEndpoint(addr tcpip.FullAddress, netProto tcpip.NetworkProtocolNumber, nicID tcpip.NICID) tcpip.Error { +func (e *Endpoint) registerEndpoint(addr tcpip.FullAddress, netProto tcpip.NetworkProtocolNumber, nicID tcpip.NICID) tcpip.Error { netProtos := []tcpip.NetworkProtocolNumber{netProto} if e.TransportEndpointInfo.ID.LocalPort != 0 { // The endpoint is bound to a port, attempt to register it. @@ -2213,28 +2261,6 @@ func (e *endpoint) registerEndpoint(addr tcpip.FullAddress, netProto tcpip.Netwo // endpoint would be trying to connect to itself). sameAddr := e.TransportEndpointInfo.ID.LocalAddress == e.TransportEndpointInfo.ID.RemoteAddress - // Calculate a port offset based on the destination IP/port and - // src IP to ensure that for a given tuple (srcIP, destIP, - // destPort) the offset used as a starting point is the same to - // ensure that we can cycle through the port space effectively. - portBuf := make([]byte, 2) - binary.LittleEndian.PutUint16(portBuf, e.ID.RemotePort) - - h := jenkins.Sum32(e.protocol.portOffsetSecret) - for _, s := range [][]byte{ - e.ID.LocalAddress.AsSlice(), - e.ID.RemoteAddress.AsSlice(), - portBuf, - } { - // Per io.Writer.Write: - // - // Write must return a non-nil error if it returns n < len(p). 
- if _, err := h.Write(s); err != nil { - panic(err) - } - } - portOffset := h.Sum32() - var twReuse tcpip.TCPTimeWaitReuseOption if err := e.stack.TransportProtocolOption(ProtocolNumber, &twReuse); err != nil { panic(fmt.Sprintf("e.stack.TransportProtocolOption(%d, %#v) = %s", ProtocolNumber, &twReuse, err)) @@ -2251,7 +2277,7 @@ func (e *endpoint) registerEndpoint(addr tcpip.FullAddress, netProto tcpip.Netwo } bindToDevice := tcpip.NICID(e.ops.GetBindToDevice()) - if _, err := e.stack.PickEphemeralPortStable(portOffset, func(p uint16) (bool, tcpip.Error) { + if _, err := e.stack.PickEphemeralPort(e.stack.SecureRNG(), func(p uint16) (bool, tcpip.Error) { if sameAddr && p == e.TransportEndpointInfo.ID.RemotePort { return false, nil } @@ -2264,7 +2290,7 @@ func (e *endpoint) registerEndpoint(addr tcpip.FullAddress, netProto tcpip.Netwo BindToDevice: bindToDevice, Dest: addr, } - if _, err := e.stack.ReservePort(e.stack.Rand(), portRes, nil /* testPort */); err != nil { + if _, err := e.stack.ReservePort(e.stack.SecureRNG(), portRes, nil /* testPort */); err != nil { if _, ok := err.(*tcpip.ErrPortInUse); !ok || !reuse { return false, nil } @@ -2285,7 +2311,7 @@ func (e *endpoint) registerEndpoint(addr tcpip.FullAddress, netProto tcpip.Netwo return false, nil } - tcpEP := transEP.(*endpoint) + tcpEP := transEP.(*Endpoint) tcpEP.LockUser() // If the endpoint is not in TIME-WAIT or if it is in TIME-WAIT but // less than 1 second has elapsed since its recentTS was updated then @@ -2311,7 +2337,7 @@ func (e *endpoint) registerEndpoint(addr tcpip.FullAddress, netProto tcpip.Netwo BindToDevice: bindToDevice, Dest: addr, } - if _, err := e.stack.ReservePort(e.stack.Rand(), portRes, nil /* testPort */); err != nil { + if _, err := e.stack.ReservePort(e.stack.SecureRNG(), portRes, nil /* testPort */); err != nil { return false, nil } } @@ -2353,7 +2379,7 @@ func (e *endpoint) registerEndpoint(addr tcpip.FullAddress, netProto tcpip.Netwo // connect connects the endpoint to its 
peer. // +checklocks:e.mu -func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool) tcpip.Error { +func (e *Endpoint) connect(addr tcpip.FullAddress, handshake bool) tcpip.Error { connectingAddr := addr.Addr addr, netProto, err := e.checkV4MappedLocked(addr) @@ -2466,13 +2492,13 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool) tcpip.Error { } // ConnectEndpoint is not supported. -func (*endpoint) ConnectEndpoint(tcpip.Endpoint) tcpip.Error { +func (*Endpoint) ConnectEndpoint(tcpip.Endpoint) tcpip.Error { return &tcpip.ErrInvalidEndpointState{} } // Shutdown closes the read and/or write end of the endpoint connection to its // peer. -func (e *endpoint) Shutdown(flags tcpip.ShutdownFlags) tcpip.Error { +func (e *Endpoint) Shutdown(flags tcpip.ShutdownFlags) tcpip.Error { e.LockUser() defer e.UnlockUser() @@ -2490,7 +2516,7 @@ func (e *endpoint) Shutdown(flags tcpip.ShutdownFlags) tcpip.Error { } // +checklocks:e.mu -func (e *endpoint) shutdownLocked(flags tcpip.ShutdownFlags) tcpip.Error { +func (e *Endpoint) shutdownLocked(flags tcpip.ShutdownFlags) tcpip.Error { e.shutdownFlags |= flags switch { case e.EndpointState().connected(): @@ -2509,7 +2535,13 @@ func (e *endpoint) shutdownLocked(flags tcpip.ShutdownFlags) tcpip.Error { } // Wake up any readers that maybe waiting for the stream to become // readable. - e.waiterQueue.Notify(waiter.ReadableEvents) + events := waiter.ReadableEvents + if e.shutdownFlags&tcpip.ShutdownWrite == 0 { + // If ShutdownWrite is not set, write end won't close and + // we end up with a half-closed connection + events |= waiter.EventRdHUp + } + e.waiterQueue.Notify(events) } // Close for write. @@ -2566,18 +2598,18 @@ func (e *endpoint) shutdownLocked(flags tcpip.ShutdownFlags) tcpip.Error { // Listen puts the endpoint in "listen" mode, which allows it to accept // new connections. 
-func (e *endpoint) Listen(backlog int) tcpip.Error { - err := e.listen(backlog) - if err != nil { +func (e *Endpoint) Listen(backlog int) tcpip.Error { + if err := e.listen(backlog); err != nil { if !err.IgnoreStats() { e.stack.Stats().TCP.FailedConnectionAttempts.Increment() e.stats.FailedConnectionAttempts.Increment() } + return err } - return err + return nil } -func (e *endpoint) listen(backlog int) tcpip.Error { +func (e *Endpoint) listen(backlog int) tcpip.Error { e.LockUser() defer e.UnlockUser() @@ -2593,10 +2625,11 @@ func (e *endpoint) listen(backlog int) tcpip.Error { e.acceptQueue.capacity = backlog if e.acceptQueue.pendingEndpoints == nil { - e.acceptQueue.pendingEndpoints = make(map[*endpoint]struct{}) + e.acceptQueue.pendingEndpoints = make(map[*Endpoint]struct{}) } e.shutdownFlags = 0 + e.updateConnDirectionState(connDirectionStateOpen) e.rcvQueueMu.Lock() e.RcvClosed = false e.rcvQueueMu.Unlock() @@ -2637,7 +2670,7 @@ func (e *endpoint) listen(backlog int) tcpip.Error { // endpoints. e.acceptMu.Lock() if e.acceptQueue.pendingEndpoints == nil { - e.acceptQueue.pendingEndpoints = make(map[*endpoint]struct{}) + e.acceptQueue.pendingEndpoints = make(map[*Endpoint]struct{}) } if e.acceptQueue.capacity == 0 { e.acceptQueue.capacity = backlog @@ -2655,7 +2688,7 @@ func (e *endpoint) listen(backlog int) tcpip.Error { // to an endpoint previously set to listen mode. // // addr if not-nil will contain the peer address of the returned endpoint. -func (e *endpoint) Accept(peerAddr *tcpip.FullAddress) (tcpip.Endpoint, *waiter.Queue, tcpip.Error) { +func (e *Endpoint) Accept(peerAddr *tcpip.FullAddress) (tcpip.Endpoint, *waiter.Queue, tcpip.Error) { e.LockUser() defer e.UnlockUser() @@ -2668,10 +2701,10 @@ func (e *endpoint) Accept(peerAddr *tcpip.FullAddress) (tcpip.Endpoint, *waiter. } // Get the new accepted endpoint. 
- var n *endpoint + var n *Endpoint e.acceptMu.Lock() if element := e.acceptQueue.endpoints.Front(); element != nil { - n = e.acceptQueue.endpoints.Remove(element).(*endpoint) + n = e.acceptQueue.endpoints.Remove(element).(*Endpoint) } e.acceptMu.Unlock() if n == nil { @@ -2684,7 +2717,7 @@ func (e *endpoint) Accept(peerAddr *tcpip.FullAddress) (tcpip.Endpoint, *waiter. } // Bind binds the endpoint to a specific local port and optionally address. -func (e *endpoint) Bind(addr tcpip.FullAddress) (err tcpip.Error) { +func (e *Endpoint) Bind(addr tcpip.FullAddress) (err tcpip.Error) { e.LockUser() defer e.UnlockUser() @@ -2692,7 +2725,7 @@ func (e *endpoint) Bind(addr tcpip.FullAddress) (err tcpip.Error) { } // +checklocks:e.mu -func (e *endpoint) bindLocked(addr tcpip.FullAddress) (err tcpip.Error) { +func (e *Endpoint) bindLocked(addr tcpip.FullAddress) (err tcpip.Error) { // Don't allow binding once endpoint is not in the initial state // anymore. This is because once the endpoint goes into a connected or // listen state, it is already bound. @@ -2740,7 +2773,7 @@ func (e *endpoint) bindLocked(addr tcpip.FullAddress) (err tcpip.Error) { BindToDevice: bindToDevice, Dest: tcpip.FullAddress{}, } - port, err := e.stack.ReservePort(e.stack.Rand(), portRes, func(p uint16) (bool, tcpip.Error) { + port, err := e.stack.ReservePort(e.stack.SecureRNG(), portRes, func(p uint16) (bool, tcpip.Error) { id := e.TransportEndpointInfo.ID id.LocalPort = p // CheckRegisterTransportEndpoint should only return an error if there is a @@ -2776,7 +2809,7 @@ func (e *endpoint) bindLocked(addr tcpip.FullAddress) (err tcpip.Error) { } // GetLocalAddress returns the address to which the endpoint is bound. 
-func (e *endpoint) GetLocalAddress() (tcpip.FullAddress, tcpip.Error) { +func (e *Endpoint) GetLocalAddress() (tcpip.FullAddress, tcpip.Error) { e.LockUser() defer e.UnlockUser() @@ -2788,7 +2821,7 @@ func (e *endpoint) GetLocalAddress() (tcpip.FullAddress, tcpip.Error) { } // GetRemoteAddress returns the address to which the endpoint is connected. -func (e *endpoint) GetRemoteAddress() (tcpip.FullAddress, tcpip.Error) { +func (e *Endpoint) GetRemoteAddress() (tcpip.FullAddress, tcpip.Error) { e.LockUser() defer e.UnlockUser() @@ -2799,7 +2832,7 @@ func (e *endpoint) GetRemoteAddress() (tcpip.FullAddress, tcpip.Error) { return e.getRemoteAddress(), nil } -func (e *endpoint) getRemoteAddress() tcpip.FullAddress { +func (e *Endpoint) getRemoteAddress() tcpip.FullAddress { return tcpip.FullAddress{ Addr: e.TransportEndpointInfo.ID.RemoteAddress, Port: e.TransportEndpointInfo.ID.RemotePort, @@ -2807,14 +2840,15 @@ func (e *endpoint) getRemoteAddress() tcpip.FullAddress { } } -func (*endpoint) HandlePacket(stack.TransportEndpointID, stack.PacketBufferPtr) { +// HandlePacket implements stack.TransportEndpoint.HandlePacket. +func (*Endpoint) HandlePacket(stack.TransportEndpointID, *stack.PacketBuffer) { // TCP HandlePacket is not required anymore as inbound packets first // land at the Dispatcher which then can either deliver using the // worker go routine or directly do the invoke the tcp processing inline // based on the state of the endpoint. } -func (e *endpoint) enqueueSegment(s *segment) bool { +func (e *Endpoint) enqueueSegment(s *segment) bool { // Send packet to worker goroutine. if !e.segmentQueue.enqueue(s) { // The queue is full, so we drop the segment. 
@@ -2825,7 +2859,7 @@ func (e *endpoint) enqueueSegment(s *segment) bool { return true } -func (e *endpoint) onICMPError(err tcpip.Error, transErr stack.TransportError, pkt stack.PacketBufferPtr) { +func (e *Endpoint) onICMPError(err tcpip.Error, transErr stack.TransportError, pkt *stack.PacketBuffer) { // Update last error first. e.lastErrorMu.Lock() e.lastError = err @@ -2882,7 +2916,7 @@ func (e *endpoint) onICMPError(err tcpip.Error, transErr stack.TransportError, p } // HandleError implements stack.TransportEndpoint. -func (e *endpoint) HandleError(transErr stack.TransportError, pkt stack.PacketBufferPtr) { +func (e *Endpoint) HandleError(transErr stack.TransportError, pkt *stack.PacketBuffer) { handlePacketTooBig := func(mtu uint32) { e.sndQueueInfo.sndQueueMu.Lock() update := false @@ -2924,7 +2958,7 @@ func (e *endpoint) HandleError(transErr stack.TransportError, pkt stack.PacketBu // updateSndBufferUsage is called by the protocol goroutine when room opens up // in the send buffer. The number of newly available bytes is v. -func (e *endpoint) updateSndBufferUsage(v int) { +func (e *Endpoint) updateSndBufferUsage(v int) { sendBufferSize := e.getSendBufferSize() e.sndQueueInfo.sndQueueMu.Lock() notify := e.sndQueueInfo.SndBufUsed >= sendBufferSize>>1 @@ -2952,7 +2986,7 @@ func (e *endpoint) updateSndBufferUsage(v int) { // s will be nil). // // +checklocks:e.mu -func (e *endpoint) readyToRead(s *segment) { +func (e *Endpoint) readyToRead(s *segment) { e.rcvQueueMu.Lock() if s != nil { e.RcvBufUsed += s.payloadSize() @@ -2968,7 +3002,7 @@ func (e *endpoint) readyToRead(s *segment) { // receiveBufferAvailableLocked calculates how many bytes are still available // in the receive buffer. // +checklocks:e.rcvQueueMu -func (e *endpoint) receiveBufferAvailableLocked(rcvBufSize int) int { +func (e *Endpoint) receiveBufferAvailableLocked(rcvBufSize int) int { // We may use more bytes than the buffer size when the receive buffer // shrinks. 
memUsed := e.receiveMemUsed() @@ -2982,7 +3016,7 @@ func (e *endpoint) receiveBufferAvailableLocked(rcvBufSize int) int { // receiveBufferAvailable calculates how many bytes are still available in the // receive buffer based on the actual memory used by all segments held in // receive buffer/pending and segment queue. -func (e *endpoint) receiveBufferAvailable() int { +func (e *Endpoint) receiveBufferAvailable() int { e.rcvQueueMu.Lock() available := e.receiveBufferAvailableLocked(int(e.ops.GetReceiveBufferSize())) e.rcvQueueMu.Unlock() @@ -2990,7 +3024,7 @@ func (e *endpoint) receiveBufferAvailable() int { } // receiveBufferUsed returns the amount of in-use receive buffer. -func (e *endpoint) receiveBufferUsed() int { +func (e *Endpoint) receiveBufferUsed() int { e.rcvQueueMu.Lock() used := e.RcvBufUsed e.rcvQueueMu.Unlock() @@ -2999,18 +3033,18 @@ func (e *endpoint) receiveBufferUsed() int { // receiveMemUsed returns the total memory in use by segments held by this // endpoint. -func (e *endpoint) receiveMemUsed() int { +func (e *Endpoint) receiveMemUsed() int { return int(e.rcvMemUsed.Load()) } // updateReceiveMemUsed adds the provided delta to e.rcvMemUsed. -func (e *endpoint) updateReceiveMemUsed(delta int) { +func (e *Endpoint) updateReceiveMemUsed(delta int) { e.rcvMemUsed.Add(int32(delta)) } // maxReceiveBufferSize returns the stack wide maximum receive buffer size for // an endpoint. -func (e *endpoint) maxReceiveBufferSize() int { +func (e *Endpoint) maxReceiveBufferSize() int { var rs tcpip.TCPReceiveBufferSizeRangeOption if err := e.stack.TransportProtocolOption(ProtocolNumber, &rs); err != nil { // As a fallback return the hardcoded max buffer size. 
@@ -3019,12 +3053,22 @@ func (e *endpoint) maxReceiveBufferSize() int { return rs.Max } +// connDirectionState returns the close state of send and receive part of the endpoint +func (e *Endpoint) connDirectionState() connDirectionState { + return connDirectionState(e.connectionDirectionState.Load()) +} + +// updateConnDirectionState updates the close state of send and receive part of the endpoint +func (e *Endpoint) updateConnDirectionState(state connDirectionState) connDirectionState { + return connDirectionState(e.connectionDirectionState.Swap(uint32(e.connDirectionState() | state))) +} + // rcvWndScaleForHandshake computes the receive window scale to offer to the // peer when window scaling is enabled (true by default). If auto-tuning is // disabled then the window scaling factor is based on the size of the // receiveBuffer otherwise we use the max permissible receive buffer size to // compute the scale. -func (e *endpoint) rcvWndScaleForHandshake() int { +func (e *Endpoint) rcvWndScaleForHandshake() int { bufSizeForScale := e.ops.GetReceiveBufferSize() e.rcvQueueMu.Lock() @@ -3039,7 +3083,7 @@ func (e *endpoint) rcvWndScaleForHandshake() int { // updateRecentTimestamp updates the recent timestamp using the algorithm // described in https://tools.ietf.org/html/rfc7323#section-4.3 -func (e *endpoint) updateRecentTimestamp(tsVal uint32, maxSentAck seqnum.Value, segSeq seqnum.Value) { +func (e *Endpoint) updateRecentTimestamp(tsVal uint32, maxSentAck seqnum.Value, segSeq seqnum.Value) { if e.SendTSOk && seqnum.Value(e.recentTimestamp()).LessThan(seqnum.Value(tsVal)) && segSeq.LessThanEq(maxSentAck) { e.setRecentTimestamp(tsVal) } @@ -3048,29 +3092,29 @@ func (e *endpoint) updateRecentTimestamp(tsVal uint32, maxSentAck seqnum.Value, // maybeEnableTimestamp marks the timestamp option enabled for this endpoint if // the SYN options indicate that timestamp option was negotiated. It also // initializes the recentTS with the value provided in synOpts.TSval. 
-func (e *endpoint) maybeEnableTimestamp(synOpts header.TCPSynOptions) { +func (e *Endpoint) maybeEnableTimestamp(synOpts header.TCPSynOptions) { if synOpts.TS { e.SendTSOk = true e.setRecentTimestamp(synOpts.TSVal) } } -func (e *endpoint) tsVal(now tcpip.MonotonicTime) uint32 { +func (e *Endpoint) tsVal(now tcpip.MonotonicTime) uint32 { return e.TSOffset.TSVal(now) } -func (e *endpoint) tsValNow() uint32 { +func (e *Endpoint) tsValNow() uint32 { return e.tsVal(e.stack.Clock().NowMonotonic()) } -func (e *endpoint) elapsed(now tcpip.MonotonicTime, tsEcr uint32) time.Duration { +func (e *Endpoint) elapsed(now tcpip.MonotonicTime, tsEcr uint32) time.Duration { return e.TSOffset.Elapsed(now, tsEcr) } // maybeEnableSACKPermitted marks the SACKPermitted option enabled for this endpoint // if the SYN options indicate that the SACK option was negotiated and the TCP // stack is configured to enable TCP SACK option. -func (e *endpoint) maybeEnableSACKPermitted(synOpts header.TCPSynOptions) { +func (e *Endpoint) maybeEnableSACKPermitted(synOpts header.TCPSynOptions) { var v tcpip.TCPSACKEnabled if err := e.stack.TransportProtocolOption(ProtocolNumber, &v); err != nil { // Stack doesn't support SACK. So just return. @@ -3083,7 +3127,7 @@ func (e *endpoint) maybeEnableSACKPermitted(synOpts header.TCPSynOptions) { } // maxOptionSize return the maximum size of TCP options. -func (e *endpoint) maxOptionSize() (size int) { +func (e *Endpoint) maxOptionSize() (size int) { var maxSackBlocks [header.TCPMaxSACKBlocks]header.SACKBlock options := e.makeOptions(maxSackBlocks[:]) size = len(options) @@ -3096,7 +3140,7 @@ func (e *endpoint) maxOptionSize() (size int) { // used before invoking the probe. 
// // +checklocks:e.mu -func (e *endpoint) completeStateLocked(s *stack.TCPEndpointState) { +func (e *Endpoint) completeStateLocked(s *stack.TCPEndpointState) { s.TCPEndpointStateInner = e.TCPEndpointStateInner s.ID = stack.TCPEndpointID(e.TransportEndpointInfo.ID) s.SegTime = e.stack.Clock().NowMonotonic() @@ -3134,7 +3178,7 @@ func (e *endpoint) completeStateLocked(s *stack.TCPEndpointState) { s.Sender.SpuriousRecovery = e.snd.spuriousRecovery } -func (e *endpoint) initHostGSO() { +func (e *Endpoint) initHostGSO() { switch e.route.NetProto() { case header.IPv4ProtocolNumber: e.gso.Type = stack.GSOTCPv4 @@ -3150,7 +3194,7 @@ func (e *endpoint) initHostGSO() { e.gso.MaxSize = e.route.GSOMaxSize() } -func (e *endpoint) initGSO() { +func (e *Endpoint) initGSO() { if e.route.HasHostGSOCapability() { e.initHostGSO() } else if e.route.HasGvisorGSOCapability() { @@ -3164,12 +3208,12 @@ func (e *endpoint) initGSO() { // State implements tcpip.Endpoint.State. It exports the endpoint's protocol // state for diagnostics. -func (e *endpoint) State() uint32 { +func (e *Endpoint) State() uint32 { return uint32(e.EndpointState()) } // Info returns a copy of the endpoint info. -func (e *endpoint) Info() tcpip.EndpointInfo { +func (e *Endpoint) Info() tcpip.EndpointInfo { e.LockUser() // Make a copy of the endpoint info. ret := e.TransportEndpointInfo @@ -3178,12 +3222,12 @@ func (e *endpoint) Info() tcpip.EndpointInfo { } // Stats returns a pointer to the endpoint stats. -func (e *endpoint) Stats() tcpip.EndpointStats { +func (e *Endpoint) Stats() tcpip.EndpointStats { return &e.stats } // Wait implements stack.TransportEndpoint.Wait. -func (e *endpoint) Wait() { +func (e *Endpoint) Wait() { waitEntry, notifyCh := waiter.NewChannelEntry(waiter.EventHUp) e.waiterQueue.EventRegister(&waitEntry) defer e.waiterQueue.EventUnregister(&waitEntry) @@ -3195,17 +3239,15 @@ func (e *endpoint) Wait() { } // SocketOptions implements tcpip.Endpoint.SocketOptions. 
-func (e *endpoint) SocketOptions() *tcpip.SocketOptions { +func (e *Endpoint) SocketOptions() *tcpip.SocketOptions { return &e.ops } // GetTCPSendBufferLimits is used to get send buffer size limits for TCP. -func GetTCPSendBufferLimits(s tcpip.StackHandler) tcpip.SendBufferSizeOption { - var ss tcpip.TCPSendBufferSizeRangeOption - if err := s.TransportProtocolOption(header.TCPProtocolNumber, &ss); err != nil { - panic(fmt.Sprintf("s.TransportProtocolOption(%d, %#v) = %s", header.TCPProtocolNumber, ss, err)) - } - +func GetTCPSendBufferLimits(sh tcpip.StackHandler) tcpip.SendBufferSizeOption { + // This type assertion is safe because only the TCP stack calls this + // function. + ss := sh.(*stack.Stack).TCPSendBufferLimits() return tcpip.SendBufferSizeOption{ Min: ss.Min, Default: ss.Default, @@ -3214,7 +3256,7 @@ func GetTCPSendBufferLimits(s tcpip.StackHandler) tcpip.SendBufferSizeOption { } // allowOutOfWindowAck returns true if an out-of-window ACK can be sent now. -func (e *endpoint) allowOutOfWindowAck() bool { +func (e *Endpoint) allowOutOfWindowAck() bool { now := e.stack.Clock().NowMonotonic() if e.lastOutOfWindowAckTime != (tcpip.MonotonicTime{}) { @@ -3247,7 +3289,7 @@ func GetTCPReceiveBufferLimits(s tcpip.StackHandler) tcpip.ReceiveBufferSizeOpti // computeTCPSendBufferSize implements auto tuning of send buffer size and // returns the new send buffer size. -func (e *endpoint) computeTCPSendBufferSize() int64 { +func (e *Endpoint) computeTCPSendBufferSize() int64 { curSndBufSz := int64(e.getSendBufferSize()) // Auto tuning is disabled when the user explicitly sets the send @@ -3277,3 +3319,8 @@ func (e *endpoint) computeTCPSendBufferSize() int64 { return newSndBufSz } + +// GetAcceptConn implements tcpip.SocketOptionsHandler. 
+func (e *Endpoint) GetAcceptConn() bool { + return EndpointState(e.State()) == StateListen +} diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint_state.go index 8382b35b..5a6d2f6a 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint_state.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint_state.go @@ -15,6 +15,7 @@ package tcp import ( + "context" "fmt" "gvisor.dev/gvisor/pkg/atomicbitops" @@ -26,7 +27,7 @@ import ( ) // beforeSave is invoked by stateify. -func (e *endpoint) beforeSave() { +func (e *Endpoint) beforeSave() { // Stop incoming packets. e.segmentQueue.freeze() @@ -56,26 +57,28 @@ func (e *endpoint) beforeSave() { default: panic(fmt.Sprintf("endpoint in unknown state %v", e.EndpointState())) } + + e.stack.RegisterResumableEndpoint(e) } // saveEndpoints is invoked by stateify. -func (a *acceptQueue) saveEndpoints() []*endpoint { - acceptedEndpoints := make([]*endpoint, a.endpoints.Len()) +func (a *acceptQueue) saveEndpoints() []*Endpoint { + acceptedEndpoints := make([]*Endpoint, a.endpoints.Len()) for i, e := 0, a.endpoints.Front(); e != nil; i, e = i+1, e.Next() { - acceptedEndpoints[i] = e.Value.(*endpoint) + acceptedEndpoints[i] = e.Value.(*Endpoint) } return acceptedEndpoints } // loadEndpoints is invoked by stateify. -func (a *acceptQueue) loadEndpoints(acceptedEndpoints []*endpoint) { +func (a *acceptQueue) loadEndpoints(_ context.Context, acceptedEndpoints []*Endpoint) { for _, ep := range acceptedEndpoints { a.endpoints.PushBack(ep) } } // saveState is invoked by stateify. -func (e *endpoint) saveState() EndpointState { +func (e *Endpoint) saveState() EndpointState { return e.EndpointState() } @@ -89,7 +92,7 @@ var connectingLoading sync.WaitGroup // Bound endpoint loading happens last. // loadState is invoked by stateify. 
-func (e *endpoint) loadState(epState EndpointState) { +func (e *Endpoint) loadState(_ context.Context, epState EndpointState) { // This is to ensure that the loading wait groups include all applicable // endpoints before any asynchronous calls to the Wait() methods. // For restore purposes we treat TimeWait like a connected endpoint. @@ -109,24 +112,25 @@ func (e *endpoint) loadState(epState EndpointState) { } // afterLoad is invoked by stateify. -func (e *endpoint) afterLoad() { +func (e *Endpoint) afterLoad(ctx context.Context) { // RacyLoad() can be used because we are initializing e. e.origEndpointState = e.state.RacyLoad() // Restore the endpoint to InitialState as it will be moved to - // its origEndpointState during Resume. + // its origEndpointState during Restore. e.state = atomicbitops.FromUint32(uint32(StateInitial)) - stack.StackFromEnv.RegisterRestoredEndpoint(e) + stack.RestoreStackFromContext(ctx).RegisterRestoredEndpoint(e) } -// Resume implements tcpip.ResumableEndpoint.Resume. -func (e *endpoint) Resume(s *stack.Stack) { +// Restore implements tcpip.RestoredEndpoint.Restore. 
+func (e *Endpoint) Restore(s *stack.Stack) { if !e.EndpointState().closed() { - e.keepalive.timer.init(s.Clock(), maybeFailTimerHandler(e, e.keepaliveTimerExpired)) + e.keepalive.timer.init(s.Clock(), timerHandler(e, e.keepaliveTimerExpired)) } if snd := e.snd; snd != nil { - snd.resendTimer.init(s.Clock(), maybeFailTimerHandler(e, e.snd.retransmitTimerExpired)) + snd.resendTimer.init(s.Clock(), timerHandler(e, e.snd.retransmitTimerExpired)) snd.reorderTimer.init(s.Clock(), timerHandler(e, e.snd.rc.reorderTimerExpired)) snd.probeTimer.init(s.Clock(), timerHandler(e, e.snd.probeTimerExpired)) + snd.corkTimer.init(s.Clock(), timerHandler(e, e.snd.corkTimerExpired)) } e.stack = s e.protocol = protocolFromStack(s) @@ -193,6 +197,11 @@ func (e *endpoint) Resume(s *stack.Stack) { e.timeWaitTimer = e.stack.Clock().AfterFunc(e.getTimeWaitDuration(), e.timeWaitTimerExpired) } + if e.ops.GetCorkOption() { + // Rearm the timer if TCP_CORK is enabled which will + // drain all the segments in the queue after restore. + e.snd.corkTimer.enable(MinRTO) + } e.mu.Unlock() connectedLoading.Done() case epState == StateListen: @@ -243,7 +252,7 @@ func (e *endpoint) Resume(s *stack.Stack) { panic(fmt.Sprintf("FindRoute failed when restoring endpoint w/ ID: %+v", e.ID)) } e.route = r - timer, err := newBackoffTimer(e.stack.Clock(), InitialRTO, MaxRTO, maybeFailTimerHandler(e, e.h.retransmitHandlerLocked)) + timer, err := newBackoffTimer(e.stack.Clock(), InitialRTO, MaxRTO, timerHandler(e, e.h.retransmitHandlerLocked)) if err != nil { panic(fmt.Sprintf("newBackOffTimer(_, %s, %s, _) failed: %s", InitialRTO, MaxRTO, err)) } @@ -269,3 +278,8 @@ func (e *endpoint) Resume(s *stack.Stack) { tcpip.DeleteDanglingEndpoint(e) } } + +// Resume implements tcpip.ResumableEndpoint.Resume. 
+func (e *Endpoint) Resume() { + e.segmentQueue.thaw() +} diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/forwarder.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/forwarder.go index 3d632939..39a52215 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/forwarder.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/forwarder.go @@ -64,7 +64,7 @@ func NewForwarder(s *stack.Stack, rcvWnd, maxInFlight int, handler func(*Forward // // This function is expected to be passed as an argument to the // stack.SetTransportProtocolHandler function. -func (f *Forwarder) HandlePacket(id stack.TransportEndpointID, pkt stack.PacketBufferPtr) bool { +func (f *Forwarder) HandlePacket(id stack.TransportEndpointID, pkt *stack.PacketBuffer) bool { s, err := newIncomingSegment(id, f.stack.Clock(), pkt) if err != nil { return false @@ -88,6 +88,7 @@ func (f *Forwarder) HandlePacket(id stack.TransportEndpointID, pkt stack.PacketB // Ignore the segment if we're beyond the limit. if len(f.inFlight) >= f.maxInFlight { + f.stack.Stats().TCP.ForwardMaxInFlightDrop.Increment() return true } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/protocol.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/protocol.go index 81059d6a..8d53a9e8 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/protocol.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/protocol.go @@ -16,13 +16,15 @@ package tcp import ( + "crypto/sha256" + "encoding/binary" + "fmt" "runtime" "strings" "time" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/tcpip" - "gvisor.dev/gvisor/pkg/tcpip/hash/jenkins" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/header/parse" "gvisor.dev/gvisor/pkg/tcpip/internal/tcp" @@ -107,9 +109,8 @@ type protocol struct { dispatcher dispatcher // The following secrets are initialized once and stay unchanged after. 
- seqnumSecret uint32 - portOffsetSecret uint32 - tsOffsetSecret uint32 + seqnumSecret [16]byte + tsOffsetSecret [16]byte } // Number returns the tcp protocol number. @@ -144,7 +145,7 @@ func (*protocol) ParsePorts(v []byte) (src, dst uint16, err tcpip.Error) { // to a specific processing queue. Each queue is serviced by its own processor // goroutine which is responsible for dequeuing and doing full TCP dispatch of // the packet. -func (p *protocol) QueuePacket(ep stack.TransportEndpoint, id stack.TransportEndpointID, pkt stack.PacketBufferPtr) { +func (p *protocol) QueuePacket(ep stack.TransportEndpoint, id stack.TransportEndpointID, pkt *stack.PacketBuffer) { p.dispatcher.queuePacket(ep, id, p.stack.Clock(), pkt) } @@ -155,7 +156,7 @@ func (p *protocol) QueuePacket(ep stack.TransportEndpoint, id stack.TransportEnd // a reset is sent in response to any incoming segment except another reset. In // particular, SYNs addressed to a non-existent connection are rejected by this // means." -func (p *protocol) HandleUnknownDestinationPacket(id stack.TransportEndpointID, pkt stack.PacketBufferPtr) stack.UnknownDestinationPacketDisposition { +func (p *protocol) HandleUnknownDestinationPacket(id stack.TransportEndpointID, pkt *stack.PacketBuffer) stack.UnknownDestinationPacketDisposition { s, err := newIncomingSegment(id, p.stack.Clock(), pkt) if err != nil { return stack.UnknownDestinationPacketMalformed @@ -178,16 +179,15 @@ func (p *protocol) tsOffset(src, dst tcpip.Address) tcp.TSOffset { // // See https://tools.ietf.org/html/rfc7323#section-5.4 for details on // why this is required. - // - // TODO(https://gvisor.dev/issues/6473): This is not really secure as - // it does not use the recommended algorithm linked above. - h := jenkins.Sum32(p.tsOffsetSecret) + h := sha256.New() + // Per hash.Hash.Writer: // // It never returns an error. 
+ _, _ = h.Write(p.tsOffsetSecret[:]) _, _ = h.Write(src.AsSlice()) _, _ = h.Write(dst.AsSlice()) - return tcp.NewTSOffset(h.Sum32()) + return tcp.NewTSOffset(binary.LittleEndian.Uint32(h.Sum(nil)[:4])) } // replyWithReset replies to the given segment with a reset segment. @@ -480,6 +480,13 @@ func (p *protocol) Option(option tcpip.GettableTransportProtocolOption) tcpip.Er } } +// SendBufferSize implements stack.SendBufSizeProto. +func (p *protocol) SendBufferSize() tcpip.TCPSendBufferSizeRangeOption { + p.mu.RLock() + defer p.mu.RUnlock() + return p.sendBufferSize +} + // Close implements stack.TransportProtocol.Close. func (p *protocol) Close() { p.dispatcher.close() @@ -501,12 +508,21 @@ func (p *protocol) Resume() { } // Parse implements stack.TransportProtocol.Parse. -func (*protocol) Parse(pkt stack.PacketBufferPtr) bool { +func (*protocol) Parse(pkt *stack.PacketBuffer) bool { return parse.TCP(pkt) } // NewProtocol returns a TCP transport protocol. func NewProtocol(s *stack.Stack) stack.TransportProtocol { + rng := s.SecureRNG() + var seqnumSecret [16]byte + var tsOffsetSecret [16]byte + if n, err := rng.Reader.Read(seqnumSecret[:]); err != nil || n != len(seqnumSecret) { + panic(fmt.Sprintf("Read() failed: %v", err)) + } + if n, err := rng.Reader.Read(tsOffsetSecret[:]); err != nil || n != len(tsOffsetSecret) { + panic(fmt.Sprintf("Read() failed: %v", err)) + } p := protocol{ stack: s, sendBufferSize: tcpip.TCPSendBufferSizeRangeOption{ @@ -530,11 +546,10 @@ func NewProtocol(s *stack.Stack) stack.TransportProtocol { maxRTO: MaxRTO, maxRetries: MaxRetries, recovery: tcpip.TCPRACKLossDetection, - seqnumSecret: s.Rand().Uint32(), - portOffsetSecret: s.Rand().Uint32(), - tsOffsetSecret: s.Rand().Uint32(), + seqnumSecret: seqnumSecret, + tsOffsetSecret: tsOffsetSecret, } - p.dispatcher.init(s.Rand(), runtime.GOMAXPROCS(0)) + p.dispatcher.init(s.InsecureRNG(), runtime.GOMAXPROCS(0)) return &p } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/rack.go 
b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/rack.go index 7dccc956..66ea6e5b 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/rack.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/rack.go @@ -188,9 +188,9 @@ func (s *sender) schedulePTO() { // https://tools.ietf.org/html/draft-ietf-tcpm-rack-08#section-7.5.2. // // +checklocks:s.ep.mu -func (s *sender) probeTimerExpired() { - if s.probeTimer.isZero() || !s.probeTimer.checkExpiration() { - return +func (s *sender) probeTimerExpired() tcpip.Error { + if s.probeTimer.isUninitialized() || !s.probeTimer.checkExpiration() { + return nil } var dataSent bool @@ -231,7 +231,7 @@ func (s *sender) probeTimerExpired() { // not the probe timer. This ensures that the sender does not send repeated, // back-to-back tail loss probes. s.postXmit(dataSent, false /* shouldScheduleProbe */) - return + return nil } // detectTLPRecovery detects if recovery was accomplished by the loss probes @@ -388,14 +388,14 @@ func (rc *rackControl) detectLoss(rcvTime tcpip.MonotonicTime) int { // before the reorder timer expired. // // +checklocks:rc.snd.ep.mu -func (rc *rackControl) reorderTimerExpired() { - if rc.snd.reorderTimer.isZero() || !rc.snd.reorderTimer.checkExpiration() { - return +func (rc *rackControl) reorderTimerExpired() tcpip.Error { + if rc.snd.reorderTimer.isUninitialized() || !rc.snd.reorderTimer.checkExpiration() { + return nil } numLost := rc.detectLoss(rc.snd.ep.stack.Clock().NowMonotonic()) if numLost == 0 { - return + return nil } fastRetransmit := false @@ -406,7 +406,7 @@ func (rc *rackControl) reorderTimerExpired() { } rc.DoRecovery(nil, fastRetransmit) - return + return nil } // DoRecovery implements lossRecovery.DoRecovery. 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/rcv.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/rcv.go index 98e4d12f..349f950f 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/rcv.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/rcv.go @@ -30,7 +30,7 @@ import ( // +stateify savable type receiver struct { stack.TCPReceiverState - ep *endpoint + ep *Endpoint // rcvWnd is the non-scaled receive window last advertised to the peer. rcvWnd seqnum.Size @@ -52,7 +52,7 @@ type receiver struct { lastRcvdAckTime tcpip.MonotonicTime } -func newReceiver(ep *endpoint, irs seqnum.Value, rcvWnd seqnum.Size, rcvWndScale uint8) *receiver { +func newReceiver(ep *Endpoint, irs seqnum.Value, rcvWnd seqnum.Size, rcvWndScale uint8) *receiver { return &receiver{ ep: ep, TCPReceiverState: stack.TCPReceiverState{ @@ -293,6 +293,7 @@ func (r *receiver) consumeSegment(s *segment, segSeq seqnum.Value, segLen seqnum r.pendingRcvdSegments[i] = nil } r.pendingRcvdSegments = r.pendingRcvdSegments[:first] + r.ep.updateConnDirectionState(connDirectionStateRcvClosed) return true } @@ -480,13 +481,26 @@ func (r *receiver) handleRcvdSegment(s *segment) (drop bool, err tcpip.Error) { // Defer segment processing if it can't be consumed now. if !r.consumeSegment(s, segSeq, segLen) { if segLen > 0 || s.flags.Contains(header.TCPFlagFin) { - // We only store the segment if it's within our buffer size limit. + // We only store the segment if it's within our buffer + // size limit. // - // Only use 75% of the receive buffer queue for out-of-order - // segments. This ensures that we always leave some space for the inorder - // segments to arrive allowing pending segments to be processed and + // Only use 75% of the receive buffer queue for + // out-of-order segments. This ensures that we always + // leave some space for the inorder segments to arrive + // allowing pending segments to be processed and // delivered to the user. 
- if rcvBufSize := r.ep.ops.GetReceiveBufferSize(); rcvBufSize > 0 && (r.PendingBufUsed+int(segLen)) < int(rcvBufSize)>>2 { + // + // The ratio must be at least 50% (the size of rwnd) to + // leave space for retransmitted dropped packets. 51% + // would make recovery slow when there are multiple + // drops by necessitating multiple round trips. 100% + // would enable the buffer to be totally full of + // out-of-order data and stall the connection. + // + // An ideal solution is to ensure that there are at + // least N bytes free when N bytes are missing, but we + // don't have that computed at this point in the stack. + if rcvBufSize := r.ep.ops.GetReceiveBufferSize(); rcvBufSize > 0 && (r.PendingBufUsed+int(segLen)) < int(rcvBufSize-rcvBufSize/4) { r.ep.rcvQueueMu.Lock() r.PendingBufUsed += s.segMemSize() r.ep.rcvQueueMu.Unlock() @@ -540,7 +554,7 @@ func (r *receiver) handleTimeWaitSegment(s *segment) (resetTimeWait bool, newSyn segLen := seqnum.Size(s.payloadSize()) // Just silently drop any RST packets in TIME_WAIT. We do not support - // TIME_WAIT assasination as a result we confirm w/ fix 1 as described + // TIME_WAIT assassination as a result we confirm w/ fix 1 as described // in https://tools.ietf.org/html/rfc1337#section-3. 
// // This behavior overrides RFC793 page 70 where we transition to CLOSED diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment.go index df520658..6de583da 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment.go @@ -55,11 +55,11 @@ type segment struct { segmentEntry segmentRefs - ep *endpoint + ep *Endpoint qFlags queueFlags id stack.TransportEndpointID `state:"manual"` - pkt stack.PacketBufferPtr + pkt *stack.PacketBuffer sequenceNumber seqnum.Value ackNumber seqnum.Value @@ -92,15 +92,29 @@ type segment struct { lost bool } -func newIncomingSegment(id stack.TransportEndpointID, clock tcpip.Clock, pkt stack.PacketBufferPtr) (*segment, error) { +func newIncomingSegment(id stack.TransportEndpointID, clock tcpip.Clock, pkt *stack.PacketBuffer) (*segment, error) { hdr := header.TCP(pkt.TransportHeader().Slice()) - netHdr := pkt.Network() + var srcAddr tcpip.Address + var dstAddr tcpip.Address + switch netProto := pkt.NetworkProtocolNumber; netProto { + case header.IPv4ProtocolNumber: + hdr := header.IPv4(pkt.NetworkHeader().Slice()) + srcAddr = hdr.SourceAddress() + dstAddr = hdr.DestinationAddress() + case header.IPv6ProtocolNumber: + hdr := header.IPv6(pkt.NetworkHeader().Slice()) + srcAddr = hdr.SourceAddress() + dstAddr = hdr.DestinationAddress() + default: + panic(fmt.Sprintf("unknown network protocol number %d", netProto)) + } + csum, csumValid, ok := header.TCPValid( hdr, func() uint16 { return pkt.Data().Checksum() }, uint16(pkt.Data().Size()), - netHdr.SourceAddress(), - netHdr.DestinationAddress(), + srcAddr, + dstAddr, pkt.RXChecksumValidated) if !ok { return nil, fmt.Errorf("header data offset does not respect size constraints: %d < offset < %d, got offset=%d", header.TCPMinimumSize, len(hdr), hdr.DataOffset()) @@ -168,7 +182,7 @@ func (s *segment) merge(oth *segment) { // setOwner sets the owning 
endpoint for this segment. Its required // to be called to ensure memory accounting for receive/send buffer // queues is done properly. -func (s *segment) setOwner(ep *endpoint, qFlags queueFlags) { +func (s *segment) setOwner(ep *Endpoint, qFlags queueFlags) { switch qFlags { case recvQ: ep.updateReceiveMemUsed(s.segMemSize()) diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment_queue.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment_queue.go index 53839387..6f003efc 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment_queue.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment_queue.go @@ -24,7 +24,7 @@ import ( type segmentQueue struct { mu sync.Mutex `state:"nosave"` list segmentList `state:"wait"` - ep *endpoint + ep *Endpoint frozen bool } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment_state.go index 57bbd69f..76ab5629 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment_state.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment_state.go @@ -14,6 +14,10 @@ package tcp +import ( + "context" +) + // saveOptions is invoked by stateify. func (s *segment) saveOptions() []byte { // We cannot save s.options directly as it may point to s.data's trimmed @@ -23,7 +27,7 @@ func (s *segment) saveOptions() []byte { } // loadOptions is invoked by stateify. -func (s *segment) loadOptions(options []byte) { +func (s *segment) loadOptions(_ context.Context, options []byte) { // NOTE: We cannot point s.options back into s.data's trimmed tail. But // it is OK as they do not need to aliased. Plus, options is already // allocated so there is no cost here. 
diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/snd.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/snd.go index 9c730691..41b5b534 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/snd.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/snd.go @@ -88,7 +88,7 @@ type lossRecovery interface { // +stateify savable type sender struct { stack.TCPSenderState - ep *endpoint + ep *Endpoint // lr is the loss recovery algorithm used by the sender. lr lossRecovery @@ -151,6 +151,13 @@ type sender struct { // segment after entering an RTO for the first time as described in // RFC3522 Section 3.2. retransmitTS uint32 + + // startCork start corking the segments. + startCork bool + + // corkTimer is used to drain the segments which are held when TCP_CORK + // option is enabled. + corkTimer timer `state:"nosave"` } // rtt is a synchronization wrapper used to appease stateify. See the comment @@ -164,7 +171,7 @@ type rtt struct { } // +checklocks:ep.mu -func newSender(ep *endpoint, iss, irs seqnum.Value, sndWnd seqnum.Size, mss uint16, sndWndScale int) *sender { +func newSender(ep *Endpoint, iss, irs seqnum.Value, sndWnd seqnum.Size, mss uint16, sndWndScale int) *sender { // The sender MUST reduce the TCP data length to account for any IP or // TCP options that it is including in the packets that it sends. 
// See: https://tools.ietf.org/html/rfc6691#section-2 @@ -205,9 +212,10 @@ func newSender(ep *endpoint, iss, irs seqnum.Value, sndWnd seqnum.Size, mss uint s.SndWndScale = uint8(sndWndScale) } - s.resendTimer.init(s.ep.stack.Clock(), maybeFailTimerHandler(s.ep, s.retransmitTimerExpired)) + s.resendTimer.init(s.ep.stack.Clock(), timerHandler(s.ep, s.retransmitTimerExpired)) s.reorderTimer.init(s.ep.stack.Clock(), timerHandler(s.ep, s.rc.reorderTimerExpired)) s.probeTimer.init(s.ep.stack.Clock(), timerHandler(s.ep, s.probeTimerExpired)) + s.corkTimer.init(s.ep.stack.Clock(), timerHandler(s.ep, s.corkTimerExpired)) s.ep.AssertLockHeld(ep) s.updateMaxPayloadSize(int(ep.route.MTU()), 0) @@ -437,7 +445,7 @@ func (s *sender) resendSegment() { func (s *sender) retransmitTimerExpired() tcpip.Error { // Check if the timer actually expired or if it's a spurious wake due // to a previously orphaned runtime timer. - if s.resendTimer.isZero() || !s.resendTimer.checkExpiration() { + if s.resendTimer.isUninitialized() || !s.resendTimer.checkExpiration() { return nil } @@ -646,12 +654,12 @@ func (s *sender) NextSeg(nextSegHint *segment) (nextSeg, hint *segment, rescueRt // 1. If there exists a smallest unSACKED sequence number // 'S2' that meets the following 3 criteria for determinig // loss, the sequence range of one segment of up to SMSS - // octects starting with S2 MUST be returned. + // octets starting with S2 MUST be returned. if !s.ep.scoreboard.IsSACKED(header.SACKBlock{Start: segSeq, End: segSeq.Add(1)}) { // NextSeg(): // // (1.a) S2 is greater than HighRxt - // (1.b) S2 is less than highest octect covered by + // (1.b) S2 is less than highest octet covered by // any received SACK. if s.FastRecovery.HighRxt.LessThan(segSeq) && segSeq.LessThan(s.ep.scoreboard.maxSACKED) { // NextSeg(): @@ -682,7 +690,7 @@ func (s *sender) NextSeg(nextSegHint *segment) (nextSeg, hint *segment, rescueRt // retransmission per entry into loss recovery. 
If // HighACK is greater than RescueRxt (or RescueRxt // is undefined), then one segment of upto SMSS - // octects that MUST include the highest outstanding + // octets that MUST include the highest outstanding // unSACKed sequence number SHOULD be returned, and // RescueRxt set to RecoveryPoint. HighRxt MUST NOT // be updated. @@ -776,10 +784,20 @@ func (s *sender) maybeSendSegment(seg *segment, limit int, end seqnum.Value) (se } // With TCP_CORK, hold back until minimum of the available // send space and MSS. - // TODO(gvisor.dev/issue/2833): Drain the held segments after a - // timeout. - if seg.payloadSize() < s.MaxPayloadSize && s.ep.ops.GetCorkOption() { - return false + if s.ep.ops.GetCorkOption() { + if seg.payloadSize() < s.MaxPayloadSize { + if !s.startCork { + s.startCork = true + // Enable the timer for + // 200ms, after which + // the segments are drained. + s.corkTimer.enable(MinRTO) + } + return false + } + // Disable the TCP_CORK timer. + s.startCork = false + s.corkTimer.disable() } } } @@ -799,6 +817,7 @@ func (s *sender) maybeSendSegment(seg *segment, limit int, end seqnum.Value) (se segEnd = seg.sequenceNumber.Add(1) // Update the state to reflect that we have now // queued a FIN. + s.ep.updateConnDirectionState(connDirectionStateSndClosed) switch s.ep.EndpointState() { case StateCloseWait: s.ep.setEndpointState(StateLastAck) @@ -821,7 +840,7 @@ func (s *sender) maybeSendSegment(seg *segment, limit int, end seqnum.Value) (se } // If the whole segment or at least 1MSS sized segment cannot - // be accomodated in the receiver advertized window, skip + // be accommodated in the receiver advertised window, skip // splitting and sending of the segment. 
ref: // net/ipv4/tcp_output.c::tcp_snd_wnd_test() // @@ -920,7 +939,7 @@ func (s *sender) postXmit(dataSent bool, shouldScheduleProbe bool) { s.ep.disableKeepaliveTimer() } - // If the sender has advertized zero receive window and we have + // If the sender has advertised zero receive window and we have // data to be sent out, start zero window probing to query the // the remote for it's receive window size. if s.writeNext != nil && s.SndWnd == 0 { @@ -952,7 +971,7 @@ func (s *sender) postXmit(dataSent bool, shouldScheduleProbe bool) { func (s *sender) sendData() { limit := s.MaxPayloadSize if s.gso { - limit = int(s.ep.gso.MaxSize - header.TCPHeaderMaximumSize) + limit = int(s.ep.gso.MaxSize - header.TCPTotalHeaderMaximumSize - 1) } end := s.SndUna.Add(s.SndWnd) @@ -1090,7 +1109,7 @@ func (s *sender) SetPipe() { // // NOTE: here we mark the whole segment as lost. We do not try // and test every byte in our write buffer as we maintain our - // pipe in terms of oustanding packets and not bytes. + // pipe in terms of outstanding packets and not bytes. if !s.ep.scoreboard.IsRangeLost(sb) { pipe++ } @@ -1452,7 +1471,7 @@ func (s *sender) handleRcvdSegment(rcvdSeg *segment) { // Stash away the current window size. s.SndWnd = rcvdSeg.window - // Disable zero window probing if remote advertizes a non-zero receive + // Disable zero window probing if remote advertises a non-zero receive // window. This can be with an ACK to the zero window probe (where the // acknumber refers to the already acknowledged byte) OR to any previously // unacknowledged segment. @@ -1667,7 +1686,7 @@ func (s *sender) sendSegment(seg *segment) tcpip.Error { // flags and sequence number. 
// +checklocks:s.ep.mu // +checklocksalias:s.ep.rcv.ep.mu=s.ep.mu -func (s *sender) sendSegmentFromPacketBuffer(pkt stack.PacketBufferPtr, flags header.TCPFlags, seq seqnum.Value) tcpip.Error { +func (s *sender) sendSegmentFromPacketBuffer(pkt *stack.PacketBuffer, flags header.TCPFlags, seq seqnum.Value) tcpip.Error { s.LastSendTime = s.ep.stack.Clock().NowMonotonic() if seq == s.RTTMeasureSeqNum { s.RTTMeasureTime = s.LastSendTime @@ -1723,3 +1742,24 @@ func (s *sender) updateWriteNext(seg *segment) { } s.writeNext = seg } + +// corkTimerExpired drains all the segments when TCP_CORK is enabled. +// +checklocks:s.ep.mu +func (s *sender) corkTimerExpired() tcpip.Error { + // Check if the timer actually expired or if it's a spurious wake due + // to a previously orphaned runtime timer. + if s.corkTimer.isUninitialized() || !s.corkTimer.checkExpiration() { + return nil + } + + // Assign sequence number and flags to the segment. + seg := s.writeNext + if seg == nil { + return nil + } + seg.sequenceNumber = s.SndNxt + seg.flags = header.TCPFlagAck | header.TCPFlagPsh + // Drain all the segments. + s.sendData() + return nil +} diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_endpoint_list.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_endpoint_list.go index 3129cf2b..67bfa999 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_endpoint_list.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_endpoint_list.go @@ -13,7 +13,7 @@ type endpointElementMapper struct{} // This default implementation should be inlined. // //go:nosplit -func (endpointElementMapper) linkerFor(elem *endpoint) *endpoint { return elem } +func (endpointElementMapper) linkerFor(elem *Endpoint) *Endpoint { return elem } // List is an intrusive list. Entries can be added to or removed from the list // in O(1) time and with no additional memory allocations. 
@@ -28,8 +28,8 @@ func (endpointElementMapper) linkerFor(elem *endpoint) *endpoint { return elem } // // +stateify savable type endpointList struct { - head *endpoint - tail *endpoint + head *Endpoint + tail *Endpoint } // Reset resets list l to the empty state. @@ -48,14 +48,14 @@ func (l *endpointList) Empty() bool { // Front returns the first element of list l or nil. // //go:nosplit -func (l *endpointList) Front() *endpoint { +func (l *endpointList) Front() *Endpoint { return l.head } // Back returns the last element of list l or nil. // //go:nosplit -func (l *endpointList) Back() *endpoint { +func (l *endpointList) Back() *Endpoint { return l.tail } @@ -74,7 +74,7 @@ func (l *endpointList) Len() (count int) { // PushFront inserts the element e at the front of list l. // //go:nosplit -func (l *endpointList) PushFront(e *endpoint) { +func (l *endpointList) PushFront(e *Endpoint) { linker := endpointElementMapper{}.linkerFor(e) linker.SetNext(l.head) linker.SetPrev(nil) @@ -107,7 +107,7 @@ func (l *endpointList) PushFrontList(m *endpointList) { // PushBack inserts the element e at the back of list l. // //go:nosplit -func (l *endpointList) PushBack(e *endpoint) { +func (l *endpointList) PushBack(e *Endpoint) { linker := endpointElementMapper{}.linkerFor(e) linker.SetNext(nil) linker.SetPrev(l.tail) @@ -140,7 +140,7 @@ func (l *endpointList) PushBackList(m *endpointList) { // InsertAfter inserts e after b. // //go:nosplit -func (l *endpointList) InsertAfter(b, e *endpoint) { +func (l *endpointList) InsertAfter(b, e *Endpoint) { bLinker := endpointElementMapper{}.linkerFor(b) eLinker := endpointElementMapper{}.linkerFor(e) @@ -160,7 +160,7 @@ func (l *endpointList) InsertAfter(b, e *endpoint) { // InsertBefore inserts e before a. 
// //go:nosplit -func (l *endpointList) InsertBefore(a, e *endpoint) { +func (l *endpointList) InsertBefore(a, e *Endpoint) { aLinker := endpointElementMapper{}.linkerFor(a) eLinker := endpointElementMapper{}.linkerFor(e) @@ -179,7 +179,7 @@ func (l *endpointList) InsertBefore(a, e *endpoint) { // Remove removes e from l. // //go:nosplit -func (l *endpointList) Remove(e *endpoint) { +func (l *endpointList) Remove(e *Endpoint) { linker := endpointElementMapper{}.linkerFor(e) prev := linker.Prev() next := linker.Next() @@ -206,34 +206,34 @@ func (l *endpointList) Remove(e *endpoint) { // // +stateify savable type endpointEntry struct { - next *endpoint - prev *endpoint + next *Endpoint + prev *Endpoint } // Next returns the entry that follows e in the list. // //go:nosplit -func (e *endpointEntry) Next() *endpoint { +func (e *endpointEntry) Next() *Endpoint { return e.next } // Prev returns the entry that precedes e in the list. // //go:nosplit -func (e *endpointEntry) Prev() *endpoint { +func (e *endpointEntry) Prev() *Endpoint { return e.prev } // SetNext assigns 'entry' as the entry that follows e in the list. // //go:nosplit -func (e *endpointEntry) SetNext(elem *endpoint) { +func (e *endpointEntry) SetNext(elem *Endpoint) { e.next = elem } // SetPrev assigns 'entry' as the entry that precedes e in the list. 
// //go:nosplit -func (e *endpointEntry) SetPrev(elem *endpoint) { +func (e *endpointEntry) SetPrev(elem *Endpoint) { e.prev = elem } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_segment_refs.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_segment_refs.go index e40251b2..a06b3f35 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_segment_refs.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_segment_refs.go @@ -1,6 +1,7 @@ package tcp import ( + "context" "fmt" "gvisor.dev/gvisor/pkg/atomicbitops" @@ -134,7 +135,7 @@ func (r *segmentRefs) DecRef(destroy func()) { } } -func (r *segmentRefs) afterLoad() { +func (r *segmentRefs) afterLoad(context.Context) { if r.ReadRefs() > 0 { refs.Register(r) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_state_autogen.go index 912b12a2..f9eb5459 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_state_autogen.go @@ -3,6 +3,8 @@ package tcp import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -23,20 +25,20 @@ func (a *acceptQueue) beforeSave() {} // +checklocksignore func (a *acceptQueue) StateSave(stateSinkObject state.Sink) { a.beforeSave() - var endpointsValue []*endpoint + var endpointsValue []*Endpoint endpointsValue = a.saveEndpoints() stateSinkObject.SaveValue(0, endpointsValue) stateSinkObject.Save(1, &a.pendingEndpoints) stateSinkObject.Save(2, &a.capacity) } -func (a *acceptQueue) afterLoad() {} +func (a *acceptQueue) afterLoad(context.Context) {} // +checklocksignore -func (a *acceptQueue) StateLoad(stateSourceObject state.Source) { +func (a *acceptQueue) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(1, &a.pendingEndpoints) stateSourceObject.Load(2, &a.capacity) - stateSourceObject.LoadValue(0, new([]*endpoint), func(y any) { 
a.loadEndpoints(y.([]*endpoint)) }) + stateSourceObject.LoadValue(0, new([]*Endpoint), func(y any) { a.loadEndpoints(ctx, y.([]*Endpoint)) }) } func (h *handshake) StateTypeName() string { @@ -89,10 +91,10 @@ func (h *handshake) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(16, &h.sampleRTTWithTSOnly) } -func (h *handshake) afterLoad() {} +func (h *handshake) afterLoad(context.Context) {} // +checklocksignore -func (h *handshake) StateLoad(stateSourceObject state.Source) { +func (h *handshake) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &h.ep) stateSourceObject.Load(1, &h.listenEP) stateSourceObject.Load(2, &h.state) @@ -134,10 +136,10 @@ func (c *cubicState) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(2, &c.s) } -func (c *cubicState) afterLoad() {} +func (c *cubicState) afterLoad(context.Context) {} // +checklocksignore -func (c *cubicState) StateLoad(stateSourceObject state.Source) { +func (c *cubicState) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &c.TCPCubicState) stateSourceObject.Load(1, &c.numCongestionEvents) stateSourceObject.Load(2, &c.s) @@ -163,10 +165,10 @@ func (s *SACKInfo) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &s.NumBlocks) } -func (s *SACKInfo) afterLoad() {} +func (s *SACKInfo) afterLoad(context.Context) {} // +checklocksignore -func (s *SACKInfo) StateLoad(stateSourceObject state.Source) { +func (s *SACKInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &s.Blocks) stateSourceObject.Load(1, &s.NumBlocks) } @@ -201,10 +203,10 @@ func (r *ReceiveErrors) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(6, &r.WantZeroRcvWindow) } -func (r *ReceiveErrors) afterLoad() {} +func (r *ReceiveErrors) afterLoad(context.Context) {} // +checklocksignore -func (r *ReceiveErrors) StateLoad(stateSourceObject state.Source) { +func (r *ReceiveErrors) 
StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &r.ReceiveErrors) stateSourceObject.Load(1, &r.SegmentQueueDropped) stateSourceObject.Load(2, &r.ChecksumErrors) @@ -242,10 +244,10 @@ func (s *SendErrors) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(5, &s.Timeouts) } -func (s *SendErrors) afterLoad() {} +func (s *SendErrors) afterLoad(context.Context) {} // +checklocksignore -func (s *SendErrors) StateLoad(stateSourceObject state.Source) { +func (s *SendErrors) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &s.SendErrors) stateSourceObject.Load(1, &s.SegmentSendToNetworkFailed) stateSourceObject.Load(2, &s.SynSendToNetworkFailed) @@ -284,10 +286,10 @@ func (s *Stats) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(6, &s.WriteErrors) } -func (s *Stats) afterLoad() {} +func (s *Stats) afterLoad(context.Context) {} // +checklocksignore -func (s *Stats) StateLoad(stateSourceObject state.Source) { +func (s *Stats) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &s.SegmentsReceived) stateSourceObject.Load(1, &s.SegmentsSent) stateSourceObject.Load(2, &s.FailedConnectionAttempts) @@ -315,18 +317,18 @@ func (sq *sndQueueInfo) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &sq.TCPSndBufState) } -func (sq *sndQueueInfo) afterLoad() {} +func (sq *sndQueueInfo) afterLoad(context.Context) {} // +checklocksignore -func (sq *sndQueueInfo) StateLoad(stateSourceObject state.Source) { +func (sq *sndQueueInfo) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &sq.TCPSndBufState) } -func (e *endpoint) StateTypeName() string { - return "pkg/tcpip/transport/tcp.endpoint" +func (e *Endpoint) StateTypeName() string { + return "pkg/tcpip/transport/tcp.Endpoint" } -func (e *endpoint) StateFields() []string { +func (e *Endpoint) StateFields() []string { return []string{ 
"TCPEndpointStateInner", "TransportEndpointInfo", @@ -340,6 +342,7 @@ func (e *endpoint) StateFields() []string { "ownedByUser", "rcvQueue", "state", + "connectionDirectionState", "boundNICID", "ipv4TTL", "ipv6HopLimit", @@ -383,7 +386,7 @@ func (e *endpoint) StateFields() []string { } // +checklocksignore -func (e *endpoint) StateSave(stateSinkObject state.Sink) { +func (e *Endpoint) StateSave(stateSinkObject state.Sink) { e.beforeSave() var stateValue EndpointState stateValue = e.saveState() @@ -399,49 +402,50 @@ func (e *endpoint) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(8, &e.rcvMemUsed) stateSinkObject.Save(9, &e.ownedByUser) stateSinkObject.Save(10, &e.rcvQueue) - stateSinkObject.Save(12, &e.boundNICID) - stateSinkObject.Save(13, &e.ipv4TTL) - stateSinkObject.Save(14, &e.ipv6HopLimit) - stateSinkObject.Save(15, &e.isConnectNotified) - stateSinkObject.Save(16, &e.h) - stateSinkObject.Save(17, &e.portFlags) - stateSinkObject.Save(18, &e.boundBindToDevice) - stateSinkObject.Save(19, &e.boundPortFlags) - stateSinkObject.Save(20, &e.boundDest) - stateSinkObject.Save(21, &e.effectiveNetProtos) - stateSinkObject.Save(22, &e.recentTSTime) - stateSinkObject.Save(23, &e.shutdownFlags) - stateSinkObject.Save(24, &e.tcpRecovery) - stateSinkObject.Save(25, &e.sack) - stateSinkObject.Save(26, &e.delay) - stateSinkObject.Save(27, &e.scoreboard) - stateSinkObject.Save(28, &e.segmentQueue) - stateSinkObject.Save(29, &e.userMSS) - stateSinkObject.Save(30, &e.maxSynRetries) - stateSinkObject.Save(31, &e.windowClamp) - stateSinkObject.Save(32, &e.sndQueueInfo) - stateSinkObject.Save(33, &e.cc) - stateSinkObject.Save(34, &e.keepalive) - stateSinkObject.Save(35, &e.userTimeout) - stateSinkObject.Save(36, &e.deferAccept) - stateSinkObject.Save(37, &e.acceptQueue) - stateSinkObject.Save(38, &e.rcv) - stateSinkObject.Save(39, &e.snd) - stateSinkObject.Save(40, &e.connectingAddress) - stateSinkObject.Save(41, &e.amss) - stateSinkObject.Save(42, &e.sendTOS) - 
stateSinkObject.Save(43, &e.gso) - stateSinkObject.Save(44, &e.stats) - stateSinkObject.Save(45, &e.tcpLingerTimeout) - stateSinkObject.Save(46, &e.closed) - stateSinkObject.Save(47, &e.txHash) - stateSinkObject.Save(48, &e.owner) - stateSinkObject.Save(49, &e.ops) - stateSinkObject.Save(50, &e.lastOutOfWindowAckTime) + stateSinkObject.Save(12, &e.connectionDirectionState) + stateSinkObject.Save(13, &e.boundNICID) + stateSinkObject.Save(14, &e.ipv4TTL) + stateSinkObject.Save(15, &e.ipv6HopLimit) + stateSinkObject.Save(16, &e.isConnectNotified) + stateSinkObject.Save(17, &e.h) + stateSinkObject.Save(18, &e.portFlags) + stateSinkObject.Save(19, &e.boundBindToDevice) + stateSinkObject.Save(20, &e.boundPortFlags) + stateSinkObject.Save(21, &e.boundDest) + stateSinkObject.Save(22, &e.effectiveNetProtos) + stateSinkObject.Save(23, &e.recentTSTime) + stateSinkObject.Save(24, &e.shutdownFlags) + stateSinkObject.Save(25, &e.tcpRecovery) + stateSinkObject.Save(26, &e.sack) + stateSinkObject.Save(27, &e.delay) + stateSinkObject.Save(28, &e.scoreboard) + stateSinkObject.Save(29, &e.segmentQueue) + stateSinkObject.Save(30, &e.userMSS) + stateSinkObject.Save(31, &e.maxSynRetries) + stateSinkObject.Save(32, &e.windowClamp) + stateSinkObject.Save(33, &e.sndQueueInfo) + stateSinkObject.Save(34, &e.cc) + stateSinkObject.Save(35, &e.keepalive) + stateSinkObject.Save(36, &e.userTimeout) + stateSinkObject.Save(37, &e.deferAccept) + stateSinkObject.Save(38, &e.acceptQueue) + stateSinkObject.Save(39, &e.rcv) + stateSinkObject.Save(40, &e.snd) + stateSinkObject.Save(41, &e.connectingAddress) + stateSinkObject.Save(42, &e.amss) + stateSinkObject.Save(43, &e.sendTOS) + stateSinkObject.Save(44, &e.gso) + stateSinkObject.Save(45, &e.stats) + stateSinkObject.Save(46, &e.tcpLingerTimeout) + stateSinkObject.Save(47, &e.closed) + stateSinkObject.Save(48, &e.txHash) + stateSinkObject.Save(49, &e.owner) + stateSinkObject.Save(50, &e.ops) + stateSinkObject.Save(51, &e.lastOutOfWindowAckTime) } // 
+checklocksignore -func (e *endpoint) StateLoad(stateSourceObject state.Source) { +func (e *Endpoint) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &e.TCPEndpointStateInner) stateSourceObject.Load(1, &e.TransportEndpointInfo) stateSourceObject.Load(2, &e.DefaultSocketOptionsHandler) @@ -453,47 +457,48 @@ func (e *endpoint) StateLoad(stateSourceObject state.Source) { stateSourceObject.Load(8, &e.rcvMemUsed) stateSourceObject.Load(9, &e.ownedByUser) stateSourceObject.LoadWait(10, &e.rcvQueue) - stateSourceObject.Load(12, &e.boundNICID) - stateSourceObject.Load(13, &e.ipv4TTL) - stateSourceObject.Load(14, &e.ipv6HopLimit) - stateSourceObject.Load(15, &e.isConnectNotified) - stateSourceObject.Load(16, &e.h) - stateSourceObject.Load(17, &e.portFlags) - stateSourceObject.Load(18, &e.boundBindToDevice) - stateSourceObject.Load(19, &e.boundPortFlags) - stateSourceObject.Load(20, &e.boundDest) - stateSourceObject.Load(21, &e.effectiveNetProtos) - stateSourceObject.Load(22, &e.recentTSTime) - stateSourceObject.Load(23, &e.shutdownFlags) - stateSourceObject.Load(24, &e.tcpRecovery) - stateSourceObject.Load(25, &e.sack) - stateSourceObject.Load(26, &e.delay) - stateSourceObject.Load(27, &e.scoreboard) - stateSourceObject.LoadWait(28, &e.segmentQueue) - stateSourceObject.Load(29, &e.userMSS) - stateSourceObject.Load(30, &e.maxSynRetries) - stateSourceObject.Load(31, &e.windowClamp) - stateSourceObject.Load(32, &e.sndQueueInfo) - stateSourceObject.Load(33, &e.cc) - stateSourceObject.Load(34, &e.keepalive) - stateSourceObject.Load(35, &e.userTimeout) - stateSourceObject.Load(36, &e.deferAccept) - stateSourceObject.Load(37, &e.acceptQueue) - stateSourceObject.LoadWait(38, &e.rcv) - stateSourceObject.LoadWait(39, &e.snd) - stateSourceObject.Load(40, &e.connectingAddress) - stateSourceObject.Load(41, &e.amss) - stateSourceObject.Load(42, &e.sendTOS) - stateSourceObject.Load(43, &e.gso) - stateSourceObject.Load(44, &e.stats) - 
stateSourceObject.Load(45, &e.tcpLingerTimeout) - stateSourceObject.Load(46, &e.closed) - stateSourceObject.Load(47, &e.txHash) - stateSourceObject.Load(48, &e.owner) - stateSourceObject.Load(49, &e.ops) - stateSourceObject.Load(50, &e.lastOutOfWindowAckTime) - stateSourceObject.LoadValue(11, new(EndpointState), func(y any) { e.loadState(y.(EndpointState)) }) - stateSourceObject.AfterLoad(e.afterLoad) + stateSourceObject.Load(12, &e.connectionDirectionState) + stateSourceObject.Load(13, &e.boundNICID) + stateSourceObject.Load(14, &e.ipv4TTL) + stateSourceObject.Load(15, &e.ipv6HopLimit) + stateSourceObject.Load(16, &e.isConnectNotified) + stateSourceObject.Load(17, &e.h) + stateSourceObject.Load(18, &e.portFlags) + stateSourceObject.Load(19, &e.boundBindToDevice) + stateSourceObject.Load(20, &e.boundPortFlags) + stateSourceObject.Load(21, &e.boundDest) + stateSourceObject.Load(22, &e.effectiveNetProtos) + stateSourceObject.Load(23, &e.recentTSTime) + stateSourceObject.Load(24, &e.shutdownFlags) + stateSourceObject.Load(25, &e.tcpRecovery) + stateSourceObject.Load(26, &e.sack) + stateSourceObject.Load(27, &e.delay) + stateSourceObject.Load(28, &e.scoreboard) + stateSourceObject.LoadWait(29, &e.segmentQueue) + stateSourceObject.Load(30, &e.userMSS) + stateSourceObject.Load(31, &e.maxSynRetries) + stateSourceObject.Load(32, &e.windowClamp) + stateSourceObject.Load(33, &e.sndQueueInfo) + stateSourceObject.Load(34, &e.cc) + stateSourceObject.Load(35, &e.keepalive) + stateSourceObject.Load(36, &e.userTimeout) + stateSourceObject.Load(37, &e.deferAccept) + stateSourceObject.Load(38, &e.acceptQueue) + stateSourceObject.LoadWait(39, &e.rcv) + stateSourceObject.LoadWait(40, &e.snd) + stateSourceObject.Load(41, &e.connectingAddress) + stateSourceObject.Load(42, &e.amss) + stateSourceObject.Load(43, &e.sendTOS) + stateSourceObject.Load(44, &e.gso) + stateSourceObject.Load(45, &e.stats) + stateSourceObject.Load(46, &e.tcpLingerTimeout) + stateSourceObject.Load(47, &e.closed) + 
stateSourceObject.Load(48, &e.txHash) + stateSourceObject.Load(49, &e.owner) + stateSourceObject.Load(50, &e.ops) + stateSourceObject.Load(51, &e.lastOutOfWindowAckTime) + stateSourceObject.LoadValue(11, new(EndpointState), func(y any) { e.loadState(ctx, y.(EndpointState)) }) + stateSourceObject.AfterLoad(func() { e.afterLoad(ctx) }) } func (k *keepalive) StateTypeName() string { @@ -520,10 +525,10 @@ func (k *keepalive) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(3, &k.unacked) } -func (k *keepalive) afterLoad() {} +func (k *keepalive) afterLoad(context.Context) {} // +checklocksignore -func (k *keepalive) StateLoad(stateSourceObject state.Source) { +func (k *keepalive) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &k.idle) stateSourceObject.Load(1, &k.interval) stateSourceObject.Load(2, &k.count) @@ -558,10 +563,10 @@ func (rc *rackControl) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(5, &rc.snd) } -func (rc *rackControl) afterLoad() {} +func (rc *rackControl) afterLoad(context.Context) {} // +checklocksignore -func (rc *rackControl) StateLoad(stateSourceObject state.Source) { +func (rc *rackControl) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &rc.TCPRACKState) stateSourceObject.Load(1, &rc.exitedRecovery) stateSourceObject.Load(2, &rc.minRTT) @@ -602,10 +607,10 @@ func (r *receiver) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(7, &r.lastRcvdAckTime) } -func (r *receiver) afterLoad() {} +func (r *receiver) afterLoad(context.Context) {} // +checklocksignore -func (r *receiver) StateLoad(stateSourceObject state.Source) { +func (r *receiver) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &r.TCPReceiverState) stateSourceObject.Load(1, &r.ep) stateSourceObject.Load(2, &r.rcvWnd) @@ -634,10 +639,10 @@ func (r *renoState) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, 
&r.s) } -func (r *renoState) afterLoad() {} +func (r *renoState) afterLoad(context.Context) {} // +checklocksignore -func (r *renoState) StateLoad(stateSourceObject state.Source) { +func (r *renoState) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &r.s) } @@ -659,10 +664,10 @@ func (rr *renoRecovery) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &rr.s) } -func (rr *renoRecovery) afterLoad() {} +func (rr *renoRecovery) afterLoad(context.Context) {} // +checklocksignore -func (rr *renoRecovery) StateLoad(stateSourceObject state.Source) { +func (rr *renoRecovery) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &rr.s) } @@ -684,10 +689,10 @@ func (sr *sackRecovery) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &sr.s) } -func (sr *sackRecovery) afterLoad() {} +func (sr *sackRecovery) afterLoad(context.Context) {} // +checklocksignore -func (sr *sackRecovery) StateLoad(stateSourceObject state.Source) { +func (sr *sackRecovery) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &sr.s) } @@ -711,10 +716,10 @@ func (s *SACKScoreboard) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &s.maxSACKED) } -func (s *SACKScoreboard) afterLoad() {} +func (s *SACKScoreboard) afterLoad(context.Context) {} // +checklocksignore -func (s *SACKScoreboard) StateLoad(stateSourceObject state.Source) { +func (s *SACKScoreboard) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &s.smss) stateSourceObject.Load(1, &s.maxSACKED) } @@ -777,10 +782,10 @@ func (s *segment) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(19, &s.lost) } -func (s *segment) afterLoad() {} +func (s *segment) afterLoad(context.Context) {} // +checklocksignore -func (s *segment) StateLoad(stateSourceObject state.Source) { +func (s *segment) StateLoad(ctx context.Context, stateSourceObject 
state.Source) { stateSourceObject.Load(0, &s.segmentEntry) stateSourceObject.Load(1, &s.segmentRefs) stateSourceObject.Load(2, &s.ep) @@ -800,7 +805,7 @@ func (s *segment) StateLoad(stateSourceObject state.Source) { stateSourceObject.Load(17, &s.acked) stateSourceObject.Load(18, &s.dataMemSize) stateSourceObject.Load(19, &s.lost) - stateSourceObject.LoadValue(12, new([]byte), func(y any) { s.loadOptions(y.([]byte)) }) + stateSourceObject.LoadValue(12, new([]byte), func(y any) { s.loadOptions(ctx, y.([]byte)) }) } func (q *segmentQueue) StateTypeName() string { @@ -825,10 +830,10 @@ func (q *segmentQueue) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(2, &q.frozen) } -func (q *segmentQueue) afterLoad() {} +func (q *segmentQueue) afterLoad(context.Context) {} // +checklocksignore -func (q *segmentQueue) StateLoad(stateSourceObject state.Source) { +func (q *segmentQueue) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.LoadWait(0, &q.list) stateSourceObject.Load(1, &q.ep) stateSourceObject.Load(2, &q.frozen) @@ -856,6 +861,7 @@ func (s *sender) StateFields() []string { "rc", "spuriousRecovery", "retransmitTS", + "startCork", } } @@ -880,12 +886,13 @@ func (s *sender) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(13, &s.rc) stateSinkObject.Save(14, &s.spuriousRecovery) stateSinkObject.Save(15, &s.retransmitTS) + stateSinkObject.Save(16, &s.startCork) } -func (s *sender) afterLoad() {} +func (s *sender) afterLoad(context.Context) {} // +checklocksignore -func (s *sender) StateLoad(stateSourceObject state.Source) { +func (s *sender) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &s.TCPSenderState) stateSourceObject.Load(1, &s.ep) stateSourceObject.Load(2, &s.lr) @@ -902,6 +909,7 @@ func (s *sender) StateLoad(stateSourceObject state.Source) { stateSourceObject.Load(13, &s.rc) stateSourceObject.Load(14, &s.spuriousRecovery) stateSourceObject.Load(15, &s.retransmitTS) 
+ stateSourceObject.Load(16, &s.startCork) } func (r *rtt) StateTypeName() string { @@ -922,10 +930,10 @@ func (r *rtt) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &r.TCPRTTState) } -func (r *rtt) afterLoad() {} +func (r *rtt) afterLoad(context.Context) {} // +checklocksignore -func (r *rtt) StateLoad(stateSourceObject state.Source) { +func (r *rtt) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &r.TCPRTTState) } @@ -949,10 +957,10 @@ func (l *endpointList) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &l.tail) } -func (l *endpointList) afterLoad() {} +func (l *endpointList) afterLoad(context.Context) {} // +checklocksignore -func (l *endpointList) StateLoad(stateSourceObject state.Source) { +func (l *endpointList) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &l.head) stateSourceObject.Load(1, &l.tail) } @@ -977,10 +985,10 @@ func (e *endpointEntry) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &e.prev) } -func (e *endpointEntry) afterLoad() {} +func (e *endpointEntry) afterLoad(context.Context) {} // +checklocksignore -func (e *endpointEntry) StateLoad(stateSourceObject state.Source) { +func (e *endpointEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &e.next) stateSourceObject.Load(1, &e.prev) } @@ -1005,10 +1013,10 @@ func (l *segmentList) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &l.tail) } -func (l *segmentList) afterLoad() {} +func (l *segmentList) afterLoad(context.Context) {} // +checklocksignore -func (l *segmentList) StateLoad(stateSourceObject state.Source) { +func (l *segmentList) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &l.head) stateSourceObject.Load(1, &l.tail) } @@ -1033,10 +1041,10 @@ func (e *segmentEntry) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &e.prev) } -func 
(e *segmentEntry) afterLoad() {} +func (e *segmentEntry) afterLoad(context.Context) {} // +checklocksignore -func (e *segmentEntry) StateLoad(stateSourceObject state.Source) { +func (e *segmentEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &e.next) stateSourceObject.Load(1, &e.prev) } @@ -1060,9 +1068,9 @@ func (r *segmentRefs) StateSave(stateSinkObject state.Sink) { } // +checklocksignore -func (r *segmentRefs) StateLoad(stateSourceObject state.Source) { +func (r *segmentRefs) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &r.refCount) - stateSourceObject.AfterLoad(r.afterLoad) + stateSourceObject.AfterLoad(func() { r.afterLoad(ctx) }) } func init() { @@ -1074,7 +1082,7 @@ func init() { state.Register((*SendErrors)(nil)) state.Register((*Stats)(nil)) state.Register((*sndQueueInfo)(nil)) - state.Register((*endpoint)(nil)) + state.Register((*Endpoint)(nil)) state.Register((*keepalive)(nil)) state.Register((*rackControl)(nil)) state.Register((*receiver)(nil)) diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/timer.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/timer.go index 20800926..7111789d 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/timer.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/timer.go @@ -15,7 +15,6 @@ package tcp import ( - "math" "time" "gvisor.dev/gvisor/pkg/tcpip" @@ -24,8 +23,10 @@ import ( type timerState int const ( + // The timer has not been initialized yet or has been cleaned up. + timerUninitialized timerState = iota // The timer is disabled. - timerStateDisabled timerState = iota + timerStateDisabled // The timer is enabled, but the clock timer may be set to an earlier // expiration time due to a previous orphaned state. timerStateEnabled @@ -66,6 +67,9 @@ type timer struct { // timer is the clock timer used to wait on. 
timer tcpip.Timer + + // callback is the function that's called when the timer expires. + callback func() } // init initializes the timer. Once it expires the function callback @@ -73,11 +77,7 @@ type timer struct { func (t *timer) init(clock tcpip.Clock, f func()) { t.state = timerStateDisabled t.clock = clock - - // Initialize a clock timer that will call the callback func, then - // immediately stop it. - t.timer = t.clock.AfterFunc(math.MaxInt64, f) - t.timer.Stop() + t.callback = f } // cleanup frees all resources associated with the timer. @@ -90,15 +90,15 @@ func (t *timer) cleanup() { *t = timer{} } -// isZero returns true if the timer is in the zero state. This is usually -// only true if init() has never been called or if cleanup has been called. -func (t *timer) isZero() bool { - return *t == timer{} +// isUninitialized returns true if the timer is in the uninitialized state. This +// is only true if init() has never been called or if cleanup has been called. +func (t *timer) isUninitialized() bool { + return t.state == timerUninitialized } // checkExpiration checks if the given timer has actually expired, it should be // called whenever the callback function is called, and is used to check if it's -// a supurious timer expiration (due to a previously orphaned timer) or a +// a spurious timer expiration (due to a previously orphaned timer) or a // legitimate one. func (t *timer) checkExpiration() bool { // Transition to fully disabled state if we're just consuming an @@ -143,8 +143,18 @@ func (t *timer) enable(d time.Duration) { // Check if we need to set the runtime timer. if t.state == timerStateDisabled || t.target.Before(t.clockTarget) { t.clockTarget = t.target - t.timer.Reset(d) + t.resetOrStart(d) } t.state = timerStateEnabled } + +// resetOrStart creates the timer if it doesn't already exist or resets it with +// the given duration if it does. 
+func (t *timer) resetOrStart(d time.Duration) { + if t.timer == nil { + t.timer = t.clock.AfterFunc(d, t.callback) + } else { + t.timer.Reset(d) + } +} diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint.go index 02f304ef..0c21be86 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint.go @@ -40,7 +40,7 @@ type udpPacket struct { senderAddress tcpip.FullAddress destinationAddress tcpip.FullAddress packetInfo tcpip.IPPacketInfo - pkt stack.PacketBufferPtr + pkt *stack.PacketBuffer receivedAt time.Time `state:".(int64)"` // tosOrTClass stores either the Type of Service for IPv4 or the Traffic Class // for IPv6. @@ -476,7 +476,7 @@ func (e *endpoint) write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcp dataSz := udpInfo.data.Size() pktInfo := udpInfo.ctx.PacketInfo() pkt := udpInfo.ctx.TryNewPacketBuffer(header.UDPMinimumSize+int(pktInfo.MaxHeaderLength), udpInfo.data) - if pkt.IsNil() { + if pkt == nil { return 0, &tcpip.ErrWouldBlock{} } defer pkt.DecRef() @@ -679,11 +679,6 @@ func (e *endpoint) Connect(addr tcpip.FullAddress) tcpip.Error { oldPortFlags := e.boundPortFlags - nextID, btd, err := e.registerWithStack(netProtos, nextID) - if err != nil { - return err - } - // Remove the old registration. 
if e.localPort != 0 { previousID.LocalPort = e.localPort @@ -691,6 +686,11 @@ func (e *endpoint) Connect(addr tcpip.FullAddress) tcpip.Error { e.stack.UnregisterTransportEndpoint(e.effectiveNetProtos, ProtocolNumber, previousID, e, oldPortFlags, e.boundBindToDevice) } + nextID, btd, err := e.registerWithStack(netProtos, nextID) + if err != nil { + return err + } + e.localPort = nextID.LocalPort e.remotePort = nextID.RemotePort e.boundBindToDevice = btd @@ -745,6 +745,9 @@ func (e *endpoint) Shutdown(flags tcpip.ShutdownFlags) tcpip.Error { } } + if e.net.State() == transport.DatagramEndpointStateBound { + return &tcpip.ErrNotConnected{} + } return nil } @@ -770,7 +773,7 @@ func (e *endpoint) registerWithStack(netProtos []tcpip.NetworkProtocolNumber, id BindToDevice: bindToDevice, Dest: tcpip.FullAddress{}, } - port, err := e.stack.ReservePort(e.stack.Rand(), portRes, nil /* testPort */) + port, err := e.stack.ReservePort(e.stack.SecureRNG(), portRes, nil /* testPort */) if err != nil { return id, bindToDevice, err } @@ -905,7 +908,7 @@ func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask { // HandlePacket is called by the stack when new packets arrive to this transport // endpoint. -func (e *endpoint) HandlePacket(id stack.TransportEndpointID, pkt stack.PacketBufferPtr) { +func (e *endpoint) HandlePacket(id stack.TransportEndpointID, pkt *stack.PacketBuffer) { // Get the header then trim it from the view. hdr := header.UDP(pkt.TransportHeader().Slice()) netHdr := pkt.Network() @@ -997,7 +1000,7 @@ func (e *endpoint) HandlePacket(id stack.TransportEndpointID, pkt stack.PacketBu } } -func (e *endpoint) onICMPError(err tcpip.Error, transErr stack.TransportError, pkt stack.PacketBufferPtr) { +func (e *endpoint) onICMPError(err tcpip.Error, transErr stack.TransportError, pkt *stack.PacketBuffer) { // Update last error first. 
e.lastErrorMu.Lock() e.lastError = err @@ -1022,6 +1025,7 @@ func (e *endpoint) onICMPError(err tcpip.Error, transErr stack.TransportError, p } id := e.net.Info().ID + e.mu.RLock() e.SocketOptions().QueueErr(&tcpip.SockError{ Err: err, Cause: transErr, @@ -1038,6 +1042,7 @@ func (e *endpoint) onICMPError(err tcpip.Error, transErr stack.TransportError, p }, NetProto: pkt.NetworkProtocolNumber, }) + e.mu.RUnlock() } // Notify of the error. @@ -1045,7 +1050,7 @@ func (e *endpoint) onICMPError(err tcpip.Error, transErr stack.TransportError, p } // HandleError implements stack.TransportEndpoint. -func (e *endpoint) HandleError(transErr stack.TransportError, pkt stack.PacketBufferPtr) { +func (e *endpoint) HandleError(transErr stack.TransportError, pkt *stack.PacketBuffer) { // TODO(gvisor.dev/issues/5270): Handle all transport errors. switch transErr.Kind() { case stack.DestinationPortUnreachableTransportError: diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint_state.go index 546840b6..488e4660 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint_state.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint_state.go @@ -15,6 +15,7 @@ package udp import ( + "context" "fmt" "time" @@ -29,22 +30,23 @@ func (p *udpPacket) saveReceivedAt() int64 { } // loadReceivedAt is invoked by stateify. -func (p *udpPacket) loadReceivedAt(nsec int64) { +func (p *udpPacket) loadReceivedAt(_ context.Context, nsec int64) { p.receivedAt = time.Unix(0, nsec) } // afterLoad is invoked by stateify. -func (e *endpoint) afterLoad() { - stack.StackFromEnv.RegisterRestoredEndpoint(e) +func (e *endpoint) afterLoad(ctx context.Context) { + stack.RestoreStackFromContext(ctx).RegisterRestoredEndpoint(e) } // beforeSave is invoked by stateify. func (e *endpoint) beforeSave() { e.freeze() + e.stack.RegisterResumableEndpoint(e) } -// Resume implements tcpip.ResumableEndpoint.Resume. 
-func (e *endpoint) Resume(s *stack.Stack) { +// Restore implements tcpip.RestoredEndpoint.Restore. +func (e *endpoint) Restore(s *stack.Stack) { e.thaw() e.mu.Lock() @@ -75,3 +77,8 @@ func (e *endpoint) Resume(s *stack.Stack) { panic(fmt.Sprintf("unhandled state = %s", state)) } } + +// Resume implements tcpip.ResumableEndpoint.Resume. +func (e *endpoint) Resume() { + e.thaw() +} diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/forwarder.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/forwarder.go index 711a5ed3..7950abe5 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/forwarder.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/forwarder.go @@ -43,7 +43,7 @@ func NewForwarder(s *stack.Stack, handler func(*ForwarderRequest)) *Forwarder { // // This function is expected to be passed as an argument to the // stack.SetTransportProtocolHandler function. -func (f *Forwarder) HandlePacket(id stack.TransportEndpointID, pkt stack.PacketBufferPtr) bool { +func (f *Forwarder) HandlePacket(id stack.TransportEndpointID, pkt *stack.PacketBuffer) bool { f.handler(&ForwarderRequest{ stack: f.stack, id: id, @@ -59,7 +59,7 @@ func (f *Forwarder) HandlePacket(id stack.TransportEndpointID, pkt stack.PacketB type ForwarderRequest struct { stack *stack.Stack id stack.TransportEndpointID - pkt stack.PacketBufferPtr + pkt *stack.PacketBuffer } // ID returns the 4-tuple (src address, src port, dst address, dst port) that diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/protocol.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/protocol.go index d4de0d2b..c26ac4d6 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/protocol.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/protocol.go @@ -77,7 +77,7 @@ func (*protocol) ParsePorts(v []byte) (src, dst uint16, err tcpip.Error) { // HandleUnknownDestinationPacket handles packets that are targeted at this // protocol but don't match any existing endpoint. 
-func (p *protocol) HandleUnknownDestinationPacket(id stack.TransportEndpointID, pkt stack.PacketBufferPtr) stack.UnknownDestinationPacketDisposition { +func (p *protocol) HandleUnknownDestinationPacket(id stack.TransportEndpointID, pkt *stack.PacketBuffer) stack.UnknownDestinationPacketDisposition { hdr := header.UDP(pkt.TransportHeader().Slice()) netHdr := pkt.Network() lengthValid, csumValid := header.UDPValid( @@ -124,7 +124,7 @@ func (*protocol) Pause() {} func (*protocol) Resume() {} // Parse implements stack.TransportProtocol.Parse. -func (*protocol) Parse(pkt stack.PacketBufferPtr) bool { +func (*protocol) Parse(pkt *stack.PacketBuffer) bool { return parse.UDP(pkt) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/udp_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/udp_state_autogen.go index c624ba41..e1c9c9fe 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/udp_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/udp_state_autogen.go @@ -3,6 +3,8 @@ package udp import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -42,10 +44,10 @@ func (p *udpPacket) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(8, &p.ttlOrHopLimit) } -func (p *udpPacket) afterLoad() {} +func (p *udpPacket) afterLoad(context.Context) {} // +checklocksignore -func (p *udpPacket) StateLoad(stateSourceObject state.Source) { +func (p *udpPacket) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &p.udpPacketEntry) stateSourceObject.Load(1, &p.netProto) stateSourceObject.Load(2, &p.senderAddress) @@ -54,7 +56,7 @@ func (p *udpPacket) StateLoad(stateSourceObject state.Source) { stateSourceObject.Load(5, &p.pkt) stateSourceObject.Load(7, &p.tosOrTClass) stateSourceObject.Load(8, &p.ttlOrHopLimit) - stateSourceObject.LoadValue(6, new(int64), func(y any) { p.loadReceivedAt(y.(int64)) }) + stateSourceObject.LoadValue(6, new(int64), func(y any) { p.loadReceivedAt(ctx, 
y.(int64)) }) } func (e *endpoint) StateTypeName() string { @@ -110,7 +112,7 @@ func (e *endpoint) StateSave(stateSinkObject state.Sink) { } // +checklocksignore -func (e *endpoint) StateLoad(stateSourceObject state.Source) { +func (e *endpoint) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &e.DefaultSocketOptionsHandler) stateSourceObject.Load(1, &e.waiterQueue) stateSourceObject.Load(2, &e.uniqueID) @@ -130,7 +132,7 @@ func (e *endpoint) StateLoad(stateSourceObject state.Source) { stateSourceObject.Load(16, &e.frozen) stateSourceObject.Load(17, &e.localPort) stateSourceObject.Load(18, &e.remotePort) - stateSourceObject.AfterLoad(e.afterLoad) + stateSourceObject.AfterLoad(func() { e.afterLoad(ctx) }) } func (l *udpPacketList) StateTypeName() string { @@ -153,10 +155,10 @@ func (l *udpPacketList) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &l.tail) } -func (l *udpPacketList) afterLoad() {} +func (l *udpPacketList) afterLoad(context.Context) {} // +checklocksignore -func (l *udpPacketList) StateLoad(stateSourceObject state.Source) { +func (l *udpPacketList) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &l.head) stateSourceObject.Load(1, &l.tail) } @@ -181,10 +183,10 @@ func (e *udpPacketEntry) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &e.prev) } -func (e *udpPacketEntry) afterLoad() {} +func (e *udpPacketEntry) afterLoad(context.Context) {} // +checklocksignore -func (e *udpPacketEntry) StateLoad(stateSourceObject state.Source) { +func (e *udpPacketEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &e.next) stateSourceObject.Load(1, &e.prev) } diff --git a/vendor/gvisor.dev/gvisor/pkg/waiter/waiter_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/waiter/waiter_state_autogen.go index e785fe3b..91d35041 100644 --- a/vendor/gvisor.dev/gvisor/pkg/waiter/waiter_state_autogen.go +++ 
b/vendor/gvisor.dev/gvisor/pkg/waiter/waiter_state_autogen.go @@ -3,6 +3,8 @@ package waiter import ( + "context" + "gvisor.dev/gvisor/pkg/state" ) @@ -28,10 +30,10 @@ func (e *Entry) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(2, &e.mask) } -func (e *Entry) afterLoad() {} +func (e *Entry) afterLoad(context.Context) {} // +checklocksignore -func (e *Entry) StateLoad(stateSourceObject state.Source) { +func (e *Entry) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &e.waiterEntry) stateSourceObject.Load(1, &e.eventListener) stateSourceObject.Load(2, &e.mask) @@ -55,10 +57,10 @@ func (q *Queue) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &q.list) } -func (q *Queue) afterLoad() {} +func (q *Queue) afterLoad(context.Context) {} // +checklocksignore -func (q *Queue) StateLoad(stateSourceObject state.Source) { +func (q *Queue) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &q.list) } @@ -82,10 +84,10 @@ func (l *waiterList) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &l.tail) } -func (l *waiterList) afterLoad() {} +func (l *waiterList) afterLoad(context.Context) {} // +checklocksignore -func (l *waiterList) StateLoad(stateSourceObject state.Source) { +func (l *waiterList) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &l.head) stateSourceObject.Load(1, &l.tail) } @@ -110,10 +112,10 @@ func (e *waiterEntry) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(1, &e.prev) } -func (e *waiterEntry) afterLoad() {} +func (e *waiterEntry) afterLoad(context.Context) {} // +checklocksignore -func (e *waiterEntry) StateLoad(stateSourceObject state.Source) { +func (e *waiterEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) { stateSourceObject.Load(0, &e.next) stateSourceObject.Load(1, &e.prev) } diff --git a/vendor/modules.txt b/vendor/modules.txt index 
466cd4cc..518b943c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1107,7 +1107,7 @@ go4.org/intern # go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 ## explicit; go 1.11 go4.org/unsafe/assume-no-moving-gc -# golang.org/x/crypto v0.18.0 +# golang.org/x/crypto v0.21.0 ## explicit; go 1.18 golang.org/x/crypto/blake2s golang.org/x/crypto/blowfish @@ -1140,7 +1140,7 @@ golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.20.0 +# golang.org/x/net v0.21.0 ## explicit; go 1.18 golang.org/x/net/bpf golang.org/x/net/context @@ -1177,7 +1177,7 @@ golang.org/x/oauth2/jwt ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.16.0 +# golang.org/x/sys v0.18.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/execabs @@ -1185,7 +1185,7 @@ golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.16.0 +# golang.org/x/term v0.18.0 ## explicit; go 1.18 golang.org/x/term # golang.org/x/text v0.14.0 @@ -1358,7 +1358,7 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.32.0 +# google.golang.org/protobuf v1.33.0 ## explicit; go 1.17 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext @@ -1366,6 +1366,7 @@ google.golang.org/protobuf/encoding/protowire google.golang.org/protobuf/internal/descfmt google.golang.org/protobuf/internal/descopts google.golang.org/protobuf/internal/detrand +google.golang.org/protobuf/internal/editiondefaults google.golang.org/protobuf/internal/encoding/defval google.golang.org/protobuf/internal/encoding/json google.golang.org/protobuf/internal/encoding/messageset @@ -1390,6 +1391,7 @@ google.golang.org/protobuf/reflect/protoregistry google.golang.org/protobuf/runtime/protoiface 
google.golang.org/protobuf/runtime/protoimpl google.golang.org/protobuf/types/descriptorpb +google.golang.org/protobuf/types/gofeaturespb google.golang.org/protobuf/types/known/anypb google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/emptypb @@ -1448,8 +1450,8 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# gvisor.dev/gvisor v0.0.0-20230603040744-5c9219dedd33 -## explicit; go 1.20 +# gvisor.dev/gvisor v0.0.0-20240331093445-9d995324d058 +## explicit; go 1.21.1 gvisor.dev/gvisor/pkg/atomicbitops gvisor.dev/gvisor/pkg/bits gvisor.dev/gvisor/pkg/buffer