hotfix: fix context flag not working (#204)

naison
2024-04-01 11:44:59 +08:00
committed by GitHub
parent aacdc8a6d0
commit d3aeae7573
479 changed files with 47506 additions and 9712 deletions

go.mod

@@ -1,6 +1,8 @@
module github.com/wencaiwulue/kubevpn/v2
go 1.22
go 1.22.0
toolchain go1.22.1
require (
github.com/cilium/ipam v0.0.0-20220824141044-46ef3d556735
@@ -34,9 +36,9 @@ require (
github.com/spf13/pflag v1.0.5
go.uber.org/automaxprocs v1.5.1
golang.org/x/crypto v0.21.0
golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3
golang.org/x/net v0.21.0
golang.org/x/oauth2 v0.16.0
golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8
golang.org/x/net v0.22.0
golang.org/x/oauth2 v0.18.0
golang.org/x/sync v0.6.0
golang.org/x/sys v0.18.0
golang.org/x/text v0.14.0
@@ -48,13 +50,13 @@ require (
google.golang.org/protobuf v1.33.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1
gvisor.dev/gvisor v0.0.0-20240331093445-9d995324d058
k8s.io/api v0.29.0
k8s.io/apimachinery v0.29.0
k8s.io/api v0.31.0-alpha.0
k8s.io/apimachinery v0.31.0-alpha.0
k8s.io/cli-runtime v0.29.0
k8s.io/client-go v0.29.0
k8s.io/client-go v0.31.0-alpha.0
k8s.io/kubectl v0.29.0
k8s.io/utils v0.0.0-20231127182322-b307cd553661
sigs.k8s.io/controller-runtime v0.16.3
k8s.io/utils v0.0.0-20240310230437-4693a0247e57
sigs.k8s.io/controller-runtime v0.17.1-0.20240327193027-21368602d84b
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3
sigs.k8s.io/yaml v1.4.0
)
@@ -106,10 +108,10 @@ require (
github.com/docker/go-units v0.5.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/ebitengine/purego v0.5.1 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/emicklei/go-restful/v3 v3.12.0 // indirect
github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.7.0 // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/farsightsec/golang-framestream v0.3.0 // indirect
github.com/fatih/camelcase v1.0.0 // indirect
@@ -119,14 +121,14 @@ require (
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.20.2 // indirect
github.com/go-openapi/jsonreference v0.20.4 // indirect
github.com/go-openapi/swag v0.22.7 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
@@ -159,7 +161,6 @@ require (
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
@@ -177,7 +178,7 @@ require (
github.com/morikuni/aec v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/onsi/ginkgo/v2 v2.13.2 // indirect
github.com/onsi/ginkgo/v2 v2.17.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/runc v1.1.12 // indirect
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect
@@ -189,10 +190,10 @@ require (
github.com/outcaste-io/ristretto v0.2.3 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/philhofer/fwd v1.1.2 // indirect
github.com/prometheus/client_golang v1.18.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.45.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/client_golang v1.19.0 // indirect
github.com/prometheus/client_model v0.6.0 // indirect
github.com/prometheus/common v0.51.1 // indirect
github.com/prometheus/procfs v0.13.0 // indirect
github.com/quic-go/qtls-go1-20 v0.4.1 // indirect
github.com/quic-go/quic-go v0.40.1 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
@@ -216,12 +217,12 @@ require (
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/mock v0.4.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.26.0 // indirect
go.uber.org/zap v1.27.0 // indirect
go4.org/intern v0.0.0-20230525184215-6c62f75575cb // indirect
go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 // indirect
golang.org/x/mod v0.15.0 // indirect
golang.org/x/mod v0.16.0 // indirect
golang.org/x/term v0.18.0 // indirect
golang.org/x/tools v0.17.0 // indirect
golang.org/x/tools v0.19.0 // indirect
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/api v0.160.0 // indirect
@@ -236,10 +237,10 @@ require (
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
inet.af/netaddr v0.0.0-20230525184311-b8eac61e914a // indirect
k8s.io/apiextensions-apiserver v0.29.0 // indirect
k8s.io/component-base v0.29.0 // indirect
k8s.io/klog/v2 v2.110.1 // indirect
k8s.io/kube-openapi v0.0.0-20231214164306-ab13479f8bf8 // indirect
k8s.io/apiextensions-apiserver v0.31.0-alpha.0 // indirect
k8s.io/component-base v0.31.0-alpha.0 // indirect
k8s.io/klog/v2 v2.120.1 // indirect
k8s.io/kube-openapi v0.0.0-20240322212309-b815d8309940 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
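
The go directive bump from 1.22 to 1.22.0 plus the new toolchain line follows Go 1.21+ rules: the go directive sets the minimum required language version, while the toolchain directive pins the exact release used to build. A minimal go.mod showing the pair (hypothetical module path):

module example.com/demo

go 1.22.0

toolchain go1.22.1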

go.sum

@@ -374,8 +374,8 @@ github.com/ebitengine/purego v0.5.1/go.mod h1:ah1In8AOtksoNK6yk5z1HTJeUkC1Ez4Wk2
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk=
github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -390,8 +390,8 @@ github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc=
github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
github.com/farsightsec/golang-framestream v0.3.0 h1:/spFQHucTle/ZIPkYqrfshQqPe2VQEzesH243TjIwqA=
@@ -428,26 +428,25 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo=
github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q=
github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU=
github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.22.7 h1:JWrc1uc/P9cSomxfnsFSVWoE1FW6bNbrVPmpQYpCcR8=
github.com/go-openapi/swag v0.22.7/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
@@ -506,8 +505,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4-0.20210608040537-544b4180ac70/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -728,8 +727,6 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM=
github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk=
@@ -805,8 +802,8 @@ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs=
github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM=
github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8=
github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -815,8 +812,8 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg=
github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk=
github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -887,23 +884,23 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
github.com/prometheus/common v0.51.1 h1:eIjN50Bwglz6a/c3hAgSMcofL3nD+nFQkV6Dd4DsQCw=
github.com/prometheus/common v0.51.1/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -915,8 +912,8 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o=
github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/quic-go/qtls-go1-20 v0.4.1 h1:D33340mCNDAIKBqXuAvexTNMUByrYmFYVfKfDN5nfFs=
github.com/quic-go/qtls-go1-20 v0.4.1/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k=
@@ -1010,8 +1007,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -1103,16 +1100,16 @@ go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0
go.uber.org/automaxprocs v1.5.1 h1:e1YG66Lrk73dn4qhg8WFSvhF0JuFQF0ERIp4rpuV8Qk=
go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA=
go4.org/intern v0.0.0-20230525184215-6c62f75575cb h1:ae7kzL5Cfdmcecbh22ll7lYP3iuUdnfnhiPcSaDgH/8=
go4.org/intern v0.0.0-20230525184215-6c62f75575cb/go.mod h1:Ycrt6raEcnF5FTsLiLKkhBTO6DPX3RCUCUVnks3gFJU=
@@ -1152,8 +1149,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3 h1:/RIbNt/Zr7rVhIkQhooTxCxFcdWLGIKnZA4IXNFSrvo=
golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 h1:aAcj0Da7eBAtrTp03QXWvm88pSyOt+UgdZw2BFZ+lEw=
golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1176,8 +1173,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1221,15 +1218,15 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1393,8 +1390,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1560,15 +1557,15 @@ inet.af/netaddr v0.0.0-20230525184311-b8eac61e914a/go.mod h1:e83i32mAQOW1LAqEIwe
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A=
k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA=
k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0=
k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc=
k8s.io/api v0.31.0-alpha.0 h1:krNb/2tioHvtaZbOMpDOLCn76Jxu8Ii/5Yu01Etp4t4=
k8s.io/api v0.31.0-alpha.0/go.mod h1:nUY37hHwD1v9hQ1dKVvfv0sr7GCaqEK14ByQgXR7F0k=
k8s.io/apiextensions-apiserver v0.31.0-alpha.0 h1:JKCm6stpgrahKJiqNRnW7I7bwsQZIFOaBlcqbXEQ7bc=
k8s.io/apiextensions-apiserver v0.31.0-alpha.0/go.mod h1:1X7+Z8jsoPD2tvyvQ5mvBQmutx5Tchv+WwzVk4z5kFc=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o=
k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis=
k8s.io/apimachinery v0.31.0-alpha.0 h1:uZpXTYK4IqXW5I12OinXRKJYLnQq7MKokrB4b7QjOcQ=
k8s.io/apimachinery v0.31.0-alpha.0/go.mod h1:wEJvNDlfxMRaMhyv38SIHIEC9hah/xuzqUUhxIyUv7Y=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
@@ -1577,13 +1574,13 @@ k8s.io/cli-runtime v0.29.0/go.mod h1:VKudXp3X7wR45L+nER85YUzOQIru28HQpXr0mTdeCrk
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8=
k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38=
k8s.io/client-go v0.31.0-alpha.0 h1:VVxcVKbqni0wAWd3rNuXcNcpE7gT6jvVLBe/YlWKu8s=
k8s.io/client-go v0.31.0-alpha.0/go.mod h1:XM6Bsdnz08pV+PfIUNDOgblfG6PVLI9ll6xHMk1Ifso=
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s=
k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M=
k8s.io/component-base v0.31.0-alpha.0 h1:Wm2aXDIEXja0o6p/w3+MCAvvfQyvKRWEfSl82wdr1Bo=
k8s.io/component-base v0.31.0-alpha.0/go.mod h1:MDaPQWtkCUMorX4RRF32PA4rYGmVx9k5uPIze58kixM=
k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
@@ -1591,24 +1588,24 @@ k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kube-openapi v0.0.0-20231214164306-ab13479f8bf8 h1:yHNkNuLjht7iq95pO9QmbjOWCguvn8mDe3lT78nqPkw=
k8s.io/kube-openapi v0.0.0-20231214164306-ab13479f8bf8/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
k8s.io/kube-openapi v0.0.0-20240322212309-b815d8309940 h1:qVoMaQV5t62UUvHe16Q3eb2c5HPzLHYzsi0Tu/xLndo=
k8s.io/kube-openapi v0.0.0-20240322212309-b815d8309940/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/kubectl v0.29.0 h1:Oqi48gXjikDhrBF67AYuZRTcJV4lg2l42GmvsP7FmYI=
k8s.io/kubectl v0.29.0/go.mod h1:0jMjGWIcMIQzmUaMgAzhSELv5WtHo2a8pq67DtviAJs=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI=
k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY=
k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4=
sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0=
sigs.k8s.io/controller-runtime v0.17.1-0.20240327193027-21368602d84b h1:ZQ+OFhT7H5LX/MrAYqJK9NWgyAEituKyU5XO2KQdHpo=
sigs.k8s.io/controller-runtime v0.17.1-0.20240327193027-21368602d84b/go.mod h1:J31OHxL1QGCaT4cf24B9kcx8E2l7ohnVG4zxzlCuA9o=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0=


@@ -4,11 +4,14 @@ import (
	"context"
	"encoding/json"
	"os"
	"reflect"
	"unsafe"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	v12 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/client-go/tools/clientcmd/api/latest"
	cmdutil "k8s.io/kubectl/pkg/cmd/util"
@@ -47,7 +50,28 @@ func ConvertToKubeconfigBytes(factory cmdutil.Factory) ([]byte, string, error) {
	if err != nil {
		return nil, "", err
	}
	rawConfig, err := loader.RawConfig()
	// TODO: use a more elegant way to get MergedRawConfig.
	var useReflectToGetRawConfigFunc = func() (c api.Config, err error) {
		defer func() {
			if er := recover(); er != nil {
				err = er.(error)
			}
		}()
		// The loader's first (unexported) field holds the deferred client config.
		value := reflect.ValueOf(loader).Elem().Field(0)
		value = reflect.NewAt(value.Type(), unsafe.Pointer(value.UnsafeAddr())).Elem()
		loadingClientConfig := value.Interface().(*clientcmd.DeferredLoadingClientConfig)
		// Field index 3 is the unexported clientConfig, resolved to a *DirectClientConfig.
		value = reflect.ValueOf(loadingClientConfig).Elem().Field(3)
		value = reflect.NewAt(value.Type(), unsafe.Pointer(value.UnsafeAddr())).Elem()
		clientConfig := value.Interface().(*clientcmd.DirectClientConfig)
		// MergedRawConfig applies CLI overrides (e.g. --context) that RawConfig drops.
		return clientConfig.MergedRawConfig()
	}
	rawConfig, err := useReflectToGetRawConfigFunc()
	if err != nil {
		// Reflection is fragile across client-go versions; fall back to RawConfig.
		rawConfig, err = loader.RawConfig()
	}
	if err != nil {
		return nil, "", err
	}
	err = api.FlattenConfig(&rawConfig)
	if err != nil {
		return nil, "", err
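
This reflection hack is the core of the fix: loader.RawConfig() returns the kubeconfig as loaded from disk and drops command-line overrides, while DirectClientConfig.MergedRawConfig() (available in the client-go version this commit upgrades to) folds them back in, so a user-supplied --context survives the conversion. A minimal sketch of the difference, assuming client-go's clientcmd package (not taken from this diff):

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	// Equivalent of passing --context=dev on the command line.
	overrides := &clientcmd.ConfigOverrides{CurrentContext: "dev"}
	cfg := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides)

	raw, err := cfg.RawConfig()
	if err != nil {
		panic(err)
	}
	// Prints the context from the kubeconfig file, not "dev":
	// RawConfig ignores the override, which is the bug being worked around.
	fmt.Println(raw.CurrentContext)
}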


@@ -1,5 +1,17 @@
# Change history of go-restful
## [v3.12.0] - 2024-03-11
- add Flush method #529 (#538)
- fix: Improper handling of empty POST requests (#543)
## [v3.11.3] - 2024-01-09
- better not have 2 tags on one commit
## [v3.11.1, v3.11.2] - 2024-01-09
- fix by restoring custom JSON handler functions (Mike Beaumont #540)
## [v3.11.0] - 2023-08-19
- restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled.


@@ -2,7 +2,6 @@ go-restful
==========
package for building REST-style Web Services using Google Go
[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful)
[![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful)
[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful)
[![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful)
@@ -95,7 +94,6 @@ There are several hooks to customize the behavior of the go-restful package.
- Trace logging
- Compression
- Encoders for other serializers
- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .`
- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/`
## Resources
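
The deleted jsoniter bullet corresponds to the removed build-tag files further down in this commit; trailing-slash matching remains configurable through the package variable the README keeps. A hedged sketch (assuming go-restful v3's package-level switch):

import restful "github.com/emicklei/go-restful/v3"

func init() {
	// Flip the path-matching strategy for routes that end with '/'.
	restful.TrimRightSlashEnabled = false
}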


@@ -49,6 +49,16 @@ func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
	return c.writer.(http.CloseNotifier).CloseNotify()
}

// Flush is part of http.Flusher interface. Noop if the underlying writer doesn't support it.
func (c *CompressingResponseWriter) Flush() {
	flusher, ok := c.writer.(http.Flusher)
	if !ok {
		// writer doesn't support http.Flusher interface
		return
	}
	flusher.Flush()
}

// Close the underlying compressor
func (c *CompressingResponseWriter) Close() error {
	if c.isCompressorClosed() {
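
With the new passthrough, a streaming handler can flush partial output to the client even when gzip compression is active. A hedged sketch of such a handler (hypothetical route, assuming go-restful v3.12+):

import (
	"fmt"
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

func stream(req *restful.Request, resp *restful.Response) {
	for i := 0; i < 3; i++ {
		fmt.Fprintf(resp, "chunk %d\n", i)
		// Response embeds http.ResponseWriter; with v3.12.0 the
		// CompressingResponseWriter forwards this Flush downstream.
		if f, ok := resp.ResponseWriter.(http.Flusher); ok {
			f.Flush()
		}
	}
}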


@@ -5,11 +5,18 @@ package restful
// that can be found in the LICENSE file.

import (
	"encoding/json"
	"encoding/xml"
	"strings"
	"sync"
)

var (
	MarshalIndent = json.MarshalIndent
	NewDecoder    = json.NewDecoder
	NewEncoder    = json.NewEncoder
)

// EntityReaderWriter can read and write values using an encoding such as JSON,XML.
type EntityReaderWriter interface {
	// Read a serialized version of the value from the request.


@@ -1,11 +0,0 @@
// +build !jsoniter

package restful

import "encoding/json"

var (
	MarshalIndent = json.MarshalIndent
	NewDecoder    = json.NewDecoder
	NewEncoder    = json.NewEncoder
)


@@ -1,12 +0,0 @@
// +build jsoniter

package restful

import "github.com/json-iterator/go"

var (
	json          = jsoniter.ConfigCompatibleWithStandardLibrary
	MarshalIndent = json.MarshalIndent
	NewDecoder    = json.NewDecoder
	NewEncoder    = json.NewEncoder
)


@@ -155,7 +155,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
	method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length")
	if (method == http.MethodPost ||
		method == http.MethodPut ||
		method == http.MethodPatch) && length == "" {
		method == http.MethodPatch) && (length == "" || length == "0") {
		return nil, NewError(
			http.StatusUnsupportedMediaType,
			fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")),
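
The patched condition treats an explicit Content-Length: 0 like a missing one, so empty POST/PUT/PATCH requests without a usable body are rejected up front. A hedged repro sketch (hypothetical route and expected status; assumes the JSR311 router and go-restful v3.12):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	restful "github.com/emicklei/go-restful/v3"
)

func main() {
	ws := new(restful.WebService)
	ws.Path("/things").Consumes(restful.MIME_JSON).Produces(restful.MIME_JSON)
	ws.Route(ws.POST("").To(func(_ *restful.Request, resp *restful.Response) {
		resp.WriteHeader(http.StatusCreated)
	}))

	c := restful.NewContainer()
	c.Add(ws)
	c.Router(restful.RouterJSR311{}) // the router patched above

	req := httptest.NewRequest(http.MethodPost, "/things", nil)
	req.Header.Set("Content-Length", "0") // explicitly empty body, no Content-Type
	rec := httptest.NewRecorder()
	c.ServeHTTP(rec, req)
	fmt.Println(rec.Code) // expected 415 under the stricter check
}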

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,141 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"unicode/utf8"
)
const (
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
kelvin = '\u212a'
smallLongEss = '\u017f'
)
// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
// - S maps to s and to U+017F 'ſ' Latin small letter long s
// - k maps to K and to U+212A 'K' Kelvin sign
//
// See https://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
nonLetter := false
special := false // special letter
for _, b := range s {
if b >= utf8.RuneSelf {
return bytes.EqualFold
}
upper := b & caseMask
if upper < 'A' || upper > 'Z' {
nonLetter = true
} else if upper == 'K' || upper == 'S' {
// See above for why these letters are special.
special = true
}
}
if special {
return equalFoldRight
}
if nonLetter {
return asciiEqualFold
}
return simpleLetterEqualFold
}
// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
for _, sb := range s {
if len(t) == 0 {
return false
}
tb := t[0]
if tb < utf8.RuneSelf {
if sb != tb {
sbUpper := sb & caseMask
if 'A' <= sbUpper && sbUpper <= 'Z' {
if sbUpper != tb&caseMask {
return false
}
} else {
return false
}
}
t = t[1:]
continue
}
// sb is ASCII and t is not. t must be either kelvin
// sign or long s; sb must be s, S, k, or K.
tr, size := utf8.DecodeRune(t)
switch sb {
case 's', 'S':
if tr != smallLongEss {
return false
}
case 'k', 'K':
if tr != kelvin {
return false
}
default:
return false
}
t = t[size:]
}
return len(t) == 0
}
// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, sb := range s {
tb := t[i]
if sb == tb {
continue
}
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
if sb&caseMask != tb&caseMask {
return false
}
} else {
return false
}
}
return true
}
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, b := range s {
if b&caseMask != t[i]&caseMask {
return false
}
}
return true
}
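
The "special letters" above are exactly the ASCII bytes whose Unicode simple-fold orbit leaves ASCII, which is why the general bytes.EqualFold fallback is needed; a quick demonstration:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// 'K' also case-folds to U+212A (Kelvin sign) and 'S' to U+017F (long s),
	// so a byte-wise ASCII comparison alone would miss these matches.
	fmt.Println(bytes.EqualFold([]byte("K"), []byte("\u212A"))) // true
	fmt.Println(bytes.EqualFold([]byte("S"), []byte("\u017F"))) // true
}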


@@ -0,0 +1,42 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gofuzz
package json
import (
"fmt"
)
func Fuzz(data []byte) (score int) {
for _, ctor := range []func() any{
func() any { return new(any) },
func() any { return new(map[string]any) },
func() any { return new([]any) },
} {
v := ctor()
err := Unmarshal(data, v)
if err != nil {
continue
}
score = 1
m, err := Marshal(v)
if err != nil {
fmt.Printf("v=%#v\n", v)
panic(err)
}
u := ctor()
err = Unmarshal(m, u)
if err != nil {
fmt.Printf("v=%#v\n", v)
fmt.Printf("m=%s\n", m)
panic(err)
}
}
return
}


@@ -0,0 +1,143 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
)
// Compact appends to dst the JSON-encoded src with
// insignificant space characters elided.
func Compact(dst *bytes.Buffer, src []byte) error {
return compact(dst, src, false)
}
func compact(dst *bytes.Buffer, src []byte, escape bool) error {
origLen := dst.Len()
scan := newScanner()
defer freeScanner(scan)
start := 0
for i, c := range src {
if escape && (c == '<' || c == '>' || c == '&') {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u00`)
dst.WriteByte(hex[c>>4])
dst.WriteByte(hex[c&0xF])
start = i + 1
}
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
if escape && c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u202`)
dst.WriteByte(hex[src[i+2]&0xF])
start = i + 3
}
v := scan.step(scan, c)
if v >= scanSkipSpace {
if v == scanError {
break
}
if start < i {
dst.Write(src[start:i])
}
start = i + 1
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
}
if start < len(src) {
dst.Write(src[start:])
}
return nil
}
func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
dst.WriteByte('\n')
dst.WriteString(prefix)
for i := 0; i < depth; i++ {
dst.WriteString(indent)
}
}
// Indent appends to dst an indented form of the JSON-encoded src.
// Each element in a JSON object or array begins on a new,
// indented line beginning with prefix followed by one or more
// copies of indent according to the indentation nesting.
// The data appended to dst does not begin with the prefix nor
// any indentation, to make it easier to embed inside other formatted JSON data.
// Although leading space characters (space, tab, carriage return, newline)
// at the beginning of src are dropped, trailing space characters
// at the end of src are preserved and copied to dst.
// For example, if src has no trailing spaces, neither will dst;
// if src ends in a trailing newline, so will dst.
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
origLen := dst.Len()
scan := newScanner()
defer freeScanner(scan)
needIndent := false
depth := 0
for _, c := range src {
scan.bytes++
v := scan.step(scan, c)
if v == scanSkipSpace {
continue
}
if v == scanError {
break
}
if needIndent && v != scanEndObject && v != scanEndArray {
needIndent = false
depth++
newline(dst, prefix, indent, depth)
}
// Emit semantically uninteresting bytes
// (in particular, punctuation in strings) unmodified.
if v == scanContinue {
dst.WriteByte(c)
continue
}
// Add spacing around real punctuation.
switch c {
case '{', '[':
// delay indent so that empty object and array are formatted as {} and [].
needIndent = true
dst.WriteByte(c)
case ',':
dst.WriteByte(c)
newline(dst, prefix, indent, depth)
case ':':
dst.WriteByte(c)
dst.WriteByte(' ')
case '}', ']':
if needIndent {
// suppress indent in empty object/array
needIndent = false
} else {
depth--
newline(dst, prefix, indent, depth)
}
dst.WriteByte(c)
default:
dst.WriteByte(c)
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
}
return nil
}
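
These vendored Compact and Indent helpers mirror the exported encoding/json functions of the same names; for reference, the public stdlib equivalents behave like this:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	src := []byte(`{ "a": [1, 2] }`)

	var dst bytes.Buffer
	if err := json.Compact(&dst, src); err != nil {
		panic(err)
	}
	fmt.Println(dst.String()) // {"a":[1,2]}

	dst.Reset()
	if err := json.Indent(&dst, src, "", "  "); err != nil {
		panic(err)
	}
	fmt.Println(dst.String())
}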


@@ -0,0 +1,610 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
// JSON value parser state machine.
// Just about at the limit of what is reasonable to write by hand.
// Some parts are a bit tedious, but overall it nicely factors out the
// otherwise common code from the multiple scanning functions
// in this package (Compact, Indent, checkValid, etc).
//
// This file starts with two simple examples using the scanner
// before diving into the scanner itself.
import (
"strconv"
"sync"
)
// Valid reports whether data is a valid JSON encoding.
func Valid(data []byte) bool {
scan := newScanner()
defer freeScanner(scan)
return checkValid(data, scan) == nil
}
// checkValid verifies that data is valid JSON-encoded data.
// scan is passed in for use by checkValid to avoid an allocation.
// checkValid returns nil or a SyntaxError.
func checkValid(data []byte, scan *scanner) error {
scan.reset()
for _, c := range data {
scan.bytes++
if scan.step(scan, c) == scanError {
return scan.err
}
}
if scan.eof() == scanError {
return scan.err
}
return nil
}
// A SyntaxError is a description of a JSON syntax error.
// Unmarshal will return a SyntaxError if the JSON can't be parsed.
type SyntaxError struct {
msg string // description of error
Offset int64 // error occurred after reading Offset bytes
}
func (e *SyntaxError) Error() string { return e.msg }
// A scanner is a JSON scanning state machine.
// Callers call scan.reset and then pass bytes in one at a time
// by calling scan.step(&scan, c) for each byte.
// The return value, referred to as an opcode, tells the
// caller about significant parsing events like beginning
// and ending literals, objects, and arrays, so that the
// caller can follow along if it wishes.
// The return value scanEnd indicates that a single top-level
// JSON value has been completed, *before* the byte that
// just got passed in. (The indication must be delayed in order
// to recognize the end of numbers: is 123 a whole value or
// the beginning of 12345e+6?).
type scanner struct {
// The step is a func to be called to execute the next transition.
// Also tried using an integer constant and a single func
// with a switch, but using the func directly was 10% faster
// on a 64-bit Mac Mini, and it's nicer to read.
step func(*scanner, byte) int
// Reached end of top-level value.
endTop bool
// Stack of what we're in the middle of - array values, object keys, object values.
parseState []int
// Error that happened, if any.
err error
// total bytes consumed, updated by decoder.Decode (and deliberately
// not set to zero by scan.reset)
bytes int64
}
var scannerPool = sync.Pool{
New: func() any {
return &scanner{}
},
}
func newScanner() *scanner {
scan := scannerPool.Get().(*scanner)
// scan.reset by design doesn't set bytes to zero
scan.bytes = 0
scan.reset()
return scan
}
func freeScanner(scan *scanner) {
// Avoid hanging on to too much memory in extreme cases.
if len(scan.parseState) > 1024 {
scan.parseState = nil
}
scannerPool.Put(scan)
}
// These values are returned by the state transition functions
// assigned to scanner.state and the method scanner.eof.
// They give details about the current state of the scan that
// callers might be interested to know about.
// It is okay to ignore the return value of any particular
// call to scanner.state: if one call returns scanError,
// every subsequent call will return scanError too.
const (
// Continue.
scanContinue = iota // uninteresting byte
scanBeginLiteral // end implied by next result != scanContinue
scanBeginObject // begin object
scanObjectKey // just finished object key (string)
scanObjectValue // just finished non-last object value
scanEndObject // end object (implies scanObjectValue if possible)
scanBeginArray // begin array
scanArrayValue // just finished array value
scanEndArray // end array (implies scanArrayValue if possible)
scanSkipSpace // space byte; can skip; known to be last "continue" result
// Stop.
scanEnd // top-level value ended *before* this byte; known to be first "stop" result
scanError // hit an error, scanner.err.
)
// These values are stored in the parseState stack.
// They give the current state of a composite value
// being scanned. If the parser is inside a nested value
// the parseState describes the nested state, outermost at entry 0.
const (
parseObjectKey = iota // parsing object key (before colon)
parseObjectValue // parsing object value (after colon)
parseArrayValue // parsing array value
)
// This limits the max nesting depth to prevent stack overflow.
// This is permitted by https://tools.ietf.org/html/rfc7159#section-9
const maxNestingDepth = 10000
// reset prepares the scanner for use.
// It must be called before calling s.step.
func (s *scanner) reset() {
s.step = stateBeginValue
s.parseState = s.parseState[0:0]
s.err = nil
s.endTop = false
}
// eof tells the scanner that the end of input has been reached.
// It returns a scan status just as s.step does.
func (s *scanner) eof() int {
if s.err != nil {
return scanError
}
if s.endTop {
return scanEnd
}
s.step(s, ' ')
if s.endTop {
return scanEnd
}
if s.err == nil {
s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
}
return scanError
}
// pushParseState pushes a new parse state p onto the parse stack.
// an error state is returned if maxNestingDepth was exceeded, otherwise successState is returned.
func (s *scanner) pushParseState(c byte, newParseState int, successState int) int {
s.parseState = append(s.parseState, newParseState)
if len(s.parseState) <= maxNestingDepth {
return successState
}
return s.error(c, "exceeded max depth")
}
// popParseState pops a parse state (already obtained) off the stack
// and updates s.step accordingly.
func (s *scanner) popParseState() {
n := len(s.parseState) - 1
s.parseState = s.parseState[0:n]
if n == 0 {
s.step = stateEndTop
s.endTop = true
} else {
s.step = stateEndValue
}
}
func isSpace(c byte) bool {
return c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n')
}
// stateBeginValueOrEmpty is the state after reading `[`.
func stateBeginValueOrEmpty(s *scanner, c byte) int {
if isSpace(c) {
return scanSkipSpace
}
if c == ']' {
return stateEndValue(s, c)
}
return stateBeginValue(s, c)
}
// stateBeginValue is the state at the beginning of the input.
func stateBeginValue(s *scanner, c byte) int {
if isSpace(c) {
return scanSkipSpace
}
switch c {
case '{':
s.step = stateBeginStringOrEmpty
return s.pushParseState(c, parseObjectKey, scanBeginObject)
case '[':
s.step = stateBeginValueOrEmpty
return s.pushParseState(c, parseArrayValue, scanBeginArray)
case '"':
s.step = stateInString
return scanBeginLiteral
case '-':
s.step = stateNeg
return scanBeginLiteral
case '0': // beginning of 0.123
s.step = state0
return scanBeginLiteral
case 't': // beginning of true
s.step = stateT
return scanBeginLiteral
case 'f': // beginning of false
s.step = stateF
return scanBeginLiteral
case 'n': // beginning of null
s.step = stateN
return scanBeginLiteral
}
if '1' <= c && c <= '9' { // beginning of 1234.5
s.step = state1
return scanBeginLiteral
}
return s.error(c, "looking for beginning of value")
}
// stateBeginStringOrEmpty is the state after reading `{`.
func stateBeginStringOrEmpty(s *scanner, c byte) int {
if isSpace(c) {
return scanSkipSpace
}
if c == '}' {
n := len(s.parseState)
s.parseState[n-1] = parseObjectValue
return stateEndValue(s, c)
}
return stateBeginString(s, c)
}
// stateBeginString is the state after reading `{"key": value,`.
func stateBeginString(s *scanner, c byte) int {
if isSpace(c) {
return scanSkipSpace
}
if c == '"' {
s.step = stateInString
return scanBeginLiteral
}
return s.error(c, "looking for beginning of object key string")
}
// stateEndValue is the state after completing a value,
// such as after reading `{}` or `true` or `["x"`.
func stateEndValue(s *scanner, c byte) int {
n := len(s.parseState)
if n == 0 {
// Completed top-level before the current byte.
s.step = stateEndTop
s.endTop = true
return stateEndTop(s, c)
}
if isSpace(c) {
s.step = stateEndValue
return scanSkipSpace
}
ps := s.parseState[n-1]
switch ps {
case parseObjectKey:
if c == ':' {
s.parseState[n-1] = parseObjectValue
s.step = stateBeginValue
return scanObjectKey
}
return s.error(c, "after object key")
case parseObjectValue:
if c == ',' {
s.parseState[n-1] = parseObjectKey
s.step = stateBeginString
return scanObjectValue
}
if c == '}' {
s.popParseState()
return scanEndObject
}
return s.error(c, "after object key:value pair")
case parseArrayValue:
if c == ',' {
s.step = stateBeginValue
return scanArrayValue
}
if c == ']' {
s.popParseState()
return scanEndArray
}
return s.error(c, "after array element")
}
return s.error(c, "")
}
// stateEndTop is the state after finishing the top-level value,
// such as after reading `{}` or `[1,2,3]`.
// Only space characters should be seen now.
func stateEndTop(s *scanner, c byte) int {
if !isSpace(c) {
// Complain about non-space byte on next call.
s.error(c, "after top-level value")
}
return scanEnd
}
// stateInString is the state after reading `"`.
func stateInString(s *scanner, c byte) int {
if c == '"' {
s.step = stateEndValue
return scanContinue
}
if c == '\\' {
s.step = stateInStringEsc
return scanContinue
}
if c < 0x20 {
return s.error(c, "in string literal")
}
return scanContinue
}
// stateInStringEsc is the state after reading `"\` during a quoted string.
func stateInStringEsc(s *scanner, c byte) int {
switch c {
case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
s.step = stateInString
return scanContinue
case 'u':
s.step = stateInStringEscU
return scanContinue
}
return s.error(c, "in string escape code")
}
// stateInStringEscU is the state after reading `"\u` during a quoted string.
func stateInStringEscU(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU1
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
func stateInStringEscU1(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU12
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
func stateInStringEscU12(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU123
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
func stateInStringEscU123(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInString
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateNeg is the state after reading `-` during a number.
func stateNeg(s *scanner, c byte) int {
if c == '0' {
s.step = state0
return scanContinue
}
if '1' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return s.error(c, "in numeric literal")
}
// state1 is the state after reading a non-zero integer during a number,
// such as after reading `1` or `100` but not `0`.
func state1(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return state0(s, c)
}
// state0 is the state after reading `0` during a number.
func state0(s *scanner, c byte) int {
if c == '.' {
s.step = stateDot
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}
// stateDot is the state after reading the integer and decimal point in a number,
// such as after reading `1.`.
func stateDot(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = stateDot0
return scanContinue
}
return s.error(c, "after decimal point in numeric literal")
}
// stateDot0 is the state after reading the integer, decimal point, and subsequent
// digits of a number, such as after reading `3.14`.
func stateDot0(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}
// stateE is the state after reading the mantissa and e in a number,
// such as after reading `314e` or `0.314e`.
func stateE(s *scanner, c byte) int {
if c == '+' || c == '-' {
s.step = stateESign
return scanContinue
}
return stateESign(s, c)
}
// stateESign is the state after reading the mantissa, e, and sign in a number,
// such as after reading `314e-` or `0.314e+`.
func stateESign(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = stateE0
return scanContinue
}
return s.error(c, "in exponent of numeric literal")
}
// stateE0 is the state after reading the mantissa, e, optional sign,
// and at least one digit of the exponent in a number,
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
func stateE0(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
return scanContinue
}
return stateEndValue(s, c)
}
// stateT is the state after reading `t`.
func stateT(s *scanner, c byte) int {
if c == 'r' {
s.step = stateTr
return scanContinue
}
return s.error(c, "in literal true (expecting 'r')")
}
// stateTr is the state after reading `tr`.
func stateTr(s *scanner, c byte) int {
if c == 'u' {
s.step = stateTru
return scanContinue
}
return s.error(c, "in literal true (expecting 'u')")
}
// stateTru is the state after reading `tru`.
func stateTru(s *scanner, c byte) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal true (expecting 'e')")
}
// stateF is the state after reading `f`.
func stateF(s *scanner, c byte) int {
if c == 'a' {
s.step = stateFa
return scanContinue
}
return s.error(c, "in literal false (expecting 'a')")
}
// stateFa is the state after reading `fa`.
func stateFa(s *scanner, c byte) int {
if c == 'l' {
s.step = stateFal
return scanContinue
}
return s.error(c, "in literal false (expecting 'l')")
}
// stateFal is the state after reading `fal`.
func stateFal(s *scanner, c byte) int {
if c == 's' {
s.step = stateFals
return scanContinue
}
return s.error(c, "in literal false (expecting 's')")
}
// stateFals is the state after reading `fals`.
func stateFals(s *scanner, c byte) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal false (expecting 'e')")
}
// stateN is the state after reading `n`.
func stateN(s *scanner, c byte) int {
if c == 'u' {
s.step = stateNu
return scanContinue
}
return s.error(c, "in literal null (expecting 'u')")
}
// stateNu is the state after reading `nu`.
func stateNu(s *scanner, c byte) int {
if c == 'l' {
s.step = stateNul
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}
// stateNul is the state after reading `nul`.
func stateNul(s *scanner, c byte) int {
if c == 'l' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}
// stateError is the state after reaching a syntax error,
// such as after reading `[1}` or `5.1.2`.
func stateError(s *scanner, c byte) int {
return scanError
}
// error records an error and switches to the error state.
func (s *scanner) error(c byte, context string) int {
s.step = stateError
s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
return scanError
}
// quoteChar formats c as a quoted character literal.
func quoteChar(c byte) string {
// special cases - different from quoted strings
if c == '\'' {
return `'\''`
}
if c == '"' {
return `'"'`
}
// use quoted string with different quotation marks
s := strconv.Quote(string(c))
return "'" + s[1:len(s)-1] + "'"
}
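// For example, quoteChar('\'') returns `'\''`, quoteChar('"') returns `'"'`,
// and quoteChar('\n') returns `'\n'`, which is how the invalid-character
// messages built by (*scanner).error above render control bytes readably.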


@@ -0,0 +1,495 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"encoding/json"
"io"
)
// A Decoder reads and decodes JSON values from an input stream.
type Decoder struct {
r io.Reader
buf []byte
d decodeState
scanp int // start of unread data in buf
scanned int64 // amount of data already scanned
scan scanner
err error
tokenState int
tokenStack []int
}
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may
// read data from r beyond the JSON values requested.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}
// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
// DisallowUnknownFields causes the Decoder to return an error when the destination
// is a struct and the input contains object keys which do not match any
// non-ignored, exported fields in the destination.
func (dec *Decoder) DisallowUnknownFields() { dec.d.disallowUnknownFields = true }
// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about
// the conversion of JSON into a Go value.
func (dec *Decoder) Decode(v any) error {
if dec.err != nil {
return dec.err
}
if err := dec.tokenPrepareForDecode(); err != nil {
return err
}
if !dec.tokenValueAllowed() {
return &SyntaxError{msg: "not at beginning of value", Offset: dec.InputOffset()}
}
// Read whole value into buffer.
n, err := dec.readValue()
if err != nil {
return err
}
dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
dec.scanp += n
// Don't save err from unmarshal into dec.err:
// the connection is still usable since we read a complete JSON
// object from it before the error happened.
err = dec.d.unmarshal(v)
// fixup token streaming state
dec.tokenValueEnd()
return err
}
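// Example (illustrative; strings and log are assumed imports for the sketch):
// draining a stream of concatenated JSON values. Because the Decoder buffers
// past each value, it is the right tool when more data follows the JSON on
// the same reader.
//
//	dec := NewDecoder(strings.NewReader(`{"n":1} {"n":2}`))
//	for {
//		var v struct{ N int }
//		if err := dec.Decode(&v); err == io.EOF {
//			break
//		} else if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(v.N) // 1, then 2
//	}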
// Buffered returns a reader of the data remaining in the Decoder's
// buffer. The reader is valid until the next call to Decode.
func (dec *Decoder) Buffered() io.Reader {
return bytes.NewReader(dec.buf[dec.scanp:])
}
// readValue reads a JSON value into dec.buf.
// It returns the length of the encoding.
func (dec *Decoder) readValue() (int, error) {
dec.scan.reset()
scanp := dec.scanp
var err error
Input:
// help the compiler see that scanp is never negative, so it can remove
// some bounds checks below.
for scanp >= 0 {
// Look in the buffer for a new value.
for ; scanp < len(dec.buf); scanp++ {
c := dec.buf[scanp]
dec.scan.bytes++
switch dec.scan.step(&dec.scan, c) {
case scanEnd:
// scanEnd is delayed one byte so we decrement
// the scanner bytes count by 1 to ensure that
// this value is correct in the next call of Decode.
dec.scan.bytes--
break Input
case scanEndObject, scanEndArray:
// scanEnd is delayed one byte.
// We might block trying to get that byte from src,
// so instead invent a space byte.
if stateEndValue(&dec.scan, ' ') == scanEnd {
scanp++
break Input
}
case scanError:
dec.err = dec.scan.err
return 0, dec.scan.err
}
}
// Did the last read have an error?
// Delayed until now to allow buffer scan.
if err != nil {
if err == io.EOF {
if dec.scan.step(&dec.scan, ' ') == scanEnd {
break Input
}
if nonSpace(dec.buf) {
err = io.ErrUnexpectedEOF
}
}
dec.err = err
return 0, err
}
n := scanp - dec.scanp
err = dec.refill()
scanp = dec.scanp + n
}
return scanp - dec.scanp, nil
}
func (dec *Decoder) refill() error {
// Make room to read more into the buffer.
// First slide down data already consumed.
if dec.scanp > 0 {
dec.scanned += int64(dec.scanp)
n := copy(dec.buf, dec.buf[dec.scanp:])
dec.buf = dec.buf[:n]
dec.scanp = 0
}
// Grow buffer if not large enough.
const minRead = 512
if cap(dec.buf)-len(dec.buf) < minRead {
newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
copy(newBuf, dec.buf)
dec.buf = newBuf
}
// Read. Delay error for next iteration (after scan).
n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
dec.buf = dec.buf[0 : len(dec.buf)+n]
return err
}
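// nonSpace reports whether b contains any byte that is not JSON whitespace.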
func nonSpace(b []byte) bool {
for _, c := range b {
if !isSpace(c) {
return true
}
}
return false
}
// An Encoder writes JSON values to an output stream.
type Encoder struct {
w io.Writer
err error
escapeHTML bool
indentBuf *bytes.Buffer
indentPrefix string
indentValue string
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{w: w, escapeHTML: true}
}
// Encode writes the JSON encoding of v to the stream,
// followed by a newline character.
//
// See the documentation for Marshal for details about the
// conversion of Go values to JSON.
func (enc *Encoder) Encode(v any) error {
if enc.err != nil {
return enc.err
}
e := newEncodeState()
defer encodeStatePool.Put(e)
err := e.marshal(v, encOpts{escapeHTML: enc.escapeHTML})
if err != nil {
return err
}
// Terminate each value with a newline.
// This makes the output look a little nicer
// when debugging, and some kind of space
// is required if the encoded value was a number,
// so that the reader knows there aren't more
// digits coming.
e.WriteByte('\n')
b := e.Bytes()
if enc.indentPrefix != "" || enc.indentValue != "" {
if enc.indentBuf == nil {
enc.indentBuf = new(bytes.Buffer)
}
enc.indentBuf.Reset()
err = Indent(enc.indentBuf, b, enc.indentPrefix, enc.indentValue)
if err != nil {
return err
}
b = enc.indentBuf.Bytes()
}
if _, err = enc.w.Write(b); err != nil {
enc.err = err
}
return err
}
// SetIndent instructs the encoder to format each subsequent encoded
// value as if indented by the package-level function Indent(dst, src, prefix, indent).
// Calling SetIndent("", "") disables indentation.
func (enc *Encoder) SetIndent(prefix, indent string) {
enc.indentPrefix = prefix
enc.indentValue = indent
}
// SetEscapeHTML specifies whether problematic HTML characters
// should be escaped inside JSON quoted strings.
// The default behavior is to escape &, <, and > to \u0026, \u003c, and \u003e
// to avoid certain safety problems that can arise when embedding JSON in HTML.
//
// In non-HTML settings where the escaping interferes with the readability
// of the output, SetEscapeHTML(false) disables this behavior.
func (enc *Encoder) SetEscapeHTML(on bool) {
enc.escapeHTML = on
}
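// Example (illustrative; os is an assumed import for the sketch): with
// escaping on (the default), the string "<b>&</b>" is written as
// "\u003cb\u003e\u0026\u003c/b\u003e"; after SetEscapeHTML(false) it is
// written verbatim.
//
//	enc := NewEncoder(os.Stdout)
//	enc.SetEscapeHTML(false)
//	_ = enc.Encode("<b>&</b>") // prints "<b>&</b>" (quoted) plus a newline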
// RawMessage is a raw encoded JSON value.
// It implements Marshaler and Unmarshaler and can
// be used to delay JSON decoding or precompute a JSON encoding.
type RawMessage = json.RawMessage
// A Token holds a value of one of these types:
//
// Delim, for the four JSON delimiters [ ] { }
// bool, for JSON booleans
// float64, for JSON numbers
// Number, for JSON numbers
// string, for JSON string literals
// nil, for JSON null
type Token any
const (
tokenTopValue = iota
tokenArrayStart
tokenArrayValue
tokenArrayComma
tokenObjectStart
tokenObjectKey
tokenObjectColon
tokenObjectValue
tokenObjectComma
)
// advance the token state from a separator state to a value state
func (dec *Decoder) tokenPrepareForDecode() error {
// Note: Not calling peek before switch, to avoid
// putting peek into the standard Decode path.
// peek is only called when using the Token API.
switch dec.tokenState {
case tokenArrayComma:
c, err := dec.peek()
if err != nil {
return err
}
if c != ',' {
return &SyntaxError{"expected comma after array element", dec.InputOffset()}
}
dec.scanp++
dec.tokenState = tokenArrayValue
case tokenObjectColon:
c, err := dec.peek()
if err != nil {
return err
}
if c != ':' {
return &SyntaxError{"expected colon after object key", dec.InputOffset()}
}
dec.scanp++
dec.tokenState = tokenObjectValue
}
return nil
}
func (dec *Decoder) tokenValueAllowed() bool {
switch dec.tokenState {
case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
return true
}
return false
}
func (dec *Decoder) tokenValueEnd() {
switch dec.tokenState {
case tokenArrayStart, tokenArrayValue:
dec.tokenState = tokenArrayComma
case tokenObjectValue:
dec.tokenState = tokenObjectComma
}
}
// A Delim is a JSON array or object delimiter, one of [ ] { or }.
type Delim rune
func (d Delim) String() string {
return string(d)
}
// Token returns the next JSON token in the input stream.
// At the end of the input stream, Token returns nil, io.EOF.
//
// Token guarantees that the delimiters [ ] { } it returns are
// properly nested and matched: if Token encounters an unexpected
// delimiter in the input, it will return an error.
//
// The input stream consists of basic JSON values—bool, string,
// number, and null—along with delimiters [ ] { } of type Delim
// to mark the start and end of arrays and objects.
// Commas and colons are elided.
func (dec *Decoder) Token() (Token, error) {
for {
c, err := dec.peek()
if err != nil {
return nil, err
}
switch c {
case '[':
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
dec.tokenState = tokenArrayStart
return Delim('['), nil
case ']':
if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
dec.tokenValueEnd()
return Delim(']'), nil
case '{':
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
dec.tokenState = tokenObjectStart
return Delim('{'), nil
case '}':
if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
dec.tokenValueEnd()
return Delim('}'), nil
case ':':
if dec.tokenState != tokenObjectColon {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = tokenObjectValue
continue
case ',':
if dec.tokenState == tokenArrayComma {
dec.scanp++
dec.tokenState = tokenArrayValue
continue
}
if dec.tokenState == tokenObjectComma {
dec.scanp++
dec.tokenState = tokenObjectKey
continue
}
return dec.tokenError(c)
case '"':
if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
var x string
old := dec.tokenState
dec.tokenState = tokenTopValue
err := dec.Decode(&x)
dec.tokenState = old
if err != nil {
return nil, err
}
dec.tokenState = tokenObjectColon
return x, nil
}
fallthrough
default:
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
var x any
if err := dec.Decode(&x); err != nil {
return nil, err
}
return x, nil
}
}
}
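// Example (illustrative; strings, fmt, and log are assumed imports for the
// sketch): walking a document token by token. Delimiters come back as Delim
// values, object keys and other strings as string, numbers as float64 (or
// Number when UseNumber is set).
//
//	dec := NewDecoder(strings.NewReader(`{"xs":[1,2]}`))
//	for {
//		t, err := dec.Token()
//		if err == io.EOF {
//			break
//		}
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Printf("%T %v\n", t, t) // Delim {, string xs, Delim [, float64 1, ...
//	}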
func (dec *Decoder) tokenError(c byte) (Token, error) {
var context string
switch dec.tokenState {
case tokenTopValue:
context = " looking for beginning of value"
case tokenArrayStart, tokenArrayValue, tokenObjectValue:
context = " looking for beginning of value"
case tokenArrayComma:
context = " after array element"
case tokenObjectKey:
context = " looking for beginning of object key string"
case tokenObjectColon:
context = " after object key"
case tokenObjectComma:
context = " after object key:value pair"
}
return nil, &SyntaxError{"invalid character " + quoteChar(c) + context, dec.InputOffset()}
}
// More reports whether there is another element in the
// current array or object being parsed.
func (dec *Decoder) More() bool {
c, err := dec.peek()
return err == nil && c != ']' && c != '}'
}
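// peek returns the next non-space byte in the buffer without consuming it,
// refilling from the underlying reader as needed.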
func (dec *Decoder) peek() (byte, error) {
var err error
for {
for i := dec.scanp; i < len(dec.buf); i++ {
c := dec.buf[i]
if isSpace(c) {
continue
}
dec.scanp = i
return c, nil
}
// buffer has been scanned, now report any error
if err != nil {
return 0, err
}
err = dec.refill()
}
}
// InputOffset returns the input stream byte offset of the current decoder position.
// The offset gives the location of the end of the most recently returned token
// and the beginning of the next token.
func (dec *Decoder) InputOffset() int64 {
return dec.scanned + int64(dec.scanp)
}


@@ -0,0 +1,218 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import "unicode/utf8"
// safeSet holds the value true if the ASCII character with the given array
// position can be represented inside a JSON string without any further
// escaping.
//
// All values are true except for the ASCII control characters (0-31), the
// double quote ("), and the backslash character ("\").
var safeSet = [utf8.RuneSelf]bool{
' ': true,
'!': true,
'"': false,
'#': true,
'$': true,
'%': true,
'&': true,
'\'': true,
'(': true,
')': true,
'*': true,
'+': true,
',': true,
'-': true,
'.': true,
'/': true,
'0': true,
'1': true,
'2': true,
'3': true,
'4': true,
'5': true,
'6': true,
'7': true,
'8': true,
'9': true,
':': true,
';': true,
'<': true,
'=': true,
'>': true,
'?': true,
'@': true,
'A': true,
'B': true,
'C': true,
'D': true,
'E': true,
'F': true,
'G': true,
'H': true,
'I': true,
'J': true,
'K': true,
'L': true,
'M': true,
'N': true,
'O': true,
'P': true,
'Q': true,
'R': true,
'S': true,
'T': true,
'U': true,
'V': true,
'W': true,
'X': true,
'Y': true,
'Z': true,
'[': true,
'\\': false,
']': true,
'^': true,
'_': true,
'`': true,
'a': true,
'b': true,
'c': true,
'd': true,
'e': true,
'f': true,
'g': true,
'h': true,
'i': true,
'j': true,
'k': true,
'l': true,
'm': true,
'n': true,
'o': true,
'p': true,
'q': true,
'r': true,
's': true,
't': true,
'u': true,
'v': true,
'w': true,
'x': true,
'y': true,
'z': true,
'{': true,
'|': true,
'}': true,
'~': true,
'\u007f': true,
}
// htmlSafeSet holds the value true if the ASCII character with the given
// array position can be safely represented inside a JSON string, embedded
// inside of HTML <script> tags, without any additional escaping.
//
// All values are true except for the ASCII control characters (0-31), the
// double quote ("), the backslash character ("\"), HTML opening and closing
// tags ("<" and ">"), and the ampersand ("&").
var htmlSafeSet = [utf8.RuneSelf]bool{
' ': true,
'!': true,
'"': false,
'#': true,
'$': true,
'%': true,
'&': false,
'\'': true,
'(': true,
')': true,
'*': true,
'+': true,
',': true,
'-': true,
'.': true,
'/': true,
'0': true,
'1': true,
'2': true,
'3': true,
'4': true,
'5': true,
'6': true,
'7': true,
'8': true,
'9': true,
':': true,
';': true,
'<': false,
'=': true,
'>': false,
'?': true,
'@': true,
'A': true,
'B': true,
'C': true,
'D': true,
'E': true,
'F': true,
'G': true,
'H': true,
'I': true,
'J': true,
'K': true,
'L': true,
'M': true,
'N': true,
'O': true,
'P': true,
'Q': true,
'R': true,
'S': true,
'T': true,
'U': true,
'V': true,
'W': true,
'X': true,
'Y': true,
'Z': true,
'[': true,
'\\': false,
']': true,
'^': true,
'_': true,
'`': true,
'a': true,
'b': true,
'c': true,
'd': true,
'e': true,
'f': true,
'g': true,
'h': true,
'i': true,
'j': true,
'k': true,
'l': true,
'm': true,
'n': true,
'o': true,
'p': true,
'q': true,
'r': true,
's': true,
't': true,
'u': true,
'v': true,
'w': true,
'x': true,
'y': true,
'z': true,
'{': true,
'|': true,
'}': true,
'~': true,
'\u007f': true,
}
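// Illustrative use of these tables: an encoder checks one byte at a time
// whether it may be copied through without escaping. A sketch (the encoder in
// this package is more involved, and multi-byte runes take a separate path):
//
//	func isSafe(c byte, escapeHTML bool) bool {
//		if c >= utf8.RuneSelf {
//			return false // not a single-byte rune; handled elsewhere
//		}
//		if escapeHTML {
//			return htmlSafeSet[c]
//		}
//		return safeSet[c]
//	}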


@@ -0,0 +1,38 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"strings"
)
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
tag, opt, _ := strings.Cut(tag, ",")
return tag, tagOptions(opt)
}
// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
if len(o) == 0 {
return false
}
s := string(o)
for s != "" {
var name string
name, s, _ = strings.Cut(s, ",")
if name == optionName {
return true
}
}
return false
}
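// Example (illustrative): a typical "json" struct tag.
//
//	name, opts := parseTag("price,omitempty,string")
//	// name == "price"
//	// opts.Contains("omitempty") == true
//	// opts.Contains("string")    == true
//	// opts.Contains("omit")      == false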


@@ -2,31 +2,34 @@ package jsonpatch
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"reflect"
"github.com/evanphx/json-patch/v5/internal/json"
)
func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
curDoc, err := cur.intoDoc()
func merge(cur, patch *lazyNode, mergeMerge bool, options *ApplyOptions) *lazyNode {
curDoc, err := cur.intoDoc(options)
if err != nil {
pruneNulls(patch)
pruneNulls(patch, options)
return patch
}
patchDoc, err := patch.intoDoc()
patchDoc, err := patch.intoDoc(options)
if err != nil {
return patch
}
mergeDocs(curDoc, patchDoc, mergeMerge)
mergeDocs(curDoc, patchDoc, mergeMerge, options)
return cur
}
func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
func mergeDocs(doc, patch *partialDoc, mergeMerge bool, options *ApplyOptions) {
for k, v := range patch.obj {
if v == nil {
if mergeMerge {
@@ -42,60 +45,60 @@ func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
}
doc.obj[k] = nil
} else {
_ = doc.remove(k, &ApplyOptions{})
_ = doc.remove(k, options)
}
} else {
cur, ok := doc.obj[k]
if !ok || cur == nil {
if !mergeMerge {
pruneNulls(v)
pruneNulls(v, options)
}
_ = doc.set(k, v, &ApplyOptions{})
_ = doc.set(k, v, options)
} else {
_ = doc.set(k, merge(cur, v, mergeMerge), &ApplyOptions{})
_ = doc.set(k, merge(cur, v, mergeMerge, options), options)
}
}
}
}
func pruneNulls(n *lazyNode) {
sub, err := n.intoDoc()
func pruneNulls(n *lazyNode, options *ApplyOptions) {
sub, err := n.intoDoc(options)
if err == nil {
pruneDocNulls(sub)
pruneDocNulls(sub, options)
} else {
ary, err := n.intoAry()
if err == nil {
pruneAryNulls(ary)
pruneAryNulls(ary, options)
}
}
}
func pruneDocNulls(doc *partialDoc) *partialDoc {
func pruneDocNulls(doc *partialDoc, options *ApplyOptions) *partialDoc {
for k, v := range doc.obj {
if v == nil {
_ = doc.remove(k, &ApplyOptions{})
} else {
pruneNulls(v)
pruneNulls(v, options)
}
}
return doc
}
func pruneAryNulls(ary *partialArray) *partialArray {
func pruneAryNulls(ary *partialArray, options *ApplyOptions) *partialArray {
newAry := []*lazyNode{}
for _, v := range *ary {
for _, v := range ary.nodes {
if v != nil {
pruneNulls(v)
pruneNulls(v, options)
}
newAry = append(newAry, v)
}
*ary = newAry
ary.nodes = newAry
return ary
}
@@ -117,20 +120,34 @@ func MergePatch(docData, patchData []byte) ([]byte, error) {
}
func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
doc := &partialDoc{}
if !json.Valid(docData) {
return nil, errBadJSONDoc
}
docErr := json.Unmarshal(docData, doc)
if !json.Valid(patchData) {
return nil, errBadJSONPatch
}
patch := &partialDoc{}
options := NewApplyOptions()
patchErr := json.Unmarshal(patchData, patch)
doc := &partialDoc{
opts: options,
}
docErr := doc.UnmarshalJSON(docData)
patch := &partialDoc{
opts: options,
}
patchErr := patch.UnmarshalJSON(patchData)
if isSyntaxError(docErr) {
return nil, errBadJSONDoc
}
if isSyntaxError(patchErr) {
return nil, errBadJSONPatch
return patchData, nil
}
if docErr == nil && doc.obj == nil {
@@ -138,7 +155,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
}
if patchErr == nil && patch.obj == nil {
return nil, errBadJSONPatch
return patchData, nil
}
if docErr != nil || patchErr != nil {
@@ -147,19 +164,23 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
if mergeMerge {
doc = patch
} else {
doc = pruneDocNulls(patch)
doc = pruneDocNulls(patch, options)
}
} else {
patchAry := &partialArray{}
patchErr = json.Unmarshal(patchData, patchAry)
patchErr = unmarshal(patchData, &patchAry.nodes)
if patchErr != nil {
// Not an array either, a literal is the result directly.
if json.Valid(patchData) {
return patchData, nil
}
return nil, errBadJSONPatch
}
pruneAryNulls(patchAry)
pruneAryNulls(patchAry, options)
out, patchErr := json.Marshal(patchAry)
out, patchErr := json.Marshal(patchAry.nodes)
if patchErr != nil {
return nil, errBadJSONPatch
@@ -168,13 +189,19 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
return out, nil
}
} else {
mergeDocs(doc, patch, mergeMerge)
mergeDocs(doc, patch, mergeMerge, options)
}
return json.Marshal(doc)
}
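// Example (illustrative): RFC 7386 merge patching with this package. Members
// set to null in the patch are removed from the document.
//
//	doc := []byte(`{"name":"old","tags":["a"]}`)
//	patch := []byte(`{"name":"new","tags":null}`)
//	out, err := MergePatch(doc, patch)
//	// out == []byte(`{"name":"new"}`), err == nil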
func isSyntaxError(err error) bool {
if errors.Is(err, io.EOF) {
return true
}
if errors.Is(err, io.ErrUnexpectedEOF) {
return true
}
if _, ok := err.(*json.SyntaxError); ok {
return true
}
@@ -227,12 +254,12 @@ func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
originalDoc := map[string]interface{}{}
modifiedDoc := map[string]interface{}{}
err := json.Unmarshal(originalJSON, &originalDoc)
err := unmarshal(originalJSON, &originalDoc)
if err != nil {
return nil, errBadJSONDoc
}
err = json.Unmarshal(modifiedJSON, &modifiedDoc)
err = unmarshal(modifiedJSON, &modifiedDoc)
if err != nil {
return nil, errBadJSONDoc
}
@@ -245,6 +272,10 @@ func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
return json.Marshal(dest)
}
func unmarshal(data []byte, into interface{}) error {
return json.UnmarshalValid(data, into)
}
// createArrayMergePatch will return an array of merge-patch documents capable
// of converting the original document to the modified document for each
// pair of JSON documents provided in the arrays.
@@ -253,12 +284,12 @@ func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
originalDocs := []json.RawMessage{}
modifiedDocs := []json.RawMessage{}
err := json.Unmarshal(originalJSON, &originalDocs)
err := unmarshal(originalJSON, &originalDocs)
if err != nil {
return nil, errBadJSONDoc
}
err = json.Unmarshal(modifiedJSON, &modifiedDocs)
err = unmarshal(modifiedJSON, &modifiedDocs)
if err != nil {
return nil, errBadJSONDoc
}
@@ -314,6 +345,11 @@ func matchesValue(av, bv interface{}) bool {
if bt == at {
return true
}
case json.Number:
bt := bv.(json.Number)
if bt == at {
return true
}
case float64:
bt := bv.(float64)
if bt == at {
@@ -377,7 +413,7 @@ func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
if len(dst) > 0 {
into[key] = dst
}
case string, float64, bool:
case string, float64, bool, json.Number:
if !matchesValue(av, bv) {
into[key] = bv
}


@@ -2,11 +2,12 @@ package jsonpatch
import (
"bytes"
"encoding/json"
"fmt"
"strconv"
"strings"
"unicode"
"github.com/evanphx/json-patch/v5/internal/json"
"github.com/pkg/errors"
)
@@ -37,6 +38,8 @@ var (
ErrInvalid = errors.New("invalid state detected")
ErrInvalidIndex = errors.New("invalid index referenced")
ErrExpectedObject = errors.New("invalid value, expected object")
rawJSONArray = []byte("[]")
rawJSONObject = []byte("{}")
rawJSONNull = []byte("null")
@@ -45,7 +48,7 @@ var (
type lazyNode struct {
raw *json.RawMessage
doc *partialDoc
ary partialArray
ary *partialArray
which int
}
@@ -56,11 +59,17 @@ type Operation map[string]*json.RawMessage
type Patch []Operation
type partialDoc struct {
self *lazyNode
keys []string
obj map[string]*lazyNode
opts *ApplyOptions
}
type partialArray []*lazyNode
type partialArray struct {
self *lazyNode
nodes []*lazyNode
}
type container interface {
get(key string, options *ApplyOptions) (*lazyNode, error)
@@ -85,6 +94,8 @@ type ApplyOptions struct {
// EnsurePathExistsOnAdd instructs json-patch to recursively create the missing parts of path on "add" operation.
// Default to false.
EnsurePathExistsOnAdd bool
EscapeHTML bool
}
// NewApplyOptions creates a default set of options for calls to ApplyWithOptions.
@@ -94,6 +105,7 @@ func NewApplyOptions() *ApplyOptions {
AccumulatedCopySizeLimit: AccumulatedCopySizeLimit,
AllowMissingPathOnRemove: false,
EnsurePathExistsOnAdd: false,
EscapeHTML: true,
}
}
@@ -107,14 +119,14 @@ func newRawMessage(buf []byte) *json.RawMessage {
return &ra
}
func (n *lazyNode) MarshalJSON() ([]byte, error) {
func (n *lazyNode) RedirectMarshalJSON() (any, error) {
switch n.which {
case eRaw:
return json.Marshal(n.raw)
return n.raw, nil
case eDoc:
return json.Marshal(n.doc)
return n.doc, nil
case eAry:
return json.Marshal(n.ary)
return n.ary.nodes, nil
default:
return nil, ErrUnknownType
}
@@ -128,39 +140,50 @@ func (n *lazyNode) UnmarshalJSON(data []byte) error {
return nil
}
func (n *partialDoc) MarshalJSON() ([]byte, error) {
var buf bytes.Buffer
if _, err := buf.WriteString("{"); err != nil {
return nil, err
func (n *partialDoc) TrustMarshalJSON(buf *bytes.Buffer) error {
if n.obj == nil {
return ErrExpectedObject
}
if err := buf.WriteByte('{'); err != nil {
return err
}
escaped := true
// n.opts should always be set, but in case we missed a case,
// guard.
if n.opts != nil {
escaped = n.opts.EscapeHTML
}
for i, k := range n.keys {
if i > 0 {
if _, err := buf.WriteString(", "); err != nil {
return nil, err
if err := buf.WriteByte(','); err != nil {
return err
}
}
key, err := json.Marshal(k)
key, err := json.MarshalEscaped(k, escaped)
if err != nil {
return nil, err
return err
}
if _, err := buf.Write(key); err != nil {
return nil, err
return err
}
if _, err := buf.WriteString(": "); err != nil {
return nil, err
if err := buf.WriteByte(':'); err != nil {
return err
}
value, err := json.Marshal(n.obj[k])
value, err := json.MarshalEscaped(n.obj[k], escaped)
if err != nil {
return nil, err
return err
}
if _, err := buf.Write(value); err != nil {
return nil, err
return err
}
}
if _, err := buf.WriteString("}"); err != nil {
return nil, err
if err := buf.WriteByte('}'); err != nil {
return err
}
return buf.Bytes(), nil
return nil
}
type syntaxError struct {
@@ -172,70 +195,29 @@ func (err *syntaxError) Error() string {
}
func (n *partialDoc) UnmarshalJSON(data []byte) error {
if err := json.Unmarshal(data, &n.obj); err != nil {
return err
}
buffer := bytes.NewBuffer(data)
d := json.NewDecoder(buffer)
if t, err := d.Token(); err != nil {
return err
} else if t != startObject {
return &syntaxError{fmt.Sprintf("unexpected JSON token in document node: %v", t)}
}
for d.More() {
k, err := d.Token()
keys, err := json.UnmarshalValidWithKeys(data, &n.obj)
if err != nil {
return err
}
key, ok := k.(string)
if !ok {
return &syntaxError{fmt.Sprintf("unexpected JSON token as document node key: %s", k)}
}
if err := skipValue(d); err != nil {
return err
}
n.keys = append(n.keys, key)
}
n.keys = keys
return nil
}
func skipValue(d *json.Decoder) error {
t, err := d.Token()
if err != nil {
return err
}
if t != startObject && t != startArray {
return nil
}
for d.More() {
if t == startObject {
// consume key token
if _, err := d.Token(); err != nil {
return err
}
}
if err := skipValue(d); err != nil {
return err
}
}
end, err := d.Token()
if err != nil {
return err
}
if t == startObject && end != endObject {
return &syntaxError{msg: "expected close object token"}
}
if t == startArray && end != endArray {
return &syntaxError{msg: "expected close object token"}
}
return nil
func (n *partialArray) UnmarshalJSON(data []byte) error {
return json.UnmarshalValid(data, &n.nodes)
}
func deepCopy(src *lazyNode) (*lazyNode, int, error) {
func (n *partialArray) RedirectMarshalJSON() (interface{}, error) {
return n.nodes, nil
}
func deepCopy(src *lazyNode, options *ApplyOptions) (*lazyNode, int, error) {
if src == nil {
return nil, 0, nil
}
a, err := src.MarshalJSON()
a, err := json.MarshalEscaped(src, options.EscapeHTML)
if err != nil {
return nil, 0, err
}
@@ -243,7 +225,17 @@ func deepCopy(src *lazyNode) (*lazyNode, int, error) {
return newLazyNode(newRawMessage(a)), sz, nil
}
func (n *lazyNode) intoDoc() (*partialDoc, error) {
func (n *lazyNode) nextByte() byte {
s := []byte(*n.raw)
for unicode.IsSpace(rune(s[0])) {
s = s[1:]
}
return s[0]
}
func (n *lazyNode) intoDoc(options *ApplyOptions) (*partialDoc, error) {
if n.which == eDoc {
return n.doc, nil
}
@@ -252,8 +244,17 @@ func (n *lazyNode) intoDoc() (*partialDoc, error) {
return nil, ErrInvalid
}
err := json.Unmarshal(*n.raw, &n.doc)
if n.nextByte() != '{' {
return nil, ErrInvalid
}
err := unmarshal(*n.raw, &n.doc)
if n.doc == nil {
return nil, ErrInvalid
}
n.doc.opts = options
if err != nil {
return nil, err
}
@@ -264,21 +265,21 @@ func (n *lazyNode) intoDoc() (*partialDoc, error) {
func (n *lazyNode) intoAry() (*partialArray, error) {
if n.which == eAry {
return &n.ary, nil
return n.ary, nil
}
if n.raw == nil {
return nil, ErrInvalid
}
err := json.Unmarshal(*n.raw, &n.ary)
err := unmarshal(*n.raw, &n.ary)
if err != nil {
return nil, err
}
n.which = eAry
return &n.ary, nil
return n.ary, nil
}
func (n *lazyNode) compact() []byte {
@@ -302,12 +303,16 @@ func (n *lazyNode) tryDoc() bool {
return false
}
err := json.Unmarshal(*n.raw, &n.doc)
err := unmarshal(*n.raw, &n.doc)
if err != nil {
return false
}
if n.doc == nil {
return false
}
n.which = eDoc
return true
}
@@ -317,7 +322,7 @@ func (n *lazyNode) tryAry() bool {
return false
}
err := json.Unmarshal(*n.raw, &n.ary)
err := unmarshal(*n.raw, &n.ary)
if err != nil {
return false
@@ -327,6 +332,18 @@ func (n *lazyNode) tryAry() bool {
return true
}
func (n *lazyNode) isNull() bool {
if n == nil {
return true
}
if n.raw == nil {
return true
}
return bytes.Equal(n.compact(), rawJSONNull)
}
func (n *lazyNode) equal(o *lazyNode) bool {
if n.which == eRaw {
if !n.tryDoc() && !n.tryAry() {
@@ -334,7 +351,27 @@ func (n *lazyNode) equal(o *lazyNode) bool {
return false
}
return bytes.Equal(n.compact(), o.compact())
nc := n.compact()
oc := o.compact()
if nc[0] == '"' && oc[0] == '"' {
// ok, 2 strings
var ns, os string
err := json.UnmarshalValid(nc, &ns)
if err != nil {
return false
}
err = json.UnmarshalValid(oc, &os)
if err != nil {
return false
}
return ns == os
}
return bytes.Equal(nc, oc)
}
}
@@ -380,12 +417,12 @@ func (n *lazyNode) equal(o *lazyNode) bool {
return false
}
if len(n.ary) != len(o.ary) {
if len(n.ary.nodes) != len(o.ary.nodes) {
return false
}
for idx, val := range n.ary {
if !val.equal(o.ary[idx]) {
for idx, val := range n.ary.nodes {
if !val.equal(o.ary.nodes[idx]) {
return false
}
}
@@ -398,7 +435,7 @@ func (o Operation) Kind() string {
if obj, ok := o["op"]; ok && obj != nil {
var op string
err := json.Unmarshal(*obj, &op)
err := unmarshal(*obj, &op)
if err != nil {
return "unknown"
@@ -415,7 +452,7 @@ func (o Operation) Path() (string, error) {
if obj, ok := o["path"]; ok && obj != nil {
var op string
err := json.Unmarshal(*obj, &op)
err := unmarshal(*obj, &op)
if err != nil {
return "unknown", err
@@ -432,7 +469,7 @@ func (o Operation) From() (string, error) {
if obj, ok := o["from"]; ok && obj != nil {
var op string
err := json.Unmarshal(*obj, &op)
err := unmarshal(*obj, &op)
if err != nil {
return "unknown", err
@@ -446,6 +483,10 @@ func (o Operation) From() (string, error) {
func (o Operation) value() *lazyNode {
if obj, ok := o["value"]; ok {
// A `null` gets decoded as a nil RawMessage, so let's fix it up here.
if obj == nil {
return newLazyNode(newRawMessage(rawJSONNull))
}
return newLazyNode(obj)
}
@@ -461,7 +502,7 @@ func (o Operation) ValueInterface() (interface{}, error) {
var v interface{}
err := json.Unmarshal(*obj, &v)
err := unmarshal(*obj, &v)
if err != nil {
return nil, err
@@ -497,6 +538,9 @@ func findObject(pd *container, path string, options *ApplyOptions) (container, s
split := strings.Split(path, "/")
if len(split) < 2 {
if path == "" {
return doc, ""
}
return nil, ""
}
@@ -521,7 +565,7 @@ func findObject(pd *container, path string, options *ApplyOptions) (container, s
return nil, ""
}
} else {
doc, err = next.intoDoc()
doc, err = next.intoDoc(options)
if err != nil {
return nil, ""
@@ -533,6 +577,10 @@ func findObject(pd *container, path string, options *ApplyOptions) (container, s
}
func (d *partialDoc) set(key string, val *lazyNode, options *ApplyOptions) error {
if d.obj == nil {
return ErrExpectedObject
}
found := false
for _, k := range d.keys {
if k == key {
@@ -552,6 +600,14 @@ func (d *partialDoc) add(key string, val *lazyNode, options *ApplyOptions) error
}
func (d *partialDoc) get(key string, options *ApplyOptions) (*lazyNode, error) {
if key == "" {
return d.self, nil
}
if d.obj == nil {
return nil, ErrExpectedObject
}
v, ok := d.obj[key]
if !ok {
return v, errors.Wrapf(ErrMissing, "unable to get nonexistent key: %s", key)
@@ -560,6 +616,10 @@ func (d *partialDoc) get(key string, options *ApplyOptions) (*lazyNode, error) {
}
func (d *partialDoc) remove(key string, options *ApplyOptions) error {
if d.obj == nil {
return ErrExpectedObject
}
_, ok := d.obj[key]
if !ok {
if options.AllowMissingPathOnRemove {
@@ -591,19 +651,19 @@ func (d *partialArray) set(key string, val *lazyNode, options *ApplyOptions) err
if !options.SupportNegativeIndices {
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
if idx < -len(*d) {
if idx < -len(d.nodes) {
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
idx += len(*d)
idx += len(d.nodes)
}
(*d)[idx] = val
d.nodes[idx] = val
return nil
}
func (d *partialArray) add(key string, val *lazyNode, options *ApplyOptions) error {
if key == "-" {
*d = append(*d, val)
d.nodes = append(d.nodes, val)
return nil
}
@@ -612,11 +672,11 @@ func (d *partialArray) add(key string, val *lazyNode, options *ApplyOptions) err
return errors.Wrapf(err, "value was not a proper array index: '%s'", key)
}
sz := len(*d) + 1
sz := len(d.nodes) + 1
ary := make([]*lazyNode, sz)
cur := *d
cur := d
if idx >= len(ary) {
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
@@ -632,15 +692,19 @@ func (d *partialArray) add(key string, val *lazyNode, options *ApplyOptions) err
idx += len(ary)
}
copy(ary[0:idx], cur[0:idx])
copy(ary[0:idx], cur.nodes[0:idx])
ary[idx] = val
copy(ary[idx+1:], cur[idx:])
copy(ary[idx+1:], cur.nodes[idx:])
*d = ary
d.nodes = ary
return nil
}
func (d *partialArray) get(key string, options *ApplyOptions) (*lazyNode, error) {
if key == "" {
return d.self, nil
}
idx, err := strconv.Atoi(key)
if err != nil {
@@ -651,17 +715,17 @@ func (d *partialArray) get(key string, options *ApplyOptions) (*lazyNode, error)
if !options.SupportNegativeIndices {
return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
if idx < -len(*d) {
if idx < -len(d.nodes) {
return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
idx += len(*d)
idx += len(d.nodes)
}
if idx >= len(*d) {
if idx >= len(d.nodes) {
return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
return (*d)[idx], nil
return d.nodes[idx], nil
}
func (d *partialArray) remove(key string, options *ApplyOptions) error {
@@ -670,9 +734,9 @@ func (d *partialArray) remove(key string, options *ApplyOptions) error {
return err
}
cur := *d
cur := d
if idx >= len(cur) {
if idx >= len(cur.nodes) {
if options.AllowMissingPathOnRemove {
return nil
}
@@ -683,21 +747,21 @@ func (d *partialArray) remove(key string, options *ApplyOptions) error {
if !options.SupportNegativeIndices {
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
if idx < -len(cur) {
if idx < -len(cur.nodes) {
if options.AllowMissingPathOnRemove {
return nil
}
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
idx += len(cur)
idx += len(cur.nodes)
}
ary := make([]*lazyNode, len(cur)-1)
ary := make([]*lazyNode, len(cur.nodes)-1)
copy(ary[0:idx], cur[0:idx])
copy(ary[idx:], cur[idx+1:])
copy(ary[0:idx], cur.nodes[0:idx])
copy(ary[idx:], cur.nodes[idx+1:])
*d = ary
d.nodes = ary
return nil
}
@@ -707,6 +771,33 @@ func (p Patch) add(doc *container, op Operation, options *ApplyOptions) error {
return errors.Wrapf(ErrMissing, "add operation failed to decode path")
}
// special case, adding to empty means replacing the container with the value given
if path == "" {
val := op.value()
var pd container
if (*val.raw)[0] == '[' {
pd = &partialArray{
self: val,
}
} else {
pd = &partialDoc{
self: val,
opts: options,
}
}
err := json.UnmarshalValid(*val.raw, pd)
if err != nil {
return err
}
*doc = pd
return nil
}
if options.EnsurePathExistsOnAdd {
err = ensurePathExists(doc, path, options)
@@ -762,9 +853,9 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error {
if arrIndex, err = strconv.Atoi(part); err == nil {
pa, ok := doc.(*partialArray)
if ok && arrIndex >= len(*pa)+1 {
if ok && arrIndex >= len(pa.nodes)+1 {
// Pad the array with null values up to the required index.
for i := len(*pa); i <= arrIndex-1; i++ {
for i := len(pa.nodes); i <= arrIndex-1; i++ {
doc.add(strconv.Itoa(i), newLazyNode(newRawMessage(rawJSONNull)), options)
}
}
@@ -798,7 +889,10 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error {
newNode := newLazyNode(newRawMessage(rawJSONObject))
doc.add(part, newNode, options)
doc, _ = newNode.intoDoc()
doc, err = newNode.intoDoc(options)
if err != nil {
return err
}
}
} else {
if isArray(*target.raw) {
@@ -808,7 +902,7 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error {
return err
}
} else {
doc, err = target.intoDoc()
doc, err = target.intoDoc(options)
if err != nil {
return err
@@ -894,12 +988,14 @@ func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) erro
if !val.tryAry() {
return errors.Wrapf(err, "replace operation value must be object or array")
}
} else {
val.doc.opts = options
}
}
switch val.which {
case eAry:
*doc = &val.ary
*doc = val.ary
case eDoc:
*doc = val.doc
case eRaw:
@@ -934,6 +1030,10 @@ func (p Patch) move(doc *container, op Operation, options *ApplyOptions) error {
return errors.Wrapf(err, "move operation failed to decode from")
}
if from == "" {
return errors.Wrapf(ErrInvalid, "unable to move entire document to another path")
}
con, key := findObject(doc, from, options)
if con == nil {
@@ -983,7 +1083,7 @@ func (p Patch) test(doc *container, op Operation, options *ApplyOptions) error {
self.doc = sv
self.which = eDoc
case *partialArray:
self.ary = *sv
self.ary = sv
self.which = eAry
}
@@ -1005,12 +1105,14 @@ func (p Patch) test(doc *container, op Operation, options *ApplyOptions) error {
return errors.Wrapf(err, "error in test for path: '%s'", path)
}
ov := op.value()
if val == nil {
if op.value() == nil || op.value().raw == nil {
if ov.isNull() {
return nil
}
return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
} else if op.value() == nil {
} else if ov.isNull() {
return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
}
@@ -1030,7 +1132,7 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64, op
con, key := findObject(doc, from, options)
if con == nil {
return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from)
return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: \"%s\"", from)
}
val, err := con.get(key, options)
@@ -1049,7 +1151,7 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64, op
return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path)
}
valCopy, sz, err := deepCopy(val)
valCopy, sz, err := deepCopy(val, options)
if err != nil {
return errors.Wrapf(err, "error while performing deep copy")
}
@@ -1077,9 +1179,13 @@ func Equal(a, b []byte) bool {
// DecodePatch decodes the passed JSON document as an RFC 6902 patch.
func DecodePatch(buf []byte) (Patch, error) {
if !json.Valid(buf) {
return nil, ErrInvalid
}
var p Patch
err := json.Unmarshal(buf, &p)
err := unmarshal(buf, &p)
if err != nil {
return nil, err
@@ -1117,14 +1223,26 @@ func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyO
return doc, nil
}
var pd container
if doc[0] == '[' {
pd = &partialArray{}
} else {
pd = &partialDoc{}
if !json.Valid(doc) {
return nil, ErrInvalid
}
err := json.Unmarshal(doc, pd)
raw := json.RawMessage(doc)
self := newLazyNode(&raw)
var pd container
if doc[0] == '[' {
pd = &partialArray{
self: self,
}
} else {
pd = &partialDoc{
self: self,
opts: options,
}
}
err := unmarshal(doc, pd)
if err != nil {
return nil, err
@@ -1157,11 +1275,18 @@ func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyO
}
}
if indent != "" {
return json.MarshalIndent(pd, "", indent)
data, err := json.MarshalEscaped(pd, options.EscapeHTML)
if err != nil {
return nil, err
}
return json.Marshal(pd)
if indent == "" {
return data, nil
}
var buf bytes.Buffer
json.Indent(&buf, data, "", indent)
return buf.Bytes(), nil
}
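// Example (illustrative): decoding and applying an RFC 6902 patch; in this
// version, Apply and ApplyWithOptions funnel into ApplyIndentWithOptions above.
//
//	p, err := DecodePatch([]byte(`[{"op":"replace","path":"/name","value":"new"}]`))
//	if err != nil {
//		return err
//	}
//	out, err := p.Apply([]byte(`{"name":"old"}`))
//	// out == []byte(`{"name":"new"}`)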
// From http://tools.ietf.org/html/rfc6901#section-4 :


@@ -1,61 +0,0 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package slogr enables usage of a slog.Handler with logr.Logger as front-end
// API and of a logr.LogSink through the slog.Handler and thus slog.Logger
// APIs.
//
// See the README in the top-level [./logr] package for a discussion of
// interoperability.
//
// Deprecated: use the main logr package instead.
package slogr
import (
"log/slog"
"github.com/go-logr/logr"
)
// NewLogr returns a logr.Logger which writes to the slog.Handler.
//
// Deprecated: use [logr.FromSlogHandler] instead.
func NewLogr(handler slog.Handler) logr.Logger {
return logr.FromSlogHandler(handler)
}
// NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger.
//
// Deprecated: use [logr.ToSlogHandler] instead.
func NewSlogHandler(logger logr.Logger) slog.Handler {
return logr.ToSlogHandler(logger)
}
// ToSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger.
//
// Deprecated: use [logr.ToSlogHandler] instead.
func ToSlogHandler(logger logr.Logger) slog.Handler {
return logr.ToSlogHandler(logger)
}
// SlogSink is an optional interface that a LogSink can implement to support
// logging through the slog.Logger or slog.Handler APIs better.
//
// Deprecated: use [logr.SlogSink] instead.
type SlogSink = logr.SlogSink


@@ -264,7 +264,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
knd := reflect.ValueOf(node).Kind()
if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values")
return errors.New("only structs, pointers, maps and slices are supported for setting values")
}
if nameProvider == nil {

52
vendor/github.com/go-openapi/swag/BENCHMARK.md

@@ -0,0 +1,52 @@
# Benchmarks
## Name mangling utilities
```bash
go test -bench XXX -run XXX -benchtime 30s
```
### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df
```
goos: linux
goarch: amd64
pkg: github.com/go-openapi/swag
cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op
BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op
BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op
BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op
BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op
BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op
```
### Benchmarks after PR #79
Roughly a 10x performance improvement, with memory allocations reduced by a factor of about 100.
```
goos: linux
goarch: amd64
pkg: github.com/go-openapi/swag
cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op
BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op
BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op
BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op
BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op
BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op
```
```
goos: linux
goarch: amd64
pkg: github.com/go-openapi/swag
cpu: AMD Ryzen 7 5800X 8-Core Processor
BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op
BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op
BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op
BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op
BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op
BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op
```


@@ -16,9 +16,130 @@ package swag
import (
"sort"
"strings"
"sync"
)
var (
// commonInitialisms are common acronyms that are kept as whole uppercased words.
commonInitialisms *indexOfInitialisms
// initialisms is a slice of sorted initialisms
initialisms []string
// a copy of initialisms pre-baked as []rune
initialismsRunes [][]rune
initialismsUpperCased [][]rune
isInitialism func(string) bool
maxAllocMatches int
)
func init() {
// Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
configuredInitialisms := map[string]bool{
"ACL": true,
"API": true,
"ASCII": true,
"CPU": true,
"CSS": true,
"DNS": true,
"EOF": true,
"GUID": true,
"HTML": true,
"HTTPS": true,
"HTTP": true,
"ID": true,
"IP": true,
"IPv4": true,
"IPv6": true,
"JSON": true,
"LHS": true,
"OAI": true,
"QPS": true,
"RAM": true,
"RHS": true,
"RPC": true,
"SLA": true,
"SMTP": true,
"SQL": true,
"SSH": true,
"TCP": true,
"TLS": true,
"TTL": true,
"UDP": true,
"UI": true,
"UID": true,
"UUID": true,
"URI": true,
"URL": true,
"UTF8": true,
"VM": true,
"XML": true,
"XMPP": true,
"XSRF": true,
"XSS": true,
}
// a thread-safe index of initialisms
commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
initialisms = commonInitialisms.sorted()
initialismsRunes = asRunes(initialisms)
initialismsUpperCased = asUpperCased(initialisms)
maxAllocMatches = maxAllocHeuristic(initialismsRunes)
// a predicate to test whether a word is a known initialism
isInitialism = commonInitialisms.isInitialism
}
func asRunes(in []string) [][]rune {
out := make([][]rune, len(in))
for i, initialism := range in {
out[i] = []rune(initialism)
}
return out
}
func asUpperCased(in []string) [][]rune {
out := make([][]rune, len(in))
for i, initialism := range in {
out[i] = []rune(upper(trim(initialism)))
}
return out
}
func maxAllocHeuristic(in [][]rune) int {
heuristic := make(map[rune]int)
for _, initialism := range in {
heuristic[initialism[0]]++
}
var maxAlloc int
for _, val := range heuristic {
if val > maxAlloc {
maxAlloc = val
}
}
return maxAlloc
}
// AddInitialisms adds additional initialisms
func AddInitialisms(words ...string) {
for _, word := range words {
// commonInitialisms[upper(word)] = true
commonInitialisms.add(upper(word))
}
// sort again
initialisms = commonInitialisms.sorted()
initialismsRunes = asRunes(initialisms)
initialismsUpperCased = asUpperCased(initialisms)
}
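// Example (illustrative): AddInitialisms("GRPC") registers a project-specific
// initialism, so ToGoName("grpc server") should then yield "GRPCServer"
// rather than "GrpcServer".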
// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
// Since go1.9, this may be implemented with sync.Map.
type indexOfInitialisms struct {
@@ -55,7 +176,7 @@ func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
func (m *indexOfInitialisms) sorted() (result []string) {
m.sortMutex.Lock()
defer m.sortMutex.Unlock()
m.index.Range(func(key, value interface{}) bool {
m.index.Range(func(key, _ interface{}) bool {
k := key.(string)
result = append(result, k)
return true
@@ -63,3 +184,19 @@ func (m *indexOfInitialisms) sorted() (result []string) {
sort.Sort(sort.Reverse(byInitialism(result)))
return
}
type byInitialism []string
func (s byInitialism) Len() int {
return len(s)
}
func (s byInitialism) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s byInitialism) Less(i, j int) bool {
if len(s[i]) != len(s[j]) {
return len(s[i]) < len(s[j])
}
return strings.Compare(s[i], s[j]) > 0
}
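// The reversed length-then-lexicographic order above makes longer initialisms
// sort first, so "HTTPS" is tried before its prefix "HTTP" when matching.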


@@ -14,74 +14,80 @@
package swag
import "unicode"
import (
"unicode"
"unicode/utf8"
)
type (
nameLexem interface {
GetUnsafeGoName() string
GetOriginal() string
IsInitialism() bool
}
lexemKind uint8
initialismNameLexem struct {
nameLexem struct {
original string
matchedInitialism string
}
casualNameLexem struct {
original string
kind lexemKind
}
)
func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem {
return &initialismNameLexem{
const (
lexemKindCasualName lexemKind = iota
lexemKindInitialismName
)
func newInitialismNameLexem(original, matchedInitialism string) nameLexem {
return nameLexem{
kind: lexemKindInitialismName,
original: original,
matchedInitialism: matchedInitialism,
}
}
func newCasualNameLexem(original string) *casualNameLexem {
return &casualNameLexem{
func newCasualNameLexem(original string) nameLexem {
return nameLexem{
kind: lexemKindCasualName,
original: original,
}
}
func (l *initialismNameLexem) GetUnsafeGoName() string {
func (l nameLexem) GetUnsafeGoName() string {
if l.kind == lexemKindInitialismName {
return l.matchedInitialism
}
}
var (
first rune
rest string
)
func (l *casualNameLexem) GetUnsafeGoName() string {
var first rune
var rest string
for i, orig := range l.original {
if i == 0 {
first = orig
continue
}
if i > 0 {
rest = l.original[i:]
break
}
}
if len(l.original) > 1 {
return string(unicode.ToUpper(first)) + lower(rest)
b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest))
defer func() {
poolOfBuffers.RedeemBuffer(b)
}()
b.WriteRune(unicode.ToUpper(first))
b.WriteString(lower(rest))
return b.String()
}
return l.original
}
func (l *initialismNameLexem) GetOriginal() string {
func (l nameLexem) GetOriginal() string {
return l.original
}
func (l *casualNameLexem) GetOriginal() string {
return l.original
}
func (l *initialismNameLexem) IsInitialism() bool {
return true
}
func (l *casualNameLexem) IsInitialism() bool {
return false
func (l nameLexem) IsInitialism() bool {
return l.kind == lexemKindInitialismName
}


@@ -15,95 +15,239 @@
package swag
import (
"bytes"
"sync"
"unicode"
"unicode/utf8"
)
var nameReplaceTable = map[rune]string{
'@': "At ",
'&': "And ",
'|': "Pipe ",
'$': "Dollar ",
'!': "Bang ",
'-': "",
'_': "",
}
type (
splitter struct {
postSplitInitialismCheck bool
initialisms []string
initialismsRunes [][]rune
initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version
postSplitInitialismCheck bool
}
splitterOption func(*splitter) *splitter
splitterOption func(*splitter)
initialismMatch struct {
body []rune
start, end int
complete bool
}
initialismMatches []initialismMatch
)
// split calls the splitter; splitter provides more control and post options
func split(str string) []string {
lexems := newSplitter().split(str)
result := make([]string, 0, len(lexems))
type (
// memory pools of temporary objects.
//
// These are used to recycle temporarily allocated objects
// and relieve the GC from undue pressure.
for _, lexem := range lexems {
matchesPool struct {
*sync.Pool
}
buffersPool struct {
*sync.Pool
}
lexemsPool struct {
*sync.Pool
}
splittersPool struct {
*sync.Pool
}
)
var (
// poolOfMatches holds temporary slices for recycling during the initialism match process
poolOfMatches = matchesPool{
Pool: &sync.Pool{
New: func() any {
s := make(initialismMatches, 0, maxAllocMatches)
return &s
},
},
}
poolOfBuffers = buffersPool{
Pool: &sync.Pool{
New: func() any {
return new(bytes.Buffer)
},
},
}
poolOfLexems = lexemsPool{
Pool: &sync.Pool{
New: func() any {
s := make([]nameLexem, 0, maxAllocMatches)
return &s
},
},
}
poolOfSplitters = splittersPool{
Pool: &sync.Pool{
New: func() any {
s := newSplitter()
return &s
},
},
}
)
// nameReplaceTable finds a word representation for special characters.
func nameReplaceTable(r rune) (string, bool) {
switch r {
case '@':
return "At ", true
case '&':
return "And ", true
case '|':
return "Pipe ", true
case '$':
return "Dollar ", true
case '!':
return "Bang ", true
case '-':
return "", true
case '_':
return "", true
default:
return "", false
}
}
// split calls the splitter.
//
// Use newSplitter for more control and options
func split(str string) []string {
s := poolOfSplitters.BorrowSplitter()
lexems := s.split(str)
result := make([]string, 0, len(*lexems))
for _, lexem := range *lexems {
result = append(result, lexem.GetOriginal())
}
poolOfLexems.RedeemLexems(lexems)
poolOfSplitters.RedeemSplitter(s)
return result
}
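A hedged sketch of what split is expected to produce (illustrative values; split is unexported, and the exact grouping depends on the configured initialism table):
words := split("findThingByID")
// words: ["find", "Thing", "By", "ID"] - casual segments broken on case
// boundaries, with "ID" kept whole as a matched initialism (illustrative).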
func (s *splitter) split(str string) []nameLexem {
return s.toNameLexems(str)
}
func newSplitter(options ...splitterOption) *splitter {
splitter := &splitter{
func newSplitter(options ...splitterOption) splitter {
s := splitter{
postSplitInitialismCheck: false,
initialisms: initialisms,
initialismsRunes: initialismsRunes,
initialismsUpperCased: initialismsUpperCased,
}
for _, option := range options {
splitter = option(splitter)
option(&s)
}
return splitter
}
// withPostSplitInitialismCheck allows catching initialisms after the main split process
func withPostSplitInitialismCheck(s *splitter) *splitter {
s.postSplitInitialismCheck = true
return s
}
type (
initialismMatch struct {
start, end int
body []rune
complete bool
}
initialismMatches []*initialismMatch
)
// withPostSplitInitialismCheck allows catching initialisms after the main split process
func withPostSplitInitialismCheck(s *splitter) {
s.postSplitInitialismCheck = true
}
func (s *splitter) toNameLexems(name string) []nameLexem {
func (p matchesPool) BorrowMatches() *initialismMatches {
s := p.Get().(*initialismMatches)
*s = (*s)[:0] // reset slice, keep allocated capacity
return s
}
func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer {
s := p.Get().(*bytes.Buffer)
s.Reset()
if s.Cap() < size {
s.Grow(size)
}
return s
}
func (p lexemsPool) BorrowLexems() *[]nameLexem {
s := p.Get().(*[]nameLexem)
*s = (*s)[:0] // reset slice, keep allocated capacity
return s
}
func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter {
s := p.Get().(*splitter)
s.postSplitInitialismCheck = false // reset options
for _, apply := range options {
apply(s)
}
return s
}
func (p matchesPool) RedeemMatches(s *initialismMatches) {
p.Put(s)
}
func (p buffersPool) RedeemBuffer(s *bytes.Buffer) {
p.Put(s)
}
func (p lexemsPool) RedeemLexems(s *[]nameLexem) {
p.Put(s)
}
func (p splittersPool) RedeemSplitter(s *splitter) {
p.Put(s)
}
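The borrow/redeem helpers above are plain sync.Pool usage; a minimal standalone sketch of the same pattern (hypothetical names, not part of swag):
import (
	"bytes"
	"sync"
)

var bufPool = sync.Pool{New: func() any { return new(bytes.Buffer) }}

// borrow hands out a pooled buffer, reset and grown to at least size bytes.
func borrow(size int) *bytes.Buffer {
	b := bufPool.Get().(*bytes.Buffer)
	b.Reset()
	if b.Cap() < size {
		b.Grow(size)
	}
	return b
}

// redeem returns the buffer to the pool so its storage can be reused.
func redeem(b *bytes.Buffer) { bufPool.Put(b) }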
func (m initialismMatch) isZero() bool {
return m.start == 0 && m.end == 0
}
func (s splitter) split(name string) *[]nameLexem {
nameRunes := []rune(name)
matches := s.gatherInitialismMatches(nameRunes)
if matches == nil {
return poolOfLexems.BorrowLexems()
}
return s.mapMatchesToNameLexems(nameRunes, matches)
}
func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
matches := make(initialismMatches, 0)
func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches {
var matches *initialismMatches
for currentRunePosition, currentRune := range nameRunes {
newMatches := make(initialismMatches, 0, len(matches))
// recycle these allocations as we loop over runes
// with such recycling, only 2 slices should be allocated per call
// instead of O(n).
newMatches := poolOfMatches.BorrowMatches()
// check current initialism matches
for _, match := range matches {
if matches != nil { // skip first iteration
for _, match := range *matches {
if keepCompleteMatch := match.complete; keepCompleteMatch {
newMatches = append(newMatches, match)
*newMatches = append(*newMatches, match)
continue
}
// drop failed match
currentMatchRune := match.body[currentRunePosition-match.start]
if !s.initialismRuneEqual(currentMatchRune, currentRune) {
if currentMatchRune != currentRune {
continue
}
@@ -125,14 +269,15 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
match.end = currentRunePosition
}
newMatches = append(newMatches, match)
*newMatches = append(*newMatches, match)
}
}
// check for new initialism matches
for _, initialism := range s.initialisms {
initialismRunes := []rune(initialism)
if s.initialismRuneEqual(initialismRunes[0], currentRune) {
newMatches = append(newMatches, &initialismMatch{
for i := range s.initialisms {
initialismRunes := s.initialismsRunes[i]
if initialismRunes[0] == currentRune {
*newMatches = append(*newMatches, initialismMatch{
start: currentRunePosition,
body: initialismRunes,
complete: false,
@@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
}
}
if matches != nil {
poolOfMatches.RedeemMatches(matches)
}
matches = newMatches
}
// up to the caller to redeem this last slice
return matches
}
func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem {
nameLexems := make([]nameLexem, 0)
func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem {
nameLexems := poolOfLexems.BorrowLexems()
var lastAcceptedMatch *initialismMatch
for _, match := range matches {
var lastAcceptedMatch initialismMatch
for _, match := range *matches {
if !match.complete {
continue
}
if firstMatch := lastAcceptedMatch == nil; firstMatch {
nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...)
nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
if firstMatch := lastAcceptedMatch.isZero(); firstMatch {
s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start])
*nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
lastAcceptedMatch = match
@@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa
}
middle := nameRunes[lastAcceptedMatch.end+1 : match.start]
nameLexems = append(nameLexems, s.breakCasualString(middle)...)
nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
s.appendBrokenDownCasualString(nameLexems, middle)
*nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
lastAcceptedMatch = match
}
// we have not found any accepted matches
if lastAcceptedMatch == nil {
return s.breakCasualString(nameRunes)
if lastAcceptedMatch.isZero() {
*nameLexems = (*nameLexems)[:0]
s.appendBrokenDownCasualString(nameLexems, nameRunes)
} else if lastAcceptedMatch.end+1 != len(nameRunes) {
rest := nameRunes[lastAcceptedMatch.end+1:]
s.appendBrokenDownCasualString(nameLexems, rest)
}
if lastAcceptedMatch.end+1 != len(nameRunes) {
rest := nameRunes[lastAcceptedMatch.end+1:]
nameLexems = append(nameLexems, s.breakCasualString(rest)...)
}
poolOfMatches.RedeemMatches(matches)
return nameLexems
}
func (s *splitter) initialismRuneEqual(a, b rune) bool {
return a == b
}
func (s *splitter) breakInitialism(original string) nameLexem {
func (s splitter) breakInitialism(original string) nameLexem {
return newInitialismNameLexem(original, original)
}
func (s *splitter) breakCasualString(str []rune) []nameLexem {
segments := make([]nameLexem, 0)
currentSegment := ""
func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) {
currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can be reused
defer func() {
poolOfBuffers.RedeemBuffer(currentSegment)
}()
addCasualNameLexem := func(original string) {
segments = append(segments, newCasualNameLexem(original))
*segments = append(*segments, newCasualNameLexem(original))
}
addInitialismNameLexem := func(original, match string) {
segments = append(segments, newInitialismNameLexem(original, match))
*segments = append(*segments, newInitialismNameLexem(original, match))
}
addNameLexem := func(original string) {
var addNameLexem func(string)
if s.postSplitInitialismCheck {
for _, initialism := range s.initialisms {
if upper(initialism) == upper(original) {
addInitialismNameLexem(original, initialism)
addNameLexem = func(original string) {
for i := range s.initialisms {
if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) {
addInitialismNameLexem(original, s.initialisms[i])
return
}
}
}
addCasualNameLexem(original)
}
} else {
addNameLexem = addCasualNameLexem
}
for _, rn := range string(str) {
if replace, found := nameReplaceTable[rn]; found {
if currentSegment != "" {
addNameLexem(currentSegment)
currentSegment = ""
for _, rn := range str {
if replace, found := nameReplaceTable(rn); found {
if currentSegment.Len() > 0 {
addNameLexem(currentSegment.String())
currentSegment.Reset()
}
if replace != "" {
@@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem {
}
if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) {
if currentSegment != "" {
addNameLexem(currentSegment)
currentSegment = ""
if currentSegment.Len() > 0 {
addNameLexem(currentSegment.String())
currentSegment.Reset()
}
continue
}
if unicode.IsUpper(rn) {
if currentSegment != "" {
addNameLexem(currentSegment)
if currentSegment.Len() > 0 {
addNameLexem(currentSegment.String())
}
currentSegment = ""
currentSegment.Reset()
}
currentSegment += string(rn)
currentSegment.WriteRune(rn)
}
if currentSegment != "" {
addNameLexem(currentSegment)
if currentSegment.Len() > 0 {
addNameLexem(currentSegment.String())
}
return segments
}
// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but
// it ignores leading and trailing blank spaces in the compared
// string.
//
// base is assumed to be composed of upper-cased runes, and be already
// trimmed.
//
// This code is heavily inspired from strings.EqualFold.
func isEqualFoldIgnoreSpace(base []rune, str string) bool {
var i, baseIndex int
// equivalent to b := []byte(str), but without data copy
b := hackStringBytes(str)
for i < len(b) {
if c := b[i]; c < utf8.RuneSelf {
// fast path for ASCII
if c != ' ' && c != '\t' {
break
}
i++
continue
}
// unicode case
r, size := utf8.DecodeRune(b[i:])
if !unicode.IsSpace(r) {
break
}
i += size
}
if i >= len(b) {
return len(base) == 0
}
for _, baseRune := range base {
if i >= len(b) {
break
}
if c := b[i]; c < utf8.RuneSelf {
// single byte rune case (ASCII)
if baseRune >= utf8.RuneSelf {
return false
}
baseChar := byte(baseRune)
if c != baseChar &&
!('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) {
return false
}
baseIndex++
i++
continue
}
// unicode case
r, size := utf8.DecodeRune(b[i:])
if unicode.ToUpper(r) != baseRune {
return false
}
baseIndex++
i += size
}
if baseIndex != len(base) {
return false
}
// all passed: now we should only have blanks
for i < len(b) {
if c := b[i]; c < utf8.RuneSelf {
// fast path for ASCII
if c != ' ' && c != '\t' {
return false
}
i++
continue
}
// unicode case
r, size := utf8.DecodeRune(b[i:])
if !unicode.IsSpace(r) {
return false
}
i += size
}
return true
}
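Illustrative within-package calls, following the contract stated above (base upper-cased and trimmed):
base := []rune("JSON")
_ = isEqualFoldIgnoreSpace(base, "  json\t") // true: blanks around a case-insensitive match
_ = isEqualFoldIgnoreSpace(base, "jsonp")    // false: trailing non-blank rune after the match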

8
vendor/github.com/go-openapi/swag/string_bytes.go generated vendored Normal file
View File

@@ -0,0 +1,8 @@
package swag
import "unsafe"
// hackStringBytes returns the (unsafe) underlying bytes slice of a string.
func hackStringBytes(str string) []byte {
return unsafe.Slice(unsafe.StringData(str), len(str))
}
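This relies on unsafe.Slice over unsafe.StringData (Go 1.20+): the view is zero-copy but strictly read-only. A minimal caution sketch:
b := hackStringBytes("JSON")
_ = b[0]      // reads are fine: no allocation, no copy
// b[0] = 'j' // never write: the slice aliases immutable string memory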

View File

@@ -18,76 +18,25 @@ import (
"reflect"
"strings"
"unicode"
"unicode/utf8"
)
// commonInitialisms are common acronyms that are kept as whole uppercased words.
var commonInitialisms *indexOfInitialisms
// initialisms is a slice of sorted initialisms
var initialisms []string
var isInitialism func(string) bool
// GoNamePrefixFunc sets an optional rule to prefix go names
// which do not start with a letter.
//
// The prefix function is assumed to return a string that starts with an upper case letter.
//
// e.g. to help convert "123" into "{prefix}123"
//
// The default is to prefix with "X"
var GoNamePrefixFunc func(string) string
func init() {
// Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
var configuredInitialisms = map[string]bool{
"ACL": true,
"API": true,
"ASCII": true,
"CPU": true,
"CSS": true,
"DNS": true,
"EOF": true,
"GUID": true,
"HTML": true,
"HTTPS": true,
"HTTP": true,
"ID": true,
"IP": true,
"IPv4": true,
"IPv6": true,
"JSON": true,
"LHS": true,
"OAI": true,
"QPS": true,
"RAM": true,
"RHS": true,
"RPC": true,
"SLA": true,
"SMTP": true,
"SQL": true,
"SSH": true,
"TCP": true,
"TLS": true,
"TTL": true,
"UDP": true,
"UI": true,
"UID": true,
"UUID": true,
"URI": true,
"URL": true,
"UTF8": true,
"VM": true,
"XML": true,
"XMPP": true,
"XSRF": true,
"XSS": true,
func prefixFunc(name, in string) string {
if GoNamePrefixFunc == nil {
return "X" + in
}
// a thread-safe index of initialisms
commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
initialisms = commonInitialisms.sorted()
// a test function
isInitialism = commonInitialisms.isInitialism
return GoNamePrefixFunc(name) + in
}
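A hedged sketch of overriding the default prefix (per the contract above, the function must return something starting with an upper-case letter; "Api" is a hypothetical choice):
swag.GoNamePrefixFunc = func(name string) string {
	return "Api" // hypothetical prefix; name is the original input, available for context
}
// swag.ToGoName("123stream") would then begin with "Api" rather than the default "X"
// (illustrative; the remainder follows the splitter rules).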
const (
@@ -156,22 +105,6 @@ func SplitByFormat(data, format string) []string {
return result
}
type byInitialism []string
func (s byInitialism) Len() int {
return len(s)
}
func (s byInitialism) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s byInitialism) Less(i, j int) bool {
if len(s[i]) != len(s[j]) {
return len(s[i]) < len(s[j])
}
return strings.Compare(s[i], s[j]) > 0
}
// Removes leading and trailing whitespace
func trim(str string) string {
return strings.TrimSpace(str)
@@ -188,15 +121,20 @@ func lower(str string) string {
}
// Camelize an uppercased word
func Camelize(word string) (camelized string) {
func Camelize(word string) string {
camelized := poolOfBuffers.BorrowBuffer(len(word))
defer func() {
poolOfBuffers.RedeemBuffer(camelized)
}()
for pos, ru := range []rune(word) {
if pos > 0 {
camelized += string(unicode.ToLower(ru))
camelized.WriteRune(unicode.ToLower(ru))
} else {
camelized += string(unicode.ToUpper(ru))
camelized.WriteRune(unicode.ToUpper(ru))
}
}
return
return camelized.String()
}
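Illustrative behavior, consistent with the loop above:
swag.Camelize("SAMPLE") // "Sample": first rune upper-cased, the rest lower-cased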
// ToFileName lowercases and underscores a go type name
@@ -224,26 +162,31 @@ func ToCommandName(name string) string {
// ToHumanNameLower represents a code name as a human series of words
func ToHumanNameLower(name string) string {
in := newSplitter(withPostSplitInitialismCheck).split(name)
out := make([]string, 0, len(in))
s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
in := s.split(name)
poolOfSplitters.RedeemSplitter(s)
out := make([]string, 0, len(*in))
for _, w := range in {
for _, w := range *in {
if !w.IsInitialism() {
out = append(out, lower(w.GetOriginal()))
} else {
out = append(out, trim(w.GetOriginal()))
}
}
poolOfLexems.RedeemLexems(in)
return strings.Join(out, " ")
}
// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized
func ToHumanNameTitle(name string) string {
in := newSplitter(withPostSplitInitialismCheck).split(name)
s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
in := s.split(name)
poolOfSplitters.RedeemSplitter(s)
out := make([]string, 0, len(in))
for _, w := range in {
out := make([]string, 0, len(*in))
for _, w := range *in {
original := trim(w.GetOriginal())
if !w.IsInitialism() {
out = append(out, Camelize(original))
@@ -251,6 +194,8 @@ func ToHumanNameTitle(name string) string {
out = append(out, original)
}
}
poolOfLexems.RedeemLexems(in)
return strings.Join(out, " ")
}
@@ -283,35 +228,70 @@ func ToVarName(name string) string {
// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes
func ToGoName(name string) string {
lexems := newSplitter(withPostSplitInitialismCheck).split(name)
s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
lexems := s.split(name)
poolOfSplitters.RedeemSplitter(s)
defer func() {
poolOfLexems.RedeemLexems(lexems)
}()
lexemes := *lexems
result := ""
for _, lexem := range lexems {
if len(lexemes) == 0 {
return ""
}
result := poolOfBuffers.BorrowBuffer(len(name))
defer func() {
poolOfBuffers.RedeemBuffer(result)
}()
// check if not starting with a letter, upper case
firstPart := lexemes[0].GetUnsafeGoName()
if lexemes[0].IsInitialism() {
firstPart = upper(firstPart)
}
if c := firstPart[0]; c < utf8.RuneSelf {
// ASCII
switch {
case 'A' <= c && c <= 'Z':
result.WriteString(firstPart)
case 'a' <= c && c <= 'z':
result.WriteByte(c - 'a' + 'A')
result.WriteString(firstPart[1:])
default:
result.WriteString(prefixFunc(name, firstPart))
// NOTE: no longer check if prefixFunc returns a string that starts with uppercase:
// assume this is always the case
}
} else {
// unicode
firstRune, _ := utf8.DecodeRuneInString(firstPart)
switch {
case !unicode.IsLetter(firstRune):
result.WriteString(prefixFunc(name, firstPart))
case !unicode.IsUpper(firstRune):
result.WriteString(prefixFunc(name, firstPart))
/*
result.WriteRune(unicode.ToUpper(firstRune))
result.WriteString(firstPart[offset:])
*/
default:
result.WriteString(firstPart)
}
}
for _, lexem := range lexemes[1:] {
goName := lexem.GetUnsafeGoName()
// to support old behavior
if lexem.IsInitialism() {
goName = upper(goName)
}
result += goName
result.WriteString(goName)
}
if len(result) > 0 {
// Only prefix with X when the first character isn't an ascii letter
first := []rune(result)[0]
if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) {
if GoNamePrefixFunc == nil {
return "X" + result
}
result = GoNamePrefixFunc(name) + result
}
first = []rune(result)[0]
if unicode.IsLetter(first) && !unicode.IsUpper(first) {
result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...))
}
}
return result
return result.String()
}
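Some hedged input/output pairs consistent with the rules above (illustrative, not taken from the package's tests):
swag.ToGoName("sample text")   // "SampleText"
swag.ToGoName("findThingById") // "FindThingByID": "Id" normalized via the initialism table
swag.ToGoName("123")           // "X123": a non-letter start triggers the prefix rule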
// ContainsStrings searches a slice of strings for a case-sensitive match
@@ -376,16 +356,6 @@ func IsZero(data interface{}) bool {
}
}
// AddInitialisms add additional initialisms
func AddInitialisms(words ...string) {
for _, word := range words {
// commonInitialisms[upper(word)] = true
commonInitialisms.add(upper(word))
}
// sort again
initialisms = commonInitialisms.sorted()
}
// CommandLineOptionsGroup represents a group of user-defined command line options
type CommandLineOptionsGroup struct {
ShortDescription string

View File

@@ -16,6 +16,7 @@ package swag
import (
"encoding/json"
"errors"
"fmt"
"path/filepath"
"reflect"
@@ -50,7 +51,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) {
return nil, err
}
if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode {
return nil, fmt.Errorf("only YAML documents that are objects are supported")
return nil, errors.New("only YAML documents that are objects are supported")
}
return &document, nil
}

View File

@@ -56,6 +56,7 @@ type Unmarshaler struct {
// implement JSONPBMarshaler so that the custom format can be produced.
//
// The JSON unmarshaling must follow the JSON to proto specification:
//
// https://developers.google.com/protocol-buffers/docs/proto3#json
//
// Deprecated: Custom types should implement protobuf reflection instead.

View File

@@ -55,6 +55,7 @@ type Marshaler struct {
// implement JSONPBUnmarshaler so that the custom format can be parsed.
//
// The JSON marshaling must follow the proto to JSON specification:
//
// https://developers.google.com/protocol-buffers/docs/proto3#json
//
// Deprecated: Custom types should implement protobuf reflection instead.

View File

@@ -127,6 +127,7 @@ func Is(any *anypb.Any, m proto.Message) bool {
// The allocated message is stored in the embedded proto.Message.
//
// Example:
//
// var x ptypes.DynamicAny
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
// fmt.Printf("unmarshaled message: %v", x.Message)

View File

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1 +0,0 @@
Copyright 2012 Matt T. Proud (matt.proud@gmail.com)

View File

@@ -1 +0,0 @@
cover.dat

View File

@@ -1,7 +0,0 @@
all:
cover:
go test -cover -v -coverprofile=cover.dat ./...
go tool cover -func cover.dat
.PHONY: cover

View File

@@ -1,81 +0,0 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pbutil
import (
"encoding/binary"
"errors"
"io"
"google.golang.org/protobuf/proto"
)
// TODO: Give error package name prefix in next minor release.
var errInvalidVarint = errors.New("invalid varint32 encountered")
// ReadDelimited decodes a message from the provided length-delimited stream,
// where the length is encoded as 32-bit varint prefix to the message body.
// It returns the total number of bytes read and any applicable error. This is
// roughly equivalent to the companion Java API's
// MessageLite#parseDelimitedFrom. As per the reader contract, this function
// calls r.Read repeatedly as required until exactly one message including its
// prefix is read and decoded (or an error has occurred). The function never
// reads more bytes from the stream than required. The function never returns
// an error if a message has been read and decoded correctly, even if the end
// of the stream has been reached in doing so. In that case, any subsequent
// calls return (0, io.EOF).
func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
// TODO: Consider allowing the caller to specify a decode buffer in the
// next major version.
// TODO: Consider using error wrapping to annotate error state in pass-
// through cases in the next minor version.
// Per AbstractParser#parsePartialDelimitedFrom with
// CodedInputStream#readRawVarint32.
var headerBuf [binary.MaxVarintLen32]byte
var bytesRead, varIntBytes int
var messageLength uint64
for varIntBytes == 0 { // i.e. no varint has been decoded yet.
if bytesRead >= len(headerBuf) {
return bytesRead, errInvalidVarint
}
// We have to read byte by byte here to avoid reading more bytes
// than required. Each read byte is appended to what we have
// read before.
newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
if newBytesRead == 0 {
if err != nil {
return bytesRead, err
}
// A Reader should not return (0, nil); but if it does, it should
// be treated as no-op according to the Reader contract.
continue
}
bytesRead += newBytesRead
// Now present everything read so far to the varint decoder and
// see if a varint can be decoded already.
messageLength, varIntBytes = binary.Uvarint(headerBuf[:bytesRead])
}
messageBuf := make([]byte, messageLength)
newBytesRead, err := io.ReadFull(r, messageBuf)
bytesRead += newBytesRead
if err != nil {
return bytesRead, err
}
return bytesRead, proto.Unmarshal(messageBuf, m)
}
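A consumer of this framing typically loops until io.EOF. A sketch, given an io.Reader r (purely illustrative - this package is being dropped from the module here, and dto stands for the vendored client_model package):
for {
	mf := &dto.MetricFamily{}
	if _, err := ReadDelimited(r, mf); err != nil {
		if err == io.EOF {
			break // clean end of the record stream
		}
		return err
	}
	// ... process one decoded MetricFamily
}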

View File

@@ -1,16 +0,0 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package pbutil provides record length-delimited Protocol Buffer streaming.
package pbutil

View File

@@ -1,49 +0,0 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pbutil
import (
"encoding/binary"
"io"
"google.golang.org/protobuf/proto"
)
// WriteDelimited encodes and dumps a message to the provided writer prefixed
// with a 32-bit varint indicating the length of the encoded message, producing
// a length-delimited record stream, which can be used to chain together
// encoded messages of the same type together in a file. It returns the total
// number of bytes written and any applicable error. This is roughly
// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
// TODO: Consider allowing the caller to specify an encode buffer in the
// next major version.
buffer, err := proto.Marshal(m)
if err != nil {
return 0, err
}
var buf [binary.MaxVarintLen32]byte
encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
sync, err := w.Write(buf[:encodedLength])
if err != nil {
return sync, err
}
n, err = w.Write(buffer)
return n + sync, err
}
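Write and read pair up into a simple round-trip; a sketch under the same assumptions as above:
var buf bytes.Buffer
in := &dto.MetricFamily{Name: proto.String("http_requests_total")}
if _, err := WriteDelimited(&buf, in); err != nil {
	return err
}
out := &dto.MetricFamily{}
if _, err := ReadDelimited(&buf, out); err != nil {
	return err
}
// out mirrors in: the varint length prefix is what lets records be concatenated safely.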

View File

@@ -0,0 +1,129 @@
// Copyright (c) 2015, Wade Simmons
// All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Package gocovmerge takes the results from multiple `go test -coverprofile`
// runs and merges them into one profile
// this file was originally taken from the gocovmerge project
// see also: https://go.shabbyrobe.org/gocovmerge
package internal
import (
"fmt"
"io"
"sort"
"golang.org/x/tools/cover"
)
func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile {
i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName })
if i < len(profiles) && profiles[i].FileName == p.FileName {
MergeCoverProfiles(profiles[i], p)
} else {
profiles = append(profiles, nil)
copy(profiles[i+1:], profiles[i:])
profiles[i] = p
}
return profiles
}
func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error {
if len(profiles) == 0 {
return nil
}
if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil {
return err
}
for _, p := range profiles {
for _, b := range p.Blocks {
if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil {
return err
}
}
}
return nil
}
func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error {
if into.Mode != merge.Mode {
return fmt.Errorf("cannot merge profiles with different modes")
}
// Since the blocks are sorted, we can keep track of where the last block
// was inserted and only look at the blocks after that as targets for merge
startIndex := 0
for _, b := range merge.Blocks {
var err error
startIndex, err = mergeProfileBlock(into, b, startIndex)
if err != nil {
return err
}
}
return nil
}
func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) {
sortFunc := func(i int) bool {
pi := p.Blocks[i+startIndex]
return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol)
}
i := 0
if !sortFunc(i) {
i = sort.Search(len(p.Blocks)-startIndex, sortFunc)
}
i += startIndex
if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol {
if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol {
return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb)
}
switch p.Mode {
case "set":
p.Blocks[i].Count |= pb.Count
case "count", "atomic":
p.Blocks[i].Count += pb.Count
default:
return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode)
}
} else {
if i > 0 {
pa := p.Blocks[i-1]
if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) {
return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb)
}
}
if i < len(p.Blocks)-1 {
pa := p.Blocks[i+1]
if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) {
return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb)
}
}
p.Blocks = append(p.Blocks, cover.ProfileBlock{})
copy(p.Blocks[i+1:], p.Blocks[i:])
p.Blocks[i] = pb
}
return i + 1, nil
}
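End to end, the merge flow above is used roughly like this (a sketch; these helpers live in ginkgo's internal package, so the call site is illustrative, and cover is golang.org/x/tools/cover):
var merged []*cover.Profile
for _, file := range []string{"a.coverprofile", "b.coverprofile"} {
	parsed, err := cover.ParseProfiles(file)
	if err != nil {
		return err
	}
	for _, p := range parsed {
		merged = AddCoverProfile(merged, p)
	}
}
return DumpCoverProfiles(merged, os.Stdout) // emits one combined profile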

View File

@@ -1,7 +1,6 @@
package internal
import (
"bytes"
"fmt"
"os"
"os/exec"
@@ -12,6 +11,7 @@ import (
"github.com/google/pprof/profile"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types"
"golang.org/x/tools/cover"
)
func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string {
@@ -144,38 +144,26 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC
return messages, nil
}
//loads each profile, combines them, deletes them, stores them in destination
// loads each profile, merges them, deletes them, stores them in destination
func MergeAndCleanupCoverProfiles(profiles []string, destination string) error {
combined := &bytes.Buffer{}
modeRegex := regexp.MustCompile(`^mode: .*\n`)
for i, profile := range profiles {
contents, err := os.ReadFile(profile)
var merged []*cover.Profile
for _, file := range profiles {
parsedProfiles, err := cover.ParseProfiles(file)
if err != nil {
return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error())
return err
}
os.Remove(profile)
// remove the cover mode line from every file
// except the first one
if i > 0 {
contents = modeRegex.ReplaceAll(contents, []byte{})
os.Remove(file)
for _, p := range parsedProfiles {
merged = AddCoverProfile(merged, p)
}
_, err = combined.Write(contents)
// Add a newline to the end of every file if missing.
if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' {
_, err = combined.Write([]byte("\n"))
}
dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error())
return err
}
}
err := os.WriteFile(destination, combined.Bytes(), 0666)
err = DumpCoverProfiles(merged, dst)
if err != nil {
return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error())
return err
}
return nil
}
@@ -184,7 +172,7 @@ func GetCoverageFromCoverProfile(profile string) (float64, error) {
cmd := exec.Command("go", "tool", "cover", "-func", profile)
output, err := cmd.CombinedOutput()
if err != nil {
return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error())
return 0, fmt.Errorf("Could not process Coverprofile %s: %s - %s", profile, err.Error(), string(output))
}
re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`)
matches := re.FindStringSubmatch(string(output))

View File

@@ -1,10 +1,11 @@
package outline
import (
"github.com/onsi/ginkgo/v2/types"
"go/ast"
"go/token"
"strconv"
"github.com/onsi/ginkgo/v2/types"
)
const (

View File

@@ -28,14 +28,7 @@ func packageNameForImport(f *ast.File, path string) *string {
}
name := spec.Name.String()
if name == "<nil>" {
// If the package name is not explicitly specified,
// make an educated guess. This is not guaranteed to be correct.
lastSlash := strings.LastIndex(path, "/")
if lastSlash == -1 {
name = path
} else {
name = path[lastSlash+1:]
}
name = "ginkgo"
}
if name == "." {
name = ""

View File

@@ -182,6 +182,22 @@ func (r *DefaultReporter) WillRun(report types.SpecReport) {
r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false)))
}
func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) {
r.emitBlock("\n")
if r.conf.GithubOutput {
r.emitBlock(r.fi(1, "::group::%s", sectionName))
} else {
r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName))
}
fn()
if r.conf.GithubOutput {
r.emitBlock(r.fi(1, "::endgroup::"))
} else {
r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName))
}
}
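With --github-output set, each captured section is wrapped in GitHub Actions grouping commands so it folds in the Actions log viewer; without it, the gray ">>/<<" delimiters are kept. The grouped form looks roughly like:
::group::Captured StdOut/StdErr Output
...captured text...
::endgroup::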
func (r *DefaultReporter) DidRun(report types.SpecReport) {
v := r.conf.Verbosity()
inParallel := report.RunningInParallel
@@ -283,26 +299,23 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) {
//Emit Stdout/Stderr Output
if showSeparateStdSection {
r.emitBlock("\n")
r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}"))
r.wrapTextBlock("Captured StdOut/StdErr Output", func() {
r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr))
r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}"))
})
}
if showSeparateVisibilityAlwaysReportsSection {
r.emitBlock("\n")
r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}"))
r.wrapTextBlock("Report Entries", func() {
for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) {
r.emitReportEntry(1, entry)
}
r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}"))
})
}
if showTimeline {
r.emitBlock("\n")
r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}"))
r.wrapTextBlock("Timeline", func() {
r.emitTimeline(1, report, timeline)
r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}"))
})
}
// Emit Failure Message
@@ -405,7 +418,11 @@ func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, f
func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) {
highlightColor := r.highlightColorForState(state)
r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message))
if r.conf.GithubOutput {
r.emitBlock(r.fi(indent, "::error file=%s,line=%d::%s %s", failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
} else {
r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
}
if failure.ForwardedPanic != "" {
r.emitBlock("\n")
r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic))

View File

@@ -15,6 +15,7 @@ import (
"fmt"
"os"
"path"
"regexp"
"strings"
"github.com/onsi/ginkgo/v2/config"
@@ -104,6 +105,8 @@ type JUnitProperty struct {
Value string `xml:"value,attr"`
}
var ownerRE = regexp.MustCompile(`(?i)^owner:(.*)$`)
type JUnitTestCase struct {
// Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()"
Name string `xml:"name,attr"`
@@ -113,6 +116,8 @@ type JUnitTestCase struct {
Status string `xml:"status,attr"`
// Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime
Time float64 `xml:"time,attr"`
// Owner is the owner of the spec - it is set if a label matching Label("owner:X") is provided. The last matching label is used as the owner, thereby allowing specs to override owners specified in container nodes.
Owner string `xml:"owner,attr,omitempty"`
//Skipped is populated with a message if the test was skipped or pending
Skipped *JUnitSkipped `xml:"skipped,omitempty"`
//Error is populated if the test panicked or was interrupted
@@ -195,6 +200,12 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
if len(labels) > 0 && !config.OmitSpecLabels {
name = name + " [" + strings.Join(labels, ", ") + "]"
}
owner := ""
for _, label := range labels {
if matches := ownerRE.FindStringSubmatch(label); len(matches) == 2 {
owner = matches[1]
}
}
name = strings.TrimSpace(name)
test := JUnitTestCase{
@@ -202,6 +213,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
Classname: report.SuiteDescription,
Status: spec.State.String(),
Time: spec.RunTime.Seconds(),
Owner: owner,
}
if !spec.State.Is(config.OmitTimelinesForSpecState) {
test.SystemErr = systemErrForUnstructuredReporters(spec)
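Owner extraction is label-driven; a spec such as the sketch below would emit owner="team-a" on its JUnit test case (the last matching label wins):
It("reports to the right team", Label("owner:team-a"), func() {
	// ...
})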

View File

@@ -89,6 +89,7 @@ type ReporterConfig struct {
VeryVerbose bool
FullTrace bool
ShowNodeEvents bool
GithubOutput bool
JSONReport string
JUnitReport string
@@ -264,7 +265,7 @@ var FlagSections = GinkgoFlagSections{
// SuiteConfigFlags provides flags for the Ginkgo test process, and CLI
var SuiteConfigFlags = GinkgoFlags{
{KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo",
Usage: "The seed used to randomize the spec suite."},
Usage: "The seed used to randomize the spec suite.", AlwaysExport: true},
{KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags",
Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."},
@@ -331,6 +332,8 @@ var ReporterConfigFlags = GinkgoFlags{
Usage: "If set, default reporter prints out the full stack trace when a failure occurs"},
{KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output",
Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"},
{KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output",
Usage: "If set, default reporter prints easier to manage output in Github Actions."},
{KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output",
Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."},

View File

@@ -505,6 +505,15 @@ func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, ac
}
}
func (g ginkgoErrors) ContextsCannotBeUsedInSubtreeTables(cl CodeLocation) error {
return GinkgoError{
Heading: "Contexts cannot be used in subtree tables",
Message: "You''ve defined a subtree body function that accepts a context but did not provide one in the table entry. Ginkgo SpecContexts can only be passed in to subject and setup nodes - so if you are trying to implement a spec timeout you should request a context in the It function within your subtree body function, not in the subtree body function itself.",
CodeLocation: cl,
DocLink: "table-specs",
}
}
/* Parallel Synchronization errors */
func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error {

View File

@@ -25,6 +25,7 @@ type GinkgoFlag struct {
DeprecatedVersion string
ExportAs string
AlwaysExport bool
}
type GinkgoFlags []GinkgoFlag
@@ -431,7 +432,7 @@ func (ssv stringSliceVar) Set(s string) error {
return nil
}
//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured.
// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured.
func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) {
result := []string{}
for _, flag := range flags {
@@ -451,19 +452,19 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error)
iface := value.Interface()
switch value.Type() {
case reflect.TypeOf(string("")):
if iface.(string) != "" {
if iface.(string) != "" || flag.AlwaysExport {
result = append(result, fmt.Sprintf("--%s=%s", name, iface))
}
case reflect.TypeOf(int64(0)):
if iface.(int64) != 0 {
if iface.(int64) != 0 || flag.AlwaysExport {
result = append(result, fmt.Sprintf("--%s=%d", name, iface))
}
case reflect.TypeOf(float64(0)):
if iface.(float64) != 0 {
if iface.(float64) != 0 || flag.AlwaysExport {
result = append(result, fmt.Sprintf("--%s=%f", name, iface))
}
case reflect.TypeOf(int(0)):
if iface.(int) != 0 {
if iface.(int) != 0 || flag.AlwaysExport {
result = append(result, fmt.Sprintf("--%s=%d", name, iface))
}
case reflect.TypeOf(bool(true)):
@@ -471,7 +472,7 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error)
result = append(result, fmt.Sprintf("--%s", name))
}
case reflect.TypeOf(time.Duration(0)):
if iface.(time.Duration) != time.Duration(0) {
if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport {
result = append(result, fmt.Sprintf("--%s=%s", name, iface))
}

View File

@@ -1,3 +1,3 @@
package types
const VERSION = "2.13.2"
const VERSION = "2.17.1"

View File

@@ -483,6 +483,8 @@ type Histogram struct {
// histograms.
PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket.
// Only used for native histograms. These exemplars MUST have a timestamp.
Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"`
}
func (x *Histogram) Reset() {
@@ -622,6 +624,13 @@ func (x *Histogram) GetPositiveCount() []float64 {
return nil
}
func (x *Histogram) GetExemplars() []*Exemplar {
if x != nil {
return x.Exemplars
}
return nil
}
// A Bucket of a conventional histogram, each of which is treated as
// an individual counter-like time series by Prometheus.
type Bucket struct {
@@ -923,6 +932,7 @@ type MetricFamily struct {
Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
Unit *string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"`
}
func (x *MetricFamily) Reset() {
@@ -985,6 +995,13 @@ func (x *MetricFamily) GetMetric() []*Metric {
return nil
}
func (x *MetricFamily) GetUnit() string {
if x != nil && x.Unit != nil {
return *x.Unit
}
return ""
}
var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor
var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
@@ -1028,7 +1045,7 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74,
0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xac, 0x05, 0x0a, 0x09, 0x48,
0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48,
0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70,
0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73,
@@ -1071,79 +1088,84 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c,
0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69,
0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75,
0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69,
0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f,
0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12,
0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f,
0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52,
0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74,
0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62,
0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65,
0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c,
0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c,
0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e,
0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11,
0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67,
0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a,
0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65,
0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78,
0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b,
0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65,
0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75,
0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a,
0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63,
0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c,
0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75,
0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42,
0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78,
0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16,
0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06,
0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91,
0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c,
0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e,
0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62,
0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65,
0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a,
0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69,
0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c,
0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12,
0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f,
0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63,
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52,
0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75,
0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75,
0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e,
0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65,
0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74,
0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72,
0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79,
0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74,
0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e,
0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74,
0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61,
0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75,
0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69,
0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74,
0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f,
0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48,
0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67,
0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69,
0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68,
0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12,
0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e,
0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c,
0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52,
0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18,
0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74,
0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, 0x62, 0x0a, 0x0a, 0x4d,
0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x55,
0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10,
0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0b,
0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x48,
0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x41,
0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x05, 0x42,
0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x63,
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, 0x3b, 0x69,
0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74,
0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70,
0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18,
0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73,
0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d,
0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46,
0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c,
0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a,
0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f,
0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74,
0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20,
0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69,
0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69,
0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a,
0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43,
0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47,
0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02,
0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a,
0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f,
0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10,
0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75,
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f,
0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63,
0x6c, 0x69, 0x65, 0x6e, 0x74,
}
var (
@@ -1185,22 +1207,23 @@ var file_io_prometheus_client_metrics_proto_depIdxs = []int32{
13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp
9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
10, // 8: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
1, // 9: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
13, // 10: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
1, // 11: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
2, // 12: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
3, // 13: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
5, // 14: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
6, // 15: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
7, // 16: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
0, // 17: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
11, // 18: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
19, // [19:19] is the sub-list for method output_type
19, // [19:19] is the sub-list for method input_type
19, // [19:19] is the sub-list for extension type_name
19, // [19:19] is the sub-list for extension extendee
0, // [0:19] is the sub-list for field type_name
10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar
10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
1, // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
1, // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
2, // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
3, // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
5, // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
6, // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
7, // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
0, // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
20, // [20:20] is the sub-list for method output_type
20, // [20:20] is the sub-list for method input_type
20, // [20:20] is the sub-list for extension type_name
20, // [20:20] is the sub-list for extension extendee
0, // [0:20] is the sub-list for field type_name
}
func init() { file_io_prometheus_client_metrics_proto_init() }

View File

@@ -14,6 +14,7 @@
package expfmt
import (
"bufio"
"fmt"
"io"
"math"
@@ -21,8 +22,8 @@ import (
"net/http"
dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/encoding/protodelim"
"github.com/matttproud/golang_protobuf_extensions/v2/pbutil"
"github.com/prometheus/common/model"
)
@@ -44,7 +45,7 @@ func ResponseFormat(h http.Header) Format {
mediatype, params, err := mime.ParseMediaType(ct)
if err != nil {
return FmtUnknown
return fmtUnknown
}
const textType = "text/plain"
@@ -52,28 +53,28 @@ func ResponseFormat(h http.Header) Format {
switch mediatype {
case ProtoType:
if p, ok := params["proto"]; ok && p != ProtoProtocol {
return FmtUnknown
return fmtUnknown
}
if e, ok := params["encoding"]; ok && e != "delimited" {
return FmtUnknown
return fmtUnknown
}
return FmtProtoDelim
return fmtProtoDelim
case textType:
if v, ok := params["version"]; ok && v != TextVersion {
return FmtUnknown
return fmtUnknown
}
return FmtText
return fmtText
}
return FmtUnknown
return fmtUnknown
}
// NewDecoder returns a new decoder based on the given input format.
// If the input format does not imply otherwise, a text format decoder is returned.
func NewDecoder(r io.Reader, format Format) Decoder {
switch format {
case FmtProtoDelim:
switch format.FormatType() {
case TypeProtoDelim:
return &protoDecoder{r: r}
}
return &textDecoder{r: r}
@@ -86,8 +87,10 @@ type protoDecoder struct {
// Decode implements the Decoder interface.
func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
_, err := pbutil.ReadDelimited(d.r, v)
if err != nil {
opts := protodelim.UnmarshalOptions{
MaxSize: -1,
}
if err := opts.UnmarshalFrom(bufio.NewReader(d.r), v); err != nil {
return err
}
if !model.IsValidMetricName(model.LabelValue(v.GetName())) {

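The decoder above now delegates to protodelim with MaxSize: -1 instead of the retired pbutil package. A minimal usage sketch, not part of this diff: it assumes the vendored import paths shown above and a hypothetical /metrics endpoint, and drains a response through the public NewDecoder API.

package main

import (
	"errors"
	"fmt"
	"io"
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

// decodeAll picks a decoder from the response's Content-Type and reads
// metric families until the stream is exhausted.
func decodeAll(resp *http.Response) ([]*dto.MetricFamily, error) {
	format := expfmt.ResponseFormat(resp.Header)
	dec := expfmt.NewDecoder(resp.Body, format)
	var families []*dto.MetricFamily
	for {
		mf := &dto.MetricFamily{}
		if err := dec.Decode(mf); err != nil {
			if errors.Is(err, io.EOF) {
				return families, nil // clean end of stream
			}
			return nil, err
		}
		families = append(families, mf)
	}
}

func main() {
	resp, err := http.Get("http://localhost:9090/metrics") // hypothetical scrape target
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	families, err := decodeAll(resp)
	if err != nil {
		panic(err)
	}
	fmt.Println("decoded", len(families), "metric families")
}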
View File

@@ -18,10 +18,12 @@ import (
"io"
"net/http"
"github.com/matttproud/golang_protobuf_extensions/v2/pbutil"
"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
"google.golang.org/protobuf/encoding/protodelim"
"google.golang.org/protobuf/encoding/prototext"
"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
)
@@ -60,23 +62,32 @@ func (ec encoderCloser) Close() error {
// as the support is still experimental. To include the option to negotiate
// FmtOpenMetrics, use NegotiateIncludingOpenMetrics.
func Negotiate(h http.Header) Format {
escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
switch Format(escapeParam) {
case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
default:
// If the escaping parameter is unknown, ignore it.
}
}
ver := ac.Params["version"]
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
return FmtProtoDelim
return fmtProtoDelim + escapingScheme
case "text":
return FmtProtoText
return fmtProtoText + escapingScheme
case "compact-text":
return FmtProtoCompact
return fmtProtoCompact + escapingScheme
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
return FmtText
return fmtText + escapingScheme
}
}
return FmtText
return fmtText + escapingScheme
}
// NegotiateIncludingOpenMetrics works like Negotiate but includes
@@ -84,29 +95,40 @@ func Negotiate(h http.Header) Format {
// temporary and will disappear once FmtOpenMetrics is fully supported and as
// such may be negotiated by the normal Negotiate function.
func NegotiateIncludingOpenMetrics(h http.Header) Format {
escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
switch Format(escapeParam) {
case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
default:
// If the escaping parameter is unknown, ignore it.
}
}
ver := ac.Params["version"]
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
return FmtProtoDelim
return fmtProtoDelim + escapingScheme
case "text":
return FmtProtoText
return fmtProtoText + escapingScheme
case "compact-text":
return FmtProtoCompact
return fmtProtoCompact + escapingScheme
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
return FmtText
return fmtText + escapingScheme
}
if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
if ver == OpenMetricsVersion_1_0_0 {
return FmtOpenMetrics_1_0_0
}
return FmtOpenMetrics_0_0_1
switch ver {
case OpenMetricsVersion_1_0_0:
return fmtOpenMetrics_1_0_0 + escapingScheme
default:
return fmtOpenMetrics_0_0_1 + escapingScheme
}
}
return FmtText
}
return fmtText + escapingScheme
}
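Both negotiation paths now carry an escaping term along with the content type. A short sketch, for illustration only, of how an Accept header drives the result (the printed Format is whatever the library assembles; the exact string is not asserted here):

package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/common/expfmt"
)

func main() {
	h := http.Header{}
	// A scraper that accepts OpenMetrics 1.0.0 and asks for underscore escaping.
	h.Set("Accept", "application/openmetrics-text; version=1.0.0; escaping=underscores")

	format := expfmt.NegotiateIncludingOpenMetrics(h)
	fmt.Println(format)              // content type plus the negotiated escaping term
	fmt.Println(format.FormatType()) // numeric value of TypeOpenMetrics
}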
// NewEncoder returns a new encoder based on content type negotiation. All
@@ -115,44 +137,54 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
// for FmtOpenMetrics, but a future (breaking) release will add the Close method
// to the Encoder interface directly. The current version of the Encoder
// interface is kept for backwards compatibility.
func NewEncoder(w io.Writer, format Format) Encoder {
switch format {
case FmtProtoDelim:
// In cases where the Format does not allow for UTF-8 names, the global
// NameEscapingScheme will be applied.
//
// NewEncoder can be called with additional options to customize the OpenMetrics text output.
// For example:
// NewEncoder(w, FmtOpenMetrics_1_0_0, WithCreatedLines())
//
// Extra options are ignored for all other formats.
func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder {
escapingScheme := format.ToEscapingScheme()
switch format.FormatType() {
case TypeProtoDelim:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := pbutil.WriteDelimited(w, v)
_, err := protodelim.MarshalTo(w, v)
return err
},
close: func() error { return nil },
}
case FmtProtoCompact:
case TypeProtoCompact:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := fmt.Fprintln(w, v.String())
_, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String())
return err
},
close: func() error { return nil },
}
case FmtProtoText:
case TypeProtoText:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := fmt.Fprintln(w, prototext.Format(v))
_, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme)))
return err
},
close: func() error { return nil },
}
case FmtText:
case TypeTextPlain:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := MetricFamilyToText(w, v)
_, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme))
return err
},
close: func() error { return nil },
}
case FmtOpenMetrics_0_0_1, FmtOpenMetrics_1_0_0:
case TypeOpenMetrics:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := MetricFamilyToOpenMetrics(w, v)
_, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...)
return err
},
close: func() error {

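Putting the new NewEncoder signature together with negotiation: a hedged sketch of an exporter-style handler. The metric family, handler path, and listen address are invented for illustration; only the expfmt calls mirror the API in this diff.

package main

import (
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

// metricsHandler serves a hand-built MetricFamily in whatever format the
// client negotiates, applying any negotiated escaping automatically.
func metricsHandler(w http.ResponseWriter, r *http.Request) {
	mf := &dto.MetricFamily{
		Name: proto.String("demo_requests_total"),
		Help: proto.String("Requests served by the demo handler."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Counter: &dto.Counter{Value: proto.Float64(42)},
		}},
	}
	format := expfmt.NegotiateIncludingOpenMetrics(r.Header)
	w.Header().Set("Content-Type", string(format))
	enc := expfmt.NewEncoder(w, format, expfmt.WithCreatedLines())
	if err := enc.Encode(mf); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// OpenMetrics requires a trailing "# EOF" line; Close emits it when applicable.
	if closer, ok := enc.(expfmt.Closer); ok {
		closer.Close()
	}
}

func main() {
	http.HandleFunc("/metrics", metricsHandler)
	http.ListenAndServe(":8080", nil) // hypothetical address
}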
View File

@@ -14,30 +14,164 @@
// Package expfmt contains tools for reading and writing Prometheus metrics.
package expfmt
import (
"fmt"
"strings"
"github.com/prometheus/common/model"
)
// Format specifies the HTTP content type of the different wire protocols.
type Format string
// Constants to assemble the Content-Type values for the different wire protocols.
// Constants to assemble the Content-Type values for the different wire
// protocols. The Content-Type strings here are all for the legacy exposition
// formats, where valid characters for metric names and label names are limited.
// Support for arbitrary UTF-8 characters in those names is already partially
// implemented in this module (see model.ValidationScheme), but to actually use
// it on the wire, new content-type strings will have to be agreed upon and
// added here.
const (
TextVersion = "0.0.4"
ProtoType = `application/vnd.google.protobuf`
ProtoProtocol = `io.prometheus.client.MetricFamily`
ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
OpenMetricsType = `application/openmetrics-text`
OpenMetricsVersion_0_0_1 = "0.0.1"
OpenMetricsVersion_1_0_0 = "1.0.0"
// The Content-Type values for the different wire protocols.
FmtUnknown Format = `<unknown>`
FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
FmtProtoText Format = ProtoFmt + ` encoding=text`
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
// The Content-Type values for the different wire protocols. Note that these
// values are now unexported. If code was relying on comparisons to these
// constants, instead use FormatType().
fmtUnknown Format = `<unknown>`
fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
fmtProtoDelim Format = protoFmt + ` encoding=delimited`
fmtProtoText Format = protoFmt + ` encoding=text`
fmtProtoCompact Format = protoFmt + ` encoding=compact-text`
fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
)
const (
hdrContentType = "Content-Type"
hdrAccept = "Accept"
)
// FormatType is a Go enum representing the overall category for the given
// Format. As the number of Format permutations increases, basic string
// comparisons are no longer feasible, so this enum captures the most useful
// high-level attribute of the Format string.
type FormatType int
const (
TypeUnknown FormatType = iota
TypeProtoCompact
TypeProtoDelim
TypeProtoText
TypeTextPlain
TypeOpenMetrics
)
// NewFormat generates a new Format from the type provided. Mostly used for
// tests; most Formats should be generated as part of content negotiation in
// encode.go. If a type has more than one version, the latest version will be
// returned.
func NewFormat(t FormatType) Format {
switch t {
case TypeProtoCompact:
return fmtProtoCompact
case TypeProtoDelim:
return fmtProtoDelim
case TypeProtoText:
return fmtProtoText
case TypeTextPlain:
return fmtText
case TypeOpenMetrics:
return fmtOpenMetrics_1_0_0
default:
return fmtUnknown
}
}
// NewOpenMetricsFormat generates a new OpenMetrics format matching the
// specified version number.
func NewOpenMetricsFormat(version string) (Format, error) {
if version == OpenMetricsVersion_0_0_1 {
return fmtOpenMetrics_0_0_1, nil
}
if version == OpenMetricsVersion_1_0_0 {
return fmtOpenMetrics_1_0_0, nil
}
return fmtUnknown, fmt.Errorf("unknown open metrics version string")
}
// FormatType deduces an overall FormatType for the given format.
func (f Format) FormatType() FormatType {
toks := strings.Split(string(f), ";")
params := make(map[string]string)
for i, t := range toks {
if i == 0 {
continue
}
args := strings.Split(t, "=")
if len(args) != 2 {
continue
}
params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1])
}
switch strings.TrimSpace(toks[0]) {
case ProtoType:
if params["proto"] != ProtoProtocol {
return TypeUnknown
}
switch params["encoding"] {
case "delimited":
return TypeProtoDelim
case "text":
return TypeProtoText
case "compact-text":
return TypeProtoCompact
default:
return TypeUnknown
}
case OpenMetricsType:
if params["charset"] != "utf-8" {
return TypeUnknown
}
return TypeOpenMetrics
case "text/plain":
v, ok := params["version"]
if !ok {
return TypeTextPlain
}
if v == TextVersion {
return TypeTextPlain
}
return TypeUnknown
default:
return TypeUnknown
}
}
// ToEscapingScheme returns an EscapingScheme depending on the Format. Iff the
// Format contains an escaping=allow-utf-8 term, it will select NoEscaping. If a valid
// "escaping" term exists, that will be used. Otherwise, the global default will
// be returned.
func (format Format) ToEscapingScheme() model.EscapingScheme {
for _, p := range strings.Split(string(format), ";") {
toks := strings.Split(p, "=")
if len(toks) != 2 {
continue
}
key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1])
if key == model.EscapingKey {
scheme, err := model.ToEscapingScheme(value)
if err != nil {
return model.NameEscapingScheme
}
return scheme
}
}
return model.NameEscapingScheme
}
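A small sketch exercising the new helpers in this file; the comparisons should hold by construction, given the code above.

package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	f := expfmt.NewFormat(expfmt.TypeProtoDelim)
	fmt.Println(f)                                       // the full content-type string
	fmt.Println(f.FormatType() == expfmt.TypeProtoDelim) // true: the type round-trips

	om, err := expfmt.NewOpenMetricsFormat(expfmt.OpenMetricsVersion_1_0_0)
	if err != nil {
		panic(err)
	}
	// No escaping term in this Format, so the global default is returned.
	fmt.Println(om.ToEscapingScheme() == model.NameEscapingScheme)
}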

View File

@@ -22,11 +22,47 @@ import (
"strconv"
"strings"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
)
type encoderOption struct {
withCreatedLines bool
withUnit bool
}
type EncoderOption func(*encoderOption)
// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder
// to include _created lines (See
// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1).
// Created timestamps can improve the accuracy of series reset detection, but
// come with a bandwidth cost.
//
// At the time of writing, created timestamp ingestion is still experimental in
// Prometheus and needs to be enabled with the feature flag
// `--enable-feature=created-timestamp-zero-ingestion`, and breaking changes are
// still possible. Therefore, it is recommended to use this feature with caution.
func WithCreatedLines() EncoderOption {
return func(t *encoderOption) {
t.withCreatedLines = true
}
}
// WithUnit is an EncoderOption enabling a set unit to be written to the output
// and to be added to the metric name, if it's not there already, as a suffix.
// Without opting in this way, the unit will neither be added to the metric
// name nor be passed on to the output, even if it was declared in the
// *dto.MetricFamily struct, i.e. even if in.Unit != nil.
func WithUnit() EncoderOption {
return func(t *encoderOption) {
t.withUnit = true
}
}
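A sketch of calling MetricFamilyToOpenMetrics with both options on a fabricated metric family. Because the name below already ends in the unit suffix plus _total, WithUnit should leave the name unchanged and only add the # UNIT line, per the doc comments that follow; the exact output is not asserted here.

package main

import (
	"fmt"
	"os"
	"time"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("demo_duration_seconds_total"),
		Help: proto.String("Hypothetical counter with a unit and created timestamp."),
		Type: dto.MetricType_COUNTER.Enum(),
		Unit: proto.String("seconds"),
		Metric: []*dto.Metric{{
			Counter: &dto.Counter{
				Value:            proto.Float64(1.5),
				CreatedTimestamp: timestamppb.New(time.Unix(1700000000, 0)),
			},
		}},
	}
	// With both options, the output gains a "# UNIT" line and a "_created" series.
	if _, err := expfmt.MetricFamilyToOpenMetrics(
		os.Stdout, mf, expfmt.WithUnit(), expfmt.WithCreatedLines(),
	); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	fmt.Println("# EOF") // the caller's responsibility, as the function docs note
}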
// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
// OpenMetrics text format and writes the resulting lines to 'out'. It returns
// the number of bytes written and any error encountered. The output will have
@@ -35,6 +71,18 @@ import (
// sanity checks. If the input contains duplicate metrics or invalid metric or
// label names, the conversion will result in invalid text format output.
//
// If metric names conform to the legacy validation pattern, they will be placed
// outside the brackets in the traditional way, like `foo{}`. If the metric name
// fails the legacy validation check, it will be placed quoted inside the
// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and
// no error will be thrown in this case.
//
// Similar to metric names, if label names conform to the legacy validation
// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
// name fails the legacy validation check, it will be quoted:
// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized and
// no error will be thrown in this case.
//
// This function fulfills the type 'expfmt.encoder'.
//
// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
@@ -47,20 +95,34 @@ import (
// Prometheus to OpenMetrics or vice versa:
//
// - Counters are expected to have the `_total` suffix in their metric name. In
// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
// line. A counter with a missing `_total` suffix is not an error. However,
// the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT`
// lines. A counter with a missing `_total` suffix is not an error. However,
// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
// output.
//
// - No support for the following (optional) features: `# UNIT` line, `_created`
// line, info type, stateset type, gaugehistogram type.
// - According to the OM specs, the `# UNIT` line is optional, but if populated,
// the unit has to be present in the metric name as its suffix:
// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit).
// However, to accommodate any scenario where such a change to the metric name
// is not desirable, users are given a choice: they can explicitly opt in if
// they want the unit to be included in the output AND appended to the metric
// name as a suffix (see the description of the WithUnit function above), or
// they can leave the option out if they want neither to happen.
//
// - No support for the following (optional) features: info type,
// stateset type, gaugehistogram type.
//
// - The size of exemplar labels is not checked (i.e. it's possible to create
// exemplars that are larger than allowed by the OpenMetrics specification).
//
// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
// with a `NaN` value.)
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) {
toOM := encoderOption{}
for _, option := range options {
option(&toOM)
}
name := in.GetName()
if name == "" {
return 0, fmt.Errorf("MetricFamily has no name: %s", in)
@@ -85,10 +147,13 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
var (
n int
metricType = in.GetType()
shortName = name
compliantName = name
)
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
shortName = name[:len(name)-6]
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") {
compliantName = name[:len(name)-6]
}
if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) {
compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit)
}
// Comments, first HELP, then TYPE.
@@ -98,7 +163,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
if err != nil {
return
}
n, err = w.WriteString(shortName)
n, err = writeName(w, compliantName)
written += n
if err != nil {
return
@@ -124,7 +189,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
if err != nil {
return
}
n, err = w.WriteString(shortName)
n, err = writeName(w, compliantName)
written += n
if err != nil {
return
@@ -151,55 +216,89 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
if err != nil {
return
}
if toOM.withUnit && in.Unit != nil {
n, err = w.WriteString("# UNIT ")
written += n
if err != nil {
return
}
n, err = writeName(w, compliantName)
written += n
if err != nil {
return
}
err = w.WriteByte(' ')
written++
if err != nil {
return
}
n, err = writeEscapedString(w, *in.Unit, true)
written += n
if err != nil {
return
}
err = w.WriteByte('\n')
written++
if err != nil {
return
}
}
var createdTsBytesWritten int
// Finally the samples, one line for each.
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") {
compliantName = compliantName + "_total"
}
for _, metric := range in.Metric {
switch metricType {
case dto.MetricType_COUNTER:
if metric.Counter == nil {
return written, fmt.Errorf(
"expected counter in metric %s %s", name, metric,
"expected counter in metric %s %s", compliantName, metric,
)
}
// Note that we have ensured above that either the name
// ends on `_total` or that the rendered type is
// `unknown`. Therefore, no `_total` must be added here.
n, err = writeOpenMetricsSample(
w, name, "", metric, "", 0,
w, compliantName, "", metric, "", 0,
metric.Counter.GetValue(), 0, false,
metric.Counter.Exemplar,
)
if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil {
createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp())
n += createdTsBytesWritten
}
case dto.MetricType_GAUGE:
if metric.Gauge == nil {
return written, fmt.Errorf(
"expected gauge in metric %s %s", name, metric,
"expected gauge in metric %s %s", compliantName, metric,
)
}
n, err = writeOpenMetricsSample(
w, name, "", metric, "", 0,
w, compliantName, "", metric, "", 0,
metric.Gauge.GetValue(), 0, false,
nil,
)
case dto.MetricType_UNTYPED:
if metric.Untyped == nil {
return written, fmt.Errorf(
"expected untyped in metric %s %s", name, metric,
"expected untyped in metric %s %s", compliantName, metric,
)
}
n, err = writeOpenMetricsSample(
w, name, "", metric, "", 0,
w, compliantName, "", metric, "", 0,
metric.Untyped.GetValue(), 0, false,
nil,
)
case dto.MetricType_SUMMARY:
if metric.Summary == nil {
return written, fmt.Errorf(
"expected summary in metric %s %s", name, metric,
"expected summary in metric %s %s", compliantName, metric,
)
}
for _, q := range metric.Summary.Quantile {
n, err = writeOpenMetricsSample(
w, name, "", metric,
w, compliantName, "", metric,
model.QuantileLabel, q.GetQuantile(),
q.GetValue(), 0, false,
nil,
@@ -210,7 +309,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
}
}
n, err = writeOpenMetricsSample(
w, name, "_sum", metric, "", 0,
w, compliantName, "_sum", metric, "", 0,
metric.Summary.GetSampleSum(), 0, false,
nil,
)
@@ -219,20 +318,24 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
return
}
n, err = writeOpenMetricsSample(
w, name, "_count", metric, "", 0,
w, compliantName, "_count", metric, "", 0,
0, metric.Summary.GetSampleCount(), true,
nil,
)
if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil {
createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp())
n += createdTsBytesWritten
}
case dto.MetricType_HISTOGRAM:
if metric.Histogram == nil {
return written, fmt.Errorf(
"expected histogram in metric %s %s", name, metric,
"expected histogram in metric %s %s", compliantName, metric,
)
}
infSeen := false
for _, b := range metric.Histogram.Bucket {
n, err = writeOpenMetricsSample(
w, name, "_bucket", metric,
w, compliantName, "_bucket", metric,
model.BucketLabel, b.GetUpperBound(),
0, b.GetCumulativeCount(), true,
b.Exemplar,
@@ -247,7 +350,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
}
if !infSeen {
n, err = writeOpenMetricsSample(
w, name, "_bucket", metric,
w, compliantName, "_bucket", metric,
model.BucketLabel, math.Inf(+1),
0, metric.Histogram.GetSampleCount(), true,
nil,
@@ -258,7 +361,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
}
}
n, err = writeOpenMetricsSample(
w, name, "_sum", metric, "", 0,
w, compliantName, "_sum", metric, "", 0,
metric.Histogram.GetSampleSum(), 0, false,
nil,
)
@@ -267,13 +370,17 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
return
}
n, err = writeOpenMetricsSample(
w, name, "_count", metric, "", 0,
w, compliantName, "_count", metric, "", 0,
0, metric.Histogram.GetSampleCount(), true,
nil,
)
if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil {
createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp())
n += createdTsBytesWritten
}
default:
return written, fmt.Errorf(
"unexpected type in metric %s %s", name, metric,
"unexpected type in metric %s %s", compliantName, metric,
)
}
written += n
@@ -303,21 +410,9 @@ func writeOpenMetricsSample(
floatValue float64, intValue uint64, useIntValue bool,
exemplar *dto.Exemplar,
) (int, error) {
var written int
n, err := w.WriteString(name)
written += n
if err != nil {
return written, err
}
if suffix != "" {
n, err = w.WriteString(suffix)
written += n
if err != nil {
return written, err
}
}
n, err = writeOpenMetricsLabelPairs(
w, metric.Label, additionalLabelName, additionalLabelValue,
written := 0
n, err := writeOpenMetricsNameAndLabelPairs(
w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
)
written += n
if err != nil {
@@ -350,7 +445,7 @@ func writeOpenMetricsSample(
return written, err
}
}
if exemplar != nil {
if exemplar != nil && len(exemplar.Label) > 0 {
n, err = writeExemplar(w, exemplar)
written += n
if err != nil {
@@ -365,27 +460,58 @@ func writeOpenMetricsSample(
return written, nil
}
// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float
// in OpenMetrics style.
func writeOpenMetricsLabelPairs(
// writeOpenMetricsNameAndLabelPairs works like writeOpenMetricsSample but
// formats the float in OpenMetrics style.
func writeOpenMetricsNameAndLabelPairs(
w enhancedWriter,
name string,
in []*dto.LabelPair,
additionalLabelName string, additionalLabelValue float64,
) (int, error) {
if len(in) == 0 && additionalLabelName == "" {
return 0, nil
}
var (
written int
separator byte = '{'
metricInsideBraces = false
)
if name != "" {
// If the name does not pass the legacy validity check, we must put the
// metric name inside the braces, quoted.
if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
metricInsideBraces = true
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
separator = ','
}
n, err := writeName(w, name)
written += n
if err != nil {
return written, err
}
}
if len(in) == 0 && additionalLabelName == "" {
if metricInsideBraces {
err := w.WriteByte('}')
written++
if err != nil {
return written, err
}
}
return written, nil
}
for _, lp := range in {
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
n, err := w.WriteString(lp.GetName())
n, err := writeName(w, lp.GetName())
written += n
if err != nil {
return written, err
@@ -442,6 +568,49 @@ func writeOpenMetricsLabelPairs(
return written, nil
}
// writeOpenMetricsCreated writes the created timestamp for a single time series
// following OpenMetrics text format to w, given the metric name, the metric proto
// message itself, optionally a suffix to be removed, e.g. '_total' for counters,
// an additional label name with a float64 value (use empty string as label name if
// not required) and the timestamp that represents the created timestamp.
// The function returns the number of bytes written and any error encountered.
func writeOpenMetricsCreated(w enhancedWriter,
name, suffixToTrim string, metric *dto.Metric,
additionalLabelName string, additionalLabelValue float64,
createdTimestamp *timestamppb.Timestamp,
) (int, error) {
written := 0
n, err := writeOpenMetricsNameAndLabelPairs(
w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue,
)
written += n
if err != nil {
return written, err
}
err = w.WriteByte(' ')
written++
if err != nil {
return written, err
}
// TODO(beorn7): Format this directly from components of ts to
// avoid overflow/underflow and precision issues of the float
// conversion.
n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9)
written += n
if err != nil {
return written, err
}
err = w.WriteByte('\n')
written++
if err != nil {
return written, err
}
return written, nil
}
// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
// function returns the number of bytes written and any error encountered.
func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
@@ -451,7 +620,7 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
if err != nil {
return written, err
}
n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0)
n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0)
written += n
if err != nil {
return written, err

View File

@@ -62,6 +62,18 @@ var (
// contains duplicate metrics or invalid metric or label names, the conversion
// will result in invalid text format output.
//
// If metric names conform to the legacy validation pattern, they will be placed
// outside the brackets in the traditional way, like `foo{}`. If the metric name
// fails the legacy validation check, it will be placed quoted inside the
// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and
// no error will be thrown in this case.
//
// Similar to metric names, if label names conform to the legacy validation
// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
// name fails the legacy validation check, it will be quoted:
// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized and
// no error will be thrown in this case.
//
// This method fulfills the type 'prometheus.encoder'.
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
// Fail-fast checks.
@@ -98,7 +110,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
if err != nil {
return
}
n, err = w.WriteString(name)
n, err = writeName(w, name)
written += n
if err != nil {
return
@@ -124,7 +136,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
if err != nil {
return
}
n, err = w.WriteString(name)
n, err = writeName(w, name)
written += n
if err != nil {
return
@@ -280,21 +292,9 @@ func writeSample(
additionalLabelName string, additionalLabelValue float64,
value float64,
) (int, error) {
var written int
n, err := w.WriteString(name)
written += n
if err != nil {
return written, err
}
if suffix != "" {
n, err = w.WriteString(suffix)
written += n
if err != nil {
return written, err
}
}
n, err = writeLabelPairs(
w, metric.Label, additionalLabelName, additionalLabelValue,
written := 0
n, err := writeNameAndLabelPairs(
w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
)
written += n
if err != nil {
@@ -330,32 +330,64 @@ func writeSample(
return written, nil
}
// writeLabelPairs converts a slice of LabelPair proto messages plus the
// explicitly given additional label pair into text formatted as required by the
// text format and writes it to 'w'. An empty slice in combination with an empty
// string 'additionalLabelName' results in nothing being written. Otherwise, the
// label pairs are written, escaped as required by the text format, and enclosed
// in '{...}'. The function returns the number of bytes written and any error
// encountered.
func writeLabelPairs(
// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the
// explicitly given metric name and additional label pair into text formatted as
// required by the text format and writes it to 'w'. An empty slice in
// combination with an empty string 'additionalLabelName' results in nothing
// being written. Otherwise, the label pairs are written, escaped as required by
// the text format, and enclosed in '{...}'. The function returns the number of
// bytes written and any error encountered. If the metric name is not
// legacy-valid, it will be put inside the brackets as well. Legacy-invalid
// label names will also be quoted.
func writeNameAndLabelPairs(
w enhancedWriter,
name string,
in []*dto.LabelPair,
additionalLabelName string, additionalLabelValue float64,
) (int, error) {
if len(in) == 0 && additionalLabelName == "" {
return 0, nil
}
var (
written int
separator byte = '{'
metricInsideBraces = false
)
if name != "" {
// If the name does not pass the legacy validity check, we must put the
// metric name inside the braces.
if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
metricInsideBraces = true
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
separator = ','
}
n, err := writeName(w, name)
written += n
if err != nil {
return written, err
}
}
if len(in) == 0 && additionalLabelName == "" {
if metricInsideBraces {
err := w.WriteByte('}')
written++
if err != nil {
return written, err
}
}
return written, nil
}
for _, lp := range in {
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
n, err := w.WriteString(lp.GetName())
n, err := writeName(w, lp.GetName())
written += n
if err != nil {
return written, err
@@ -462,3 +494,27 @@ func writeInt(w enhancedWriter, i int64) (int, error) {
numBufPool.Put(bp)
return written, err
}
// writeName writes a string as-is if it complies with the legacy naming
// scheme, or escapes it in double quotes if not.
func writeName(w enhancedWriter, name string) (int, error) {
if model.IsValidLegacyMetricName(model.LabelValue(name)) {
return w.WriteString(name)
}
var written int
var err error
err = w.WriteByte('"')
written++
if err != nil {
return written, err
}
var n int
n, err = writeEscapedString(w, name, true)
written += n
if err != nil {
return written, err
}
err = w.WriteByte('"')
written++
return written, err
}
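The effect of writeName is easiest to see end to end. A speculative sketch: given the quoting rules described above, a dotted metric name should come out quoted, and for the sample line it should move inside the braces (e.g. {"http.requests"} 1), though the exact rendering is not asserted here.

package main

import (
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	// "http.requests" fails the legacy validity check because of the dot,
	// so writeName should emit it in double quotes.
	mf := &dto.MetricFamily{
		Name: proto.String("http.requests"),
		Type: dto.MetricType_UNTYPED.Enum(),
		Metric: []*dto.Metric{{
			Untyped: &dto.Untyped{Value: proto.Float64(1)},
		}},
	}
	expfmt.MetricFamilyToText(os.Stdout, mf)
}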

View File

@@ -16,6 +16,7 @@ package expfmt
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"math"
@@ -24,8 +25,9 @@ import (
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
"google.golang.org/protobuf/proto"
"github.com/prometheus/common/model"
)
// A stateFn is a function that represents a state in a state machine. By
@@ -112,7 +114,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF
// stream. Turn this error into something nicer and more
// meaningful. (io.EOF is often used as a signal for the legitimate end
// of an input stream.)
if p.err == io.EOF {
if p.err != nil && errors.Is(p.err, io.EOF) {
p.parseError("unexpected end of input stream")
}
return p.metricFamiliesByName, p.err
@@ -146,7 +148,7 @@ func (p *TextParser) startOfLine() stateFn {
// which is not an error but the signal that we are done.
// Any other error that happens to align with the start of
// a line is still an error.
if p.err == io.EOF {
if errors.Is(p.err, io.EOF) {
p.err = nil
}
return nil

View File

@@ -90,13 +90,13 @@ func (a *Alert) Validate() error {
return fmt.Errorf("start time must be before end time")
}
if err := a.Labels.Validate(); err != nil {
return fmt.Errorf("invalid label set: %s", err)
return fmt.Errorf("invalid label set: %w", err)
}
if len(a.Labels) == 0 {
return fmt.Errorf("at least one label pair required")
}
if err := a.Annotations.Validate(); err != nil {
return fmt.Errorf("invalid annotations: %s", err)
return fmt.Errorf("invalid annotations: %w", err)
}
return nil
}
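The switch from %s to %w preserves the message text but makes the underlying validation error inspectable with errors.Is and errors.As. A tiny self-contained illustration with a stand-in error:

package main

import (
	"errors"
	"fmt"
)

var errBadName = errors.New("bad label name") // stand-in for a validation error

func validate() error {
	// Wrapping with %w (as in the change above) keeps the cause inspectable.
	return fmt.Errorf("invalid label set: %w", errBadName)
}

func main() {
	err := validate()
	fmt.Println(errors.Is(err, errBadName)) // true; with %s this would be false
}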

View File

@@ -97,18 +97,26 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
// therewith.
type LabelName string
// IsValid is true iff the label name matches the pattern of LabelNameRE. This
// method, however, does not use LabelNameRE for the check but a much faster
// hardcoded implementation.
// IsValid returns true iff name matches the pattern of LabelNameRE for legacy
// names, and iff it's valid UTF-8 if NameValidationScheme is set to
// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the
// check but a much faster hardcoded implementation.
func (ln LabelName) IsValid() bool {
if len(ln) == 0 {
return false
}
switch NameValidationScheme {
case LegacyValidation:
for i, b := range ln {
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
return false
}
}
case UTF8Validation:
return utf8.ValidString(string(ln))
default:
panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
}
return true
}
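How the new scheme switch plays out in practice, as a sketch (note the NameValidationScheme comment later in this diff: the global should be flipped once at startup, not concurrently):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	name := model.LabelName("label.with.dots")

	model.NameValidationScheme = model.LegacyValidation
	fmt.Println(name.IsValid()) // false: dots fail the legacy pattern

	// Per the package docs, set this once, ideally in an init().
	model.NameValidationScheme = model.UTF8Validation
	fmt.Println(name.IsValid()) // true: any valid UTF-8 is accepted
}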
@@ -164,7 +172,7 @@ func (l LabelNames) String() string {
// A LabelValue is an associated value for a LabelName.
type LabelValue string
// IsValid returns true iff the string is a valid UTF8.
// IsValid returns true iff the string is valid UTF-8.
func (lv LabelValue) IsValid() bool {
return utf8.ValidString(string(lv))
}

View File

@@ -14,10 +14,12 @@
package model
import (
"bytes"
"encoding/json"
"fmt"
"slices"
"sort"
"strings"
"strconv"
)
// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
@@ -129,14 +131,27 @@ func (l LabelSet) Merge(other LabelSet) LabelSet {
return result
}
// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically.
func (l LabelSet) String() string {
lstrs := make([]string, 0, len(l))
for l, v := range l {
lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
var lna [32]LabelName // On stack to avoid memory allocation for sorting names.
labelNames := lna[:0]
for name := range l {
labelNames = append(labelNames, name)
}
sort.Strings(lstrs)
return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
slices.Sort(labelNames)
var bytea [1024]byte // On stack to avoid memory allocation while building the output.
b := bytes.NewBuffer(bytea[:0])
b.WriteByte('{')
for i, name := range labelNames {
if i > 0 {
b.WriteString(", ")
}
b.WriteString(string(name))
b.WriteByte('=')
b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[name])))
}
b.WriteByte('}')
return b.String()
}
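The rewrite keeps the documented output shape while avoiding allocations via the stack-backed arrays. For reference, a minimal usage example:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	ls := model.LabelSet{
		"job":      "api",
		"instance": "host:9100",
	}
	// Names are sorted, values are quoted: {instance="host:9100", job="api"}
	fmt.Println(ls.String())
}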
// Fingerprint returns the LabelSet's fingerprint.

28
vendor/github.com/prometheus/common/model/metadata.go generated vendored Normal file
View File

@@ -0,0 +1,28 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
// MetricType represents metric type values.
type MetricType string
const (
MetricTypeCounter = MetricType("counter")
MetricTypeGauge = MetricType("gauge")
MetricTypeHistogram = MetricType("histogram")
MetricTypeGaugeHistogram = MetricType("gaugehistogram")
MetricTypeSummary = MetricType("summary")
MetricTypeInfo = MetricType("info")
MetricTypeStateset = MetricType("stateset")
MetricTypeUnknown = MetricType("unknown")
)

View File

@@ -18,15 +18,84 @@ import (
"regexp"
"sort"
"strings"
"unicode/utf8"
dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/proto"
)
var (
// MetricNameRE is a regular expression matching valid metric
// names. Note that the IsValidMetricName function performs the same
// check but faster than a match with this regular expression.
MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
// NameValidationScheme determines the method of name validation to be used by
// all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode
// in isolation from other components that don't support UTF-8 may result in
// bugs or other undefined behavior. This value is intended to be set by
// UTF-8-aware binaries as part of their startup. To avoid the need for locking,
// this value should be set once, ideally in an init(), before multiple
// goroutines are started.
NameValidationScheme = LegacyValidation
// NameEscapingScheme defines the default way that names will be
// escaped when presented to systems that do not support UTF-8 names. If the
// Content-Type "escaping" term is specified, that will override this value.
NameEscapingScheme = ValueEncodingEscaping
)
// ValidationScheme is a Go enum for determining how metric and label names will
// be validated by this library.
type ValidationScheme int
const (
// LegacyValidation is a setting that requires that metric and label names
// conform to the original Prometheus character requirements described by
// MetricNameRE and LabelNameRE.
LegacyValidation ValidationScheme = iota
// UTF8Validation only requires that metric and label names be valid UTF-8
// strings.
UTF8Validation
)
type EscapingScheme int
const (
// NoEscaping indicates that a name will not be escaped. Unescaped names that
// do not conform to the legacy validity check will use a new exposition
// format syntax that will be officially standardized in future versions.
NoEscaping EscapingScheme = iota
// UnderscoreEscaping replaces all legacy-invalid characters with underscores.
UnderscoreEscaping
// DotsEscaping is similar to UnderscoreEscaping, except that dots are
// converted to `_dot_` and pre-existing underscores are converted to `__`.
DotsEscaping
// ValueEncodingEscaping prepends the name with `U__` and replaces all invalid
// characters with the unicode value, surrounded by underscores. Single
// underscores are replaced with double underscores.
ValueEncodingEscaping
)
const (
// EscapingKey is the key in an Accept or Content-Type header that defines how
// metric and label names that do not conform to the legacy character
// requirements should be escaped when being scraped by a legacy prometheus
// system. If a system does not explicitly pass an escaping parameter in the
// Accept header, the default NameEscapingScheme will be used.
EscapingKey = "escaping"
// Possible values for Escaping Key:
AllowUTF8 = "allow-utf-8" // No escaping required.
EscapeUnderscores = "underscores"
EscapeDots = "dots"
EscapeValues = "values"
)
// MetricNameRE is a regular expression matching valid metric
// names. Note that the IsValidMetricName function performs the same
// check but faster than a match with this regular expression.
var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
// A Metric is similar to a LabelSet, but the key difference is that a Metric is
// a singleton and refers to one and only one stream of samples.
type Metric LabelSet
@@ -86,17 +155,303 @@ func (m Metric) FastFingerprint() Fingerprint {
return LabelSet(m).FastFingerprint()
}
// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
// IsValidMetricName returns true iff name matches the pattern of MetricNameRE
// when the legacy scheme is selected, and iff it is valid UTF-8 when the
// UTF8Validation scheme is selected.
func IsValidMetricName(n LabelValue) bool {
switch NameValidationScheme {
case LegacyValidation:
return IsValidLegacyMetricName(n)
case UTF8Validation:
if len(n) == 0 {
return false
}
return utf8.ValidString(string(n))
default:
panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
}
}
// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the
// legacy validation scheme regardless of the value of NameValidationScheme.
// This function, however, does not use MetricNameRE for the check but a much
// faster hardcoded implementation.
func IsValidMetricName(n LabelValue) bool {
func IsValidLegacyMetricName(n LabelValue) bool {
if len(n) == 0 {
return false
}
for i, b := range n {
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
if !isValidLegacyRune(b, i) {
return false
}
}
return true
}
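A short sketch of how the two validation modes differ, assuming the package is imported as model and NameValidationScheme is set as described in the var block above:

model.NameValidationScheme = model.LegacyValidation
model.IsValidMetricName("up")            // true
model.IsValidMetricName("http.requests") // false: '.' is not a legacy-valid rune

model.NameValidationScheme = model.UTF8Validation
model.IsValidMetricName("http.requests") // true: any non-empty valid UTF-8 string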
// EscapeMetricFamily escapes the given metric names and labels with the given
// escaping scheme. Returns a new object that uses the same pointers to fields
// when possible and creates new escaped versions so as not to mutate the
// input.
func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily {
if v == nil {
return nil
}
if scheme == NoEscaping {
return v
}
out := &dto.MetricFamily{
Help: v.Help,
Type: v.Type,
Unit: v.Unit,
}
// If the name is nil, copy as-is, don't try to escape.
if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) {
out.Name = v.Name
} else {
out.Name = proto.String(EscapeName(v.GetName(), scheme))
}
for _, m := range v.Metric {
if !metricNeedsEscaping(m) {
out.Metric = append(out.Metric, m)
continue
}
escaped := &dto.Metric{
Gauge: m.Gauge,
Counter: m.Counter,
Summary: m.Summary,
Untyped: m.Untyped,
Histogram: m.Histogram,
TimestampMs: m.TimestampMs,
}
for _, l := range m.Label {
if l.GetName() == MetricNameLabel {
if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) {
escaped.Label = append(escaped.Label, l)
continue
}
escaped.Label = append(escaped.Label, &dto.LabelPair{
Name: proto.String(MetricNameLabel),
Value: proto.String(EscapeName(l.GetValue(), scheme)),
})
continue
}
if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) {
escaped.Label = append(escaped.Label, l)
continue
}
escaped.Label = append(escaped.Label, &dto.LabelPair{
Name: proto.String(EscapeName(l.GetName(), scheme)),
Value: l.Value,
})
}
out.Metric = append(out.Metric, escaped)
}
return out
}
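A sketch of the copy-on-escape behavior, assuming dto is the generated github.com/prometheus/client_model/go package and proto is google.golang.org/protobuf/proto, as used above:

mf := &dto.MetricFamily{Name: proto.String("my.metric")}
out := model.EscapeMetricFamily(mf, model.UnderscoreEscaping)
// out.GetName() == "my_metric", while mf.GetName() is still "my.metric";
// untouched fields such as Help and Type are shared, not copied.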
func metricNeedsEscaping(m *dto.Metric) bool {
for _, l := range m.Label {
if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) {
return true
}
if !IsValidLegacyMetricName(LabelValue(l.GetName())) {
return true
}
}
return false
}
const (
lowerhex = "0123456789abcdef"
)
// EscapeName escapes the incoming name according to the provided escaping
// scheme. Depending on the rules of escaping, this may cause no change in the
// string that is returned. (Especially NoEscaping, which by definition is a
// noop). This function does not do any validation of the name.
func EscapeName(name string, scheme EscapingScheme) string {
if len(name) == 0 {
return name
}
var escaped strings.Builder
switch scheme {
case NoEscaping:
return name
case UnderscoreEscaping:
if IsValidLegacyMetricName(LabelValue(name)) {
return name
}
for i, b := range name {
if isValidLegacyRune(b, i) {
escaped.WriteRune(b)
} else {
escaped.WriteRune('_')
}
}
return escaped.String()
case DotsEscaping:
// Do not early return for legacy valid names, we still escape underscores.
for i, b := range name {
if b == '_' {
escaped.WriteString("__")
} else if b == '.' {
escaped.WriteString("_dot_")
} else if isValidLegacyRune(b, i) {
escaped.WriteRune(b)
} else {
escaped.WriteRune('_')
}
}
return escaped.String()
case ValueEncodingEscaping:
if IsValidLegacyMetricName(LabelValue(name)) {
return name
}
escaped.WriteString("U__")
for i, b := range name {
if isValidLegacyRune(b, i) {
escaped.WriteRune(b)
} else if !utf8.ValidRune(b) {
escaped.WriteString("_FFFD_")
} else if b < 0x100 {
escaped.WriteRune('_')
for s := 4; s >= 0; s -= 4 {
escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
}
escaped.WriteRune('_')
} else if b < 0x10000 {
escaped.WriteRune('_')
for s := 12; s >= 0; s -= 4 {
escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
}
escaped.WriteRune('_')
}
}
return escaped.String()
default:
panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
}
}
// lower function taken from strconv.atoi
func lower(c byte) byte {
return c | ('x' - 'X')
}
// UnescapeName unescapes the incoming name according to the provided escaping
// scheme if possible. Some schemes are partially or totally non-roundtrippable.
// If any error is encountered, the original input is returned.
func UnescapeName(name string, scheme EscapingScheme) string {
if len(name) == 0 {
return name
}
switch scheme {
case NoEscaping:
return name
case UnderscoreEscaping:
// It is not possible to unescape from underscore replacement.
return name
case DotsEscaping:
name = strings.ReplaceAll(name, "_dot_", ".")
name = strings.ReplaceAll(name, "__", "_")
return name
case ValueEncodingEscaping:
escapedName, found := strings.CutPrefix(name, "U__")
if !found {
return name
}
var unescaped strings.Builder
TOP:
for i := 0; i < len(escapedName); i++ {
// All non-underscores are treated normally.
if escapedName[i] != '_' {
unescaped.WriteByte(escapedName[i])
continue
}
i++
if i >= len(escapedName) {
return name
}
// A double underscore is a single underscore.
if escapedName[i] == '_' {
unescaped.WriteByte('_')
continue
}
// We think we are in a UTF-8 code, process it.
var utf8Val uint
for j := 0; i < len(escapedName); j++ {
// This is too many characters for a utf8 value.
if j > 4 {
return name
}
// Found a closing underscore, convert to a rune, check validity, and append.
if escapedName[i] == '_' {
utf8Rune := rune(utf8Val)
if !utf8.ValidRune(utf8Rune) {
return name
}
unescaped.WriteRune(utf8Rune)
continue TOP
}
r := lower(escapedName[i])
utf8Val *= 16
if r >= '0' && r <= '9' {
utf8Val += uint(r) - '0'
} else if r >= 'a' && r <= 'f' {
utf8Val += uint(r) - 'a' + 10
} else {
return name
}
i++
}
// Didn't find closing underscore, invalid.
return name
}
return unescaped.String()
default:
panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
}
}
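To illustrate which schemes round-trip, a sketch assuming the model import as above:

e := model.EscapeName("metric.name", model.ValueEncodingEscaping) // "U__metric_2e_name"
model.UnescapeName(e, model.ValueEncodingEscaping)                // "metric.name"

// DotsEscaping is lossy: "__" collapses to "_", so "a__b" could have been "a_b" or "a__b".
model.UnescapeName("a__b", model.DotsEscaping) // "a_b"

// UnderscoreEscaping cannot be reversed; the input is returned as-is.
model.UnescapeName("a_b", model.UnderscoreEscaping) // "a_b"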
func isValidLegacyRune(b rune, i int) bool {
return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)
}
func (e EscapingScheme) String() string {
switch e {
case NoEscaping:
return AllowUTF8
case UnderscoreEscaping:
return EscapeUnderscores
case DotsEscaping:
return EscapeDots
case ValueEncodingEscaping:
return EscapeValues
default:
panic(fmt.Sprintf("unknown format scheme %d", e))
}
}
func ToEscapingScheme(s string) (EscapingScheme, error) {
if s == "" {
return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme")
}
switch s {
case AllowUTF8:
return NoEscaping, nil
case EscapeUnderscores:
return UnderscoreEscaping, nil
case EscapeDots:
return DotsEscaping, nil
case EscapeValues:
return ValueEncodingEscaping, nil
default:
return NoEscaping, fmt.Errorf("unknown format scheme " + s)
}
}
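Hypothetical use when resolving the escaping term from an Accept or Content-Type header:

scheme, err := model.ToEscapingScheme("dots") // e.g. from a header carrying escaping=dots
// scheme == model.DotsEscaping, err == nil; scheme.String() returns "dots" again.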

View File

@@ -22,10 +22,8 @@ import (
// when calculating their combined hash value (aka signature aka fingerprint).
const SeparatorByte byte = 255
var (
// cache the signature of an empty label set.
emptyLabelSignature = hashNew()
)
// cache the signature of an empty label set.
var emptyLabelSignature = hashNew()
// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
// given label set. (Collisions are possible but unlikely if the number of label

View File

@@ -81,7 +81,7 @@ func (s *Silence) Validate() error {
}
for _, m := range s.Matchers {
if err := m.Validate(); err != nil {
return fmt.Errorf("invalid matcher: %s", err)
return fmt.Errorf("invalid matcher: %w", err)
}
}
if s.StartsAt.IsZero() {

View File

@@ -21,14 +21,12 @@ import (
"strings"
)
var (
// ZeroSample is the pseudo zero-value of Sample used to signal a
// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
// and metric nil. Note that the natural zero value of Sample has a timestamp
// of 0, which is possible to appear in a real Sample and thus not suitable
// to signal a non-existing Sample.
ZeroSample = Sample{Timestamp: Earliest}
)
// ZeroSample is the pseudo zero-value of Sample used to signal a
// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
// and metric nil. Note that the natural zero value of Sample has a timestamp
// of 0, which is possible to appear in a real Sample and thus not suitable
// to signal a non-existing Sample.
var ZeroSample = Sample{Timestamp: Earliest}
// Sample is a sample pair associated with a metric. A single sample must either
// define Value or Histogram but not both. Histogram == nil implies the Value
@@ -274,7 +272,7 @@ func (s *Scalar) UnmarshalJSON(b []byte) error {
value, err := strconv.ParseFloat(f, 64)
if err != nil {
return fmt.Errorf("error parsing sample value: %s", err)
return fmt.Errorf("error parsing sample value: %w", err)
}
s.Value = SampleValue(value)
return nil

View File

@@ -20,14 +20,12 @@ import (
"strconv"
)
var (
// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
// non-existing sample pair. It is a SamplePair with timestamp Earliest and
// value 0.0. Note that the natural zero value of SamplePair has a timestamp
// of 0, which is possible to appear in a real SamplePair and thus not
// suitable to signal a non-existing SamplePair.
ZeroSamplePair = SamplePair{Timestamp: Earliest}
)
// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
// non-existing sample pair. It is a SamplePair with timestamp Earliest and
// value 0.0. Note that the natural zero value of SamplePair has a timestamp
// of 0, which is possible to appear in a real SamplePair and thus not
// suitable to signal a non-existing SamplePair.
var ZeroSamplePair = SamplePair{Timestamp: Earliest}
// A SampleValue is a representation of a value for a given sample at a given
// time.

View File

@@ -61,11 +61,11 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.54.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
GOLANGCI_LINT_VERSION ?= v1.55.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
# If we're in CI and there is an Actions file, that means the linter
# is being run in Actions, so we don't need to run it here.
ifneq (,$(SKIP_GOLANGCI_LINT))
@@ -169,12 +169,16 @@ common-vet:
common-lint: $(GOLANGCI_LINT)
ifdef GOLANGCI_LINT
@echo ">> running golangci-lint"
# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
# Otherwise staticcheck might fail randomly for some reason not yet explained.
$(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
endif
.PHONY: common-lint-fix
common-lint-fix: $(GOLANGCI_LINT)
ifdef GOLANGCI_LINT
@echo ">> running golangci-lint fix"
$(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
endif
.PHONY: common-yamllint
common-yamllint:
@echo ">> running yamllint on all YAML files in the repository"

View File

@@ -84,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) {
kv := strings.Split(text, ":")
if len(kv) != 2 {
return nil, fmt.Errorf("%w: Cannot parae line: %q", ErrFileParse, text)
return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text)
}
k := strings.TrimSpace(kv[0])

View File

@@ -126,6 +126,7 @@ type Meminfo struct {
VmallocUsed *uint64
// largest contiguous block of vmalloc area which is free
VmallocChunk *uint64
Percpu *uint64
HardwareCorrupted *uint64
AnonHugePages *uint64
ShmemHugePages *uint64
@@ -140,6 +141,55 @@ type Meminfo struct {
DirectMap4k *uint64
DirectMap2M *uint64
DirectMap1G *uint64
// The struct fields below are the byte-normalized counterparts to the
// existing struct fields. Values are normalized using the optional
// unit field in the meminfo line.
MemTotalBytes *uint64
MemFreeBytes *uint64
MemAvailableBytes *uint64
BuffersBytes *uint64
CachedBytes *uint64
SwapCachedBytes *uint64
ActiveBytes *uint64
InactiveBytes *uint64
ActiveAnonBytes *uint64
InactiveAnonBytes *uint64
ActiveFileBytes *uint64
InactiveFileBytes *uint64
UnevictableBytes *uint64
MlockedBytes *uint64
SwapTotalBytes *uint64
SwapFreeBytes *uint64
DirtyBytes *uint64
WritebackBytes *uint64
AnonPagesBytes *uint64
MappedBytes *uint64
ShmemBytes *uint64
SlabBytes *uint64
SReclaimableBytes *uint64
SUnreclaimBytes *uint64
KernelStackBytes *uint64
PageTablesBytes *uint64
NFSUnstableBytes *uint64
BounceBytes *uint64
WritebackTmpBytes *uint64
CommitLimitBytes *uint64
CommittedASBytes *uint64
VmallocTotalBytes *uint64
VmallocUsedBytes *uint64
VmallocChunkBytes *uint64
PercpuBytes *uint64
HardwareCorruptedBytes *uint64
AnonHugePagesBytes *uint64
ShmemHugePagesBytes *uint64
ShmemPmdMappedBytes *uint64
CmaTotalBytes *uint64
CmaFreeBytes *uint64
HugepagesizeBytes *uint64
DirectMap4kBytes *uint64
DirectMap2MBytes *uint64
DirectMap1GBytes *uint64
}
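A hypothetical read showing a raw field (typically kB-scaled, as reported by the kernel) next to its new byte-normalized counterpart; error handling elided:

fs, _ := procfs.NewFS("/proc")
mi, _ := fs.Meminfo()
if mi.MemTotal != nil && mi.MemTotalBytes != nil {
	fmt.Printf("MemTotal: %d kB raw, %d bytes normalized\n", *mi.MemTotal, *mi.MemTotalBytes)
}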
// Meminfo returns information about current kernel/system memory statistics.
@@ -162,114 +212,176 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
var m Meminfo
s := bufio.NewScanner(r)
for s.Scan() {
// Each line has at least a name and value; we ignore the unit.
fields := strings.Fields(s.Text())
if len(fields) < 2 {
return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
}
var val, valBytes uint64
v, err := strconv.ParseUint(fields[1], 0, 64)
val, err := strconv.ParseUint(fields[1], 0, 64)
if err != nil {
return nil, err
}
switch len(fields) {
case 2:
// No unit present, use the parsed value as bytes directly.
valBytes = val
case 3:
// Unit present in optional 3rd field, convert it to
// bytes. The only unit supported within the Linux
// kernel is `kB`.
if fields[2] != "kB" {
return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2])
}
valBytes = 1024 * val
default:
return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
}
switch fields[0] {
case "MemTotal:":
m.MemTotal = &v
m.MemTotal = &val
m.MemTotalBytes = &valBytes
case "MemFree:":
m.MemFree = &v
m.MemFree = &val
m.MemFreeBytes = &valBytes
case "MemAvailable:":
m.MemAvailable = &v
m.MemAvailable = &val
m.MemAvailableBytes = &valBytes
case "Buffers:":
m.Buffers = &v
m.Buffers = &val
m.BuffersBytes = &valBytes
case "Cached:":
m.Cached = &v
m.Cached = &val
m.CachedBytes = &valBytes
case "SwapCached:":
m.SwapCached = &v
m.SwapCached = &val
m.SwapCachedBytes = &valBytes
case "Active:":
m.Active = &v
m.Active = &val
m.ActiveBytes = &valBytes
case "Inactive:":
m.Inactive = &v
m.Inactive = &val
m.InactiveBytes = &valBytes
case "Active(anon):":
m.ActiveAnon = &v
m.ActiveAnon = &val
m.ActiveAnonBytes = &valBytes
case "Inactive(anon):":
m.InactiveAnon = &v
m.InactiveAnon = &val
m.InactiveAnonBytes = &valBytes
case "Active(file):":
m.ActiveFile = &v
m.ActiveFile = &val
m.ActiveFileBytes = &valBytes
case "Inactive(file):":
m.InactiveFile = &v
m.InactiveFile = &val
m.InactiveFileBytes = &valBytes
case "Unevictable:":
m.Unevictable = &v
m.Unevictable = &val
m.UnevictableBytes = &valBytes
case "Mlocked:":
m.Mlocked = &v
m.Mlocked = &val
m.MlockedBytes = &valBytes
case "SwapTotal:":
m.SwapTotal = &v
m.SwapTotal = &val
m.SwapTotalBytes = &valBytes
case "SwapFree:":
m.SwapFree = &v
m.SwapFree = &val
m.SwapFreeBytes = &valBytes
case "Dirty:":
m.Dirty = &v
m.Dirty = &val
m.DirtyBytes = &valBytes
case "Writeback:":
m.Writeback = &v
m.Writeback = &val
m.WritebackBytes = &valBytes
case "AnonPages:":
m.AnonPages = &v
m.AnonPages = &val
m.AnonPagesBytes = &valBytes
case "Mapped:":
m.Mapped = &v
m.Mapped = &val
m.MappedBytes = &valBytes
case "Shmem:":
m.Shmem = &v
m.Shmem = &val
m.ShmemBytes = &valBytes
case "Slab:":
m.Slab = &v
m.Slab = &val
m.SlabBytes = &valBytes
case "SReclaimable:":
m.SReclaimable = &v
m.SReclaimable = &val
m.SReclaimableBytes = &valBytes
case "SUnreclaim:":
m.SUnreclaim = &v
m.SUnreclaim = &val
m.SUnreclaimBytes = &valBytes
case "KernelStack:":
m.KernelStack = &v
m.KernelStack = &val
m.KernelStackBytes = &valBytes
case "PageTables:":
m.PageTables = &v
m.PageTables = &val
m.PageTablesBytes = &valBytes
case "NFS_Unstable:":
m.NFSUnstable = &v
m.NFSUnstable = &val
m.NFSUnstableBytes = &valBytes
case "Bounce:":
m.Bounce = &v
m.Bounce = &val
m.BounceBytes = &valBytes
case "WritebackTmp:":
m.WritebackTmp = &v
m.WritebackTmp = &val
m.WritebackTmpBytes = &valBytes
case "CommitLimit:":
m.CommitLimit = &v
m.CommitLimit = &val
m.CommitLimitBytes = &valBytes
case "Committed_AS:":
m.CommittedAS = &v
m.CommittedAS = &val
m.CommittedASBytes = &valBytes
case "VmallocTotal:":
m.VmallocTotal = &v
m.VmallocTotal = &val
m.VmallocTotalBytes = &valBytes
case "VmallocUsed:":
m.VmallocUsed = &v
m.VmallocUsed = &val
m.VmallocUsedBytes = &valBytes
case "VmallocChunk:":
m.VmallocChunk = &v
m.VmallocChunk = &val
m.VmallocChunkBytes = &valBytes
case "Percpu:":
m.Percpu = &val
m.PercpuBytes = &valBytes
case "HardwareCorrupted:":
m.HardwareCorrupted = &v
m.HardwareCorrupted = &val
m.HardwareCorruptedBytes = &valBytes
case "AnonHugePages:":
m.AnonHugePages = &v
m.AnonHugePages = &val
m.AnonHugePagesBytes = &valBytes
case "ShmemHugePages:":
m.ShmemHugePages = &v
m.ShmemHugePages = &val
m.ShmemHugePagesBytes = &valBytes
case "ShmemPmdMapped:":
m.ShmemPmdMapped = &v
m.ShmemPmdMapped = &val
m.ShmemPmdMappedBytes = &valBytes
case "CmaTotal:":
m.CmaTotal = &v
m.CmaTotal = &val
m.CmaTotalBytes = &valBytes
case "CmaFree:":
m.CmaFree = &v
m.CmaFree = &val
m.CmaFreeBytes = &valBytes
case "HugePages_Total:":
m.HugePagesTotal = &v
m.HugePagesTotal = &val
case "HugePages_Free:":
m.HugePagesFree = &v
m.HugePagesFree = &val
case "HugePages_Rsvd:":
m.HugePagesRsvd = &v
m.HugePagesRsvd = &val
case "HugePages_Surp:":
m.HugePagesSurp = &v
m.HugePagesSurp = &val
case "Hugepagesize:":
m.Hugepagesize = &v
m.Hugepagesize = &val
m.HugepagesizeBytes = &valBytes
case "DirectMap4k:":
m.DirectMap4k = &v
m.DirectMap4k = &val
m.DirectMap4kBytes = &valBytes
case "DirectMap2M:":
m.DirectMap2M = &v
m.DirectMap2M = &val
m.DirectMap2MBytes = &valBytes
case "DirectMap1G:":
m.DirectMap1G = &v
m.DirectMap1G = &val
m.DirectMap1GBytes = &valBytes
}
}

View File

@@ -50,10 +50,13 @@ type (
// UsedSockets shows the total number of parsed lines representing the
// number of used sockets.
UsedSockets uint64
// Drops shows the total number of dropped packets of all UDP sockets.
Drops *uint64
}
// netIPSocketLine represents the fields parsed from a single line
// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
// Drops is non-nil for udp{,6}, but nil for tcp{,6}.
// For the proc file format details, see https://linux.die.net/man/5/proc.
netIPSocketLine struct {
Sl uint64
@@ -66,6 +69,7 @@ type (
RxQueue uint64
UID uint64
Inode uint64
Drops *uint64
}
)
@@ -77,13 +81,14 @@ func newNetIPSocket(file string) (NetIPSocket, error) {
defer f.Close()
var netIPSocket NetIPSocket
isUDP := strings.Contains(file, "udp")
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
line, err := parseNetIPSocketLine(fields)
line, err := parseNetIPSocketLine(fields, isUDP)
if err != nil {
return nil, err
}
@@ -104,19 +109,25 @@ func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
defer f.Close()
var netIPSocketSummary NetIPSocketSummary
var udpPacketDrops uint64
isUDP := strings.Contains(file, "udp")
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
line, err := parseNetIPSocketLine(fields)
line, err := parseNetIPSocketLine(fields, isUDP)
if err != nil {
return nil, err
}
netIPSocketSummary.TxQueueLength += line.TxQueue
netIPSocketSummary.RxQueueLength += line.RxQueue
netIPSocketSummary.UsedSockets++
if isUDP {
udpPacketDrops += *line.Drops
netIPSocketSummary.Drops = &udpPacketDrops
}
}
if err := s.Err(); err != nil {
return nil, err
@@ -149,7 +160,7 @@ func parseIP(hexIP string) (net.IP, error) {
}
// parseNetIPSocketLine parses a single line, represented by a list of fields.
func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) {
line := &netIPSocketLine{}
if len(fields) < 10 {
return nil, fmt.Errorf(
@@ -224,5 +235,14 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
}
// drops
if isUDP {
drops, err := strconv.ParseUint(fields[12], 0, 64)
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse drops value in %q: %w", ErrFileParse, drops, err)
}
line.Drops = &drops
}
return line, nil
}
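Hypothetical use of the new Drops field via the package's UDP summary helper (Drops is nil for TCP files, per the comment above); error handling elided:

fs, _ := procfs.NewFS("/proc")
summary, _ := fs.NetUDPSummary()
if summary.Drops != nil {
	fmt.Println("udp packet drops:", *summary.Drops)
}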

119
vendor/github.com/prometheus/procfs/net_tls_stat.go generated vendored Normal file
View File

@@ -0,0 +1,119 @@
// Copyright 2023 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
)
// TLSStat struct represents data in /proc/net/tls_stat.
// See https://docs.kernel.org/networking/tls.html#statistics
type TLSStat struct {
// number of TX sessions currently installed where host handles cryptography
TLSCurrTxSw int
// number of RX sessions currently installed where host handles cryptography
TLSCurrRxSw int
// number of TX sessions currently installed where NIC handles cryptography
TLSCurrTxDevice int
// number of RX sessions currently installed where NIC handles cryptography
TLSCurrRxDevice int
// number of TX sessions opened with host cryptography
TLSTxSw int
// number of RX sessions opened with host cryptography
TLSRxSw int
// number of TX sessions opened with NIC cryptography
TLSTxDevice int
// number of RX sessions opened with NIC cryptography
TLSRxDevice int
// record decryption failed (e.g. due to incorrect authentication tag)
TLSDecryptError int
// number of RX resyncs sent to NICs handling cryptography
TLSRxDeviceResync int
// number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records.
TLSDecryptRetry int
// number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction.
TLSRxNoPadViolation int
}
// NewTLSStat reads the tls_stat statistics.
func NewTLSStat() (TLSStat, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return TLSStat{}, err
}
return fs.NewTLSStat()
}
// NewTLSStat reads the tls_stat statistics.
func (fs FS) NewTLSStat() (TLSStat, error) {
file, err := os.Open(fs.proc.Path("net/tls_stat"))
if err != nil {
return TLSStat{}, err
}
defer file.Close()
var (
tlsstat = TLSStat{}
s = bufio.NewScanner(file)
)
for s.Scan() {
fields := strings.Fields(s.Text())
if len(fields) != 2 {
return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
}
name := fields[0]
value, err := strconv.Atoi(fields[1])
if err != nil {
return TLSStat{}, err
}
switch name {
case "TlsCurrTxSw":
tlsstat.TLSCurrTxSw = value
case "TlsCurrRxSw":
tlsstat.TLSCurrRxSw = value
case "TlsCurrTxDevice":
tlsstat.TLSCurrTxDevice = value
case "TlsCurrRxDevice":
tlsstat.TLSCurrRxDevice = value
case "TlsTxSw":
tlsstat.TLSTxSw = value
case "TlsRxSw":
tlsstat.TLSRxSw = value
case "TlsTxDevice":
tlsstat.TLSTxDevice = value
case "TlsRxDevice":
tlsstat.TLSRxDevice = value
case "TlsDecryptError":
tlsstat.TLSDecryptError = value
case "TlsRxDeviceResync":
tlsstat.TLSRxDeviceResync = value
case "TlsDecryptRetry":
tlsstat.TLSDecryptRetry = value
case "TlsRxNoPadViolation":
tlsstat.TLSRxNoPadViolation = value
}
}
return tlsstat, s.Err()
}
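A hypothetical caller; the statistics file only exists when the kernel TLS module is active:

stat, err := procfs.NewTLSStat()
if err == nil {
	fmt.Println("current software TX sessions:", stat.TLSCurrTxSw)
	fmt.Println("record decrypt errors:", stat.TLSDecryptError)
}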

View File

@@ -110,6 +110,11 @@ type ProcStat struct {
Policy uint
// Aggregated block I/O delays, measured in clock ticks (centiseconds).
DelayAcctBlkIOTicks uint64
// Guest time of the process (time spent running a virtual CPU for a guest
// operating system), measured in clock ticks.
GuestTime int
// Guest time of the process's children, measured in clock ticks.
CGuestTime int
proc FS
}
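A sketch of reading the new fields for the current process; error handling elided:

p, _ := procfs.Self()
stat, _ := p.Stat()
fmt.Printf("guest: %d ticks, children guest: %d ticks\n", stat.GuestTime, stat.CGuestTime)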
@@ -189,6 +194,8 @@ func (p Proc) Stat() (ProcStat, error) {
&s.RTPriority,
&s.Policy,
&s.DelayAcctBlkIOTicks,
&s.GuestTime,
&s.CGuestTime,
)
if err != nil {
return ProcStat{}, err

View File

@@ -17,7 +17,7 @@ linters:
- unused
# Our own extras:
- gofmt
- gofumpt
- nolintlint # lints nolint directives
- revive

10
vendor/go.uber.org/zap/.readme.tmpl generated vendored
View File

@@ -1,7 +1,15 @@
# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
<div align="center">
Blazing fast, structured, leveled logging in Go.
![Zap logo](assets/logo.png)
[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
</div>
## Installation
`go get -u go.uber.org/zap`
@@ -92,7 +100,7 @@ standard.
<hr>
Released under the [MIT License](LICENSE.txt).
Released under the [MIT License](LICENSE).
<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
benchmarking against slightly older versions of other packages. Versions are

52
vendor/go.uber.org/zap/CHANGELOG.md generated vendored
View File

@@ -3,14 +3,30 @@ All notable changes to this project will be documented in this file.
This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## 1.27.0 (20 Feb 2024)
Enhancements:
* [#1378][]: Add `WithLazy` method for `SugaredLogger`.
* [#1399][]: zaptest: Add `NewTestingWriter` for customizing TestingWriter with more flexibility than `NewLogger`.
* [#1406][]: Add `Log`, `Logw`, `Logln` methods for `SugaredLogger`.
* [#1416][]: Add `WithPanicHook` option for testing panic logs.
Thanks to @defval, @dimmo, @arxeiss, and @MKrupauskas for their contributions to this release.
[#1378]: https://github.com/uber-go/zap/pull/1378
[#1399]: https://github.com/uber-go/zap/pull/1399
[#1406]: https://github.com/uber-go/zap/pull/1406
[#1416]: https://github.com/uber-go/zap/pull/1416
## 1.26.0 (14 Sep 2023)
Enhancements:
* [#1297][]: Add Dict as a Field.
* [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured
context.
* [#1350][]: String encoding is much (~50%) faster now.
Thanks to @jquirke, @cdvr1993 for their contributions to this release.
Thanks to @hhk7734, @jquirke, and @cdvr1993 for their contributions to this release.
[#1297]: https://github.com/uber-go/zap/pull/1297
[#1319]: https://github.com/uber-go/zap/pull/1319
[#1350]: https://github.com/uber-go/zap/pull/1350
@@ -352,7 +368,7 @@ to this release.
[#675]: https://github.com/uber-go/zap/pull/675
[#704]: https://github.com/uber-go/zap/pull/704
## v1.9.1 (06 Aug 2018)
## 1.9.1 (06 Aug 2018)
Bugfixes:
@@ -360,7 +376,7 @@ Bugfixes:
[#614]: https://github.com/uber-go/zap/pull/614
## v1.9.0 (19 Jul 2018)
## 1.9.0 (19 Jul 2018)
Enhancements:
* [#602][]: Reduce number of allocations when logging with reflection.
@@ -373,7 +389,7 @@ Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and
[#572]: https://github.com/uber-go/zap/pull/572
[#606]: https://github.com/uber-go/zap/pull/606
## v1.8.0 (13 Apr 2018)
## 1.8.0 (13 Apr 2018)
Enhancements:
* [#508][]: Make log level configurable when redirecting the standard
@@ -391,14 +407,14 @@ Thanks to @DiSiqueira and @djui for their contributions to this release.
[#577]: https://github.com/uber-go/zap/pull/577
[#574]: https://github.com/uber-go/zap/pull/574
## v1.7.1 (25 Sep 2017)
## 1.7.1 (25 Sep 2017)
Bugfixes:
* [#504][]: Store strings when using AddByteString with the map encoder.
[#504]: https://github.com/uber-go/zap/pull/504
## v1.7.0 (21 Sep 2017)
## 1.7.0 (21 Sep 2017)
Enhancements:
@@ -407,7 +423,7 @@ Enhancements:
[#487]: https://github.com/uber-go/zap/pull/487
## v1.6.0 (30 Aug 2017)
## 1.6.0 (30 Aug 2017)
Enhancements:
@@ -418,7 +434,7 @@ Enhancements:
[#490]: https://github.com/uber-go/zap/pull/490
[#491]: https://github.com/uber-go/zap/pull/491
## v1.5.0 (22 Jul 2017)
## 1.5.0 (22 Jul 2017)
Enhancements:
@@ -436,7 +452,7 @@ Thanks to @richard-tunein and @pavius for their contributions to this release.
[#460]: https://github.com/uber-go/zap/pull/460
[#470]: https://github.com/uber-go/zap/pull/470
## v1.4.1 (08 Jun 2017)
## 1.4.1 (08 Jun 2017)
This release fixes two bugs.
@@ -448,7 +464,7 @@ Bugfixes:
[#435]: https://github.com/uber-go/zap/pull/435
[#444]: https://github.com/uber-go/zap/pull/444
## v1.4.0 (12 May 2017)
## 1.4.0 (12 May 2017)
This release adds a few small features and is fully backward-compatible.
@@ -464,7 +480,7 @@ Enhancements:
[#425]: https://github.com/uber-go/zap/pull/425
[#431]: https://github.com/uber-go/zap/pull/431
## v1.3.0 (25 Apr 2017)
## 1.3.0 (25 Apr 2017)
This release adds an enhancement to zap's testing helpers as well as the
ability to marshal an AtomicLevel. It is fully backward-compatible.
@@ -478,7 +494,7 @@ Enhancements:
[#415]: https://github.com/uber-go/zap/pull/415
[#416]: https://github.com/uber-go/zap/pull/416
## v1.2.0 (13 Apr 2017)
## 1.2.0 (13 Apr 2017)
This release adds a gRPC compatibility wrapper. It is fully backward-compatible.
@@ -489,7 +505,7 @@ Enhancements:
[#402]: https://github.com/uber-go/zap/pull/402
## v1.1.0 (31 Mar 2017)
## 1.1.0 (31 Mar 2017)
This release fixes two bugs and adds some enhancements to zap's testing helpers.
It is fully backward-compatible.
@@ -510,7 +526,7 @@ Thanks to @moitias for contributing to this release.
[#396]: https://github.com/uber-go/zap/pull/396
[#386]: https://github.com/uber-go/zap/pull/386
## v1.0.0 (14 Mar 2017)
## 1.0.0 (14 Mar 2017)
This is zap's first stable release. All exported APIs are now final, and no
further breaking changes will be made in the 1.x release series. Anyone using a
@@ -569,7 +585,7 @@ contributions to this release.
[#365]: https://github.com/uber-go/zap/pull/365
[#372]: https://github.com/uber-go/zap/pull/372
## v1.0.0-rc.3 (7 Mar 2017)
## 1.0.0-rc.3 (7 Mar 2017)
This is the third release candidate for zap's stable release. There are no
breaking changes.
@@ -595,7 +611,7 @@ Thanks to @ansel1 and @suyash for their contributions to this release.
[#353]: https://github.com/uber-go/zap/pull/353
[#311]: https://github.com/uber-go/zap/pull/311
## v1.0.0-rc.2 (21 Feb 2017)
## 1.0.0-rc.2 (21 Feb 2017)
This is the second release candidate for zap's stable release. It includes two
breaking changes.
@@ -641,7 +657,7 @@ Thanks to @skipor and @chapsuk for their contributions to this release.
[#326]: https://github.com/uber-go/zap/pull/326
[#300]: https://github.com/uber-go/zap/pull/300
## v1.0.0-rc.1 (14 Feb 2017)
## 1.0.0-rc.1 (14 Feb 2017)
This is the first release candidate for zap's stable release. There are multiple
breaking changes and improvements from the pre-release version. Most notably:
@@ -661,7 +677,7 @@ breaking changes and improvements from the pre-release version. Most notably:
* Sampling is more accurate, and doesn't depend on the standard library's shared
timer heap.
## v0.1.0-beta.1 (6 Feb 2017)
## 0.1.0-beta.1 (6 Feb 2017)
This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and
upgrade at their leisure. Since this is the first tagged release, there are no

66
vendor/go.uber.org/zap/README.md generated vendored
View File

@@ -1,7 +1,16 @@
# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
# :zap: zap
<div align="center">
Blazing fast, structured, leveled logging in Go.
![Zap logo](assets/logo.png)
[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
</div>
## Installation
`go get -u go.uber.org/zap`
@@ -66,41 +75,44 @@ Log a message and 10 fields:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
| :zap: zap | 1744 ns/op | +0% | 5 allocs/op
| :zap: zap (sugared) | 2483 ns/op | +42% | 10 allocs/op
| zerolog | 918 ns/op | -47% | 1 allocs/op
| go-kit | 5590 ns/op | +221% | 57 allocs/op
| slog | 5640 ns/op | +223% | 40 allocs/op
| apex/log | 21184 ns/op | +1115% | 63 allocs/op
| logrus | 24338 ns/op | +1296% | 79 allocs/op
| log15 | 26054 ns/op | +1394% | 74 allocs/op
| :zap: zap | 656 ns/op | +0% | 5 allocs/op
| :zap: zap (sugared) | 935 ns/op | +43% | 10 allocs/op
| zerolog | 380 ns/op | -42% | 1 allocs/op
| go-kit | 2249 ns/op | +243% | 57 allocs/op
| slog (LogAttrs) | 2479 ns/op | +278% | 40 allocs/op
| slog | 2481 ns/op | +278% | 42 allocs/op
| apex/log | 9591 ns/op | +1362% | 63 allocs/op
| log15 | 11393 ns/op | +1637% | 75 allocs/op
| logrus | 11654 ns/op | +1677% | 79 allocs/op
Log a message with a logger that already has 10 fields of context:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
| :zap: zap | 193 ns/op | +0% | 0 allocs/op
| :zap: zap (sugared) | 227 ns/op | +18% | 1 allocs/op
| zerolog | 81 ns/op | -58% | 0 allocs/op
| slog | 322 ns/op | +67% | 0 allocs/op
| go-kit | 5377 ns/op | +2686% | 56 allocs/op
| apex/log | 19518 ns/op | +10013% | 53 allocs/op
| log15 | 19812 ns/op | +10165% | 70 allocs/op
| logrus | 21997 ns/op | +11297% | 68 allocs/op
| :zap: zap | 67 ns/op | +0% | 0 allocs/op
| :zap: zap (sugared) | 84 ns/op | +25% | 1 allocs/op
| zerolog | 35 ns/op | -48% | 0 allocs/op
| slog | 193 ns/op | +188% | 0 allocs/op
| slog (LogAttrs) | 200 ns/op | +199% | 0 allocs/op
| go-kit | 2460 ns/op | +3572% | 56 allocs/op
| log15 | 9038 ns/op | +13390% | 70 allocs/op
| apex/log | 9068 ns/op | +13434% | 53 allocs/op
| logrus | 10521 ns/op | +15603% | 68 allocs/op
Log a static string, without any context or `printf`-style templating:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
| :zap: zap | 165 ns/op | +0% | 0 allocs/op
| :zap: zap (sugared) | 212 ns/op | +28% | 1 allocs/op
| zerolog | 95 ns/op | -42% | 0 allocs/op
| slog | 296 ns/op | +79% | 0 allocs/op
| go-kit | 415 ns/op | +152% | 9 allocs/op
| standard library | 422 ns/op | +156% | 2 allocs/op
| apex/log | 1601 ns/op | +870% | 5 allocs/op
| logrus | 3017 ns/op | +1728% | 23 allocs/op
| log15 | 3469 ns/op | +2002% | 20 allocs/op
| :zap: zap | 63 ns/op | +0% | 0 allocs/op
| :zap: zap (sugared) | 81 ns/op | +29% | 1 allocs/op
| zerolog | 32 ns/op | -49% | 0 allocs/op
| standard library | 124 ns/op | +97% | 1 allocs/op
| slog | 196 ns/op | +211% | 0 allocs/op
| slog (LogAttrs) | 200 ns/op | +217% | 0 allocs/op
| go-kit | 213 ns/op | +238% | 9 allocs/op
| apex/log | 771 ns/op | +1124% | 5 allocs/op
| logrus | 1439 ns/op | +2184% | 23 allocs/op
| log15 | 2069 ns/op | +3184% | 20 allocs/op
## Development Status: Stable
@@ -120,7 +132,7 @@ standard.
<hr>
Released under the [MIT License](LICENSE.txt).
Released under the [MIT License](LICENSE).
<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
benchmarking against slightly older versions of other packages. Versions are

View File

@@ -42,7 +42,7 @@ func (b *Buffer) AppendByte(v byte) {
b.bs = append(b.bs, v)
}
// AppendBytes writes a single byte to the Buffer.
// AppendBytes writes the given slice of bytes to the Buffer.
func (b *Buffer) AppendBytes(v []byte) {
b.bs = append(b.bs, v...)
}

2
vendor/go.uber.org/zap/field.go generated vendored
View File

@@ -460,6 +460,8 @@ func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error {
// - https://github.com/uber-go/zap/pull/1304
// - https://github.com/uber-go/zap/pull/1305
// - https://github.com/uber-go/zap/pull/1308
//
// See https://github.com/golang/go/issues/62077 for upstream issue.
type anyFieldC[T any] func(string, T) Field
func (f anyFieldC[T]) Any(key string, val any) Field {

39
vendor/go.uber.org/zap/logger.go generated vendored
View File

@@ -43,6 +43,7 @@ type Logger struct {
development bool
addCaller bool
onPanic zapcore.CheckWriteHook // default is WriteThenPanic
onFatal zapcore.CheckWriteHook // default is WriteThenFatal
name string
@@ -345,27 +346,12 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// Set up any required terminal behavior.
switch ent.Level {
case zapcore.PanicLevel:
ce = ce.After(ent, zapcore.WriteThenPanic)
ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic))
case zapcore.FatalLevel:
onFatal := log.onFatal
// nil or WriteThenNoop will lead to continued execution after
// a Fatal log entry, which is unexpected. For example,
//
// f, err := os.Open(..)
// if err != nil {
// log.Fatal("cannot open", zap.Error(err))
// }
// fmt.Println(f.Name())
//
// The f.Name() will panic if we continue execution after the
// log.Fatal.
if onFatal == nil || onFatal == zapcore.WriteThenNoop {
onFatal = zapcore.WriteThenFatal
}
ce = ce.After(ent, onFatal)
ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal))
case zapcore.DPanicLevel:
if log.development {
ce = ce.After(ent, zapcore.WriteThenPanic)
ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic))
}
}
@@ -430,3 +416,20 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
return ce
}
func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook {
// A nil or WriteThenNoop hook will lead to continued execution after
// a Panic or Fatal log entry, which is unexpected. For example,
//
// f, err := os.Open(..)
// if err != nil {
// log.Fatal("cannot open", zap.Error(err))
// }
// fmt.Println(f.Name())
//
// The f.Name() will panic if we continue execution after the log.Fatal.
if override == nil || override == zapcore.WriteThenNoop {
return defaultHook
}
return override
}

15
vendor/go.uber.org/zap/options.go generated vendored
View File

@@ -132,6 +132,21 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option {
})
}
// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs.
// Zap will call this hook after writing a log statement with a Panic/DPanic level.
//
// For example, the following builds a logger that will exit the current
// goroutine after writing a Panic/DPanic log message, but it will not start a panic.
//
// zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit))
//
// This is useful for testing Panic/DPanic log output.
func WithPanicHook(hook zapcore.CheckWriteHook) Option {
return optionFunc(func(log *Logger) {
log.onPanic = hook
})
}
// OnFatal sets the action to take on fatal logs.
//
// Deprecated: Use [WithFatalHook] instead.

39
vendor/go.uber.org/zap/sugar.go generated vendored
View File

@@ -115,6 +115,21 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger {
return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
}
// WithLazy adds a variadic number of fields to the logging context lazily.
// The fields are evaluated only if the logger is further chained with [With]
// or is written to with any of the log level methods.
// Until that occurs, the logger may retain references to objects inside the fields,
// and logging will reflect the state of an object at the time of logging,
// not the time of WithLazy().
//
// Similar to [With], fields added to the child don't affect the parent,
// and vice versa. Also, the keys in key-value pairs should be strings. In development,
// passing a non-string key panics, while in production it logs an error and skips the pair.
// Passing an orphaned key has the same behavior.
func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger {
return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)}
}
// Level reports the minimum enabled level for this logger.
//
// For NopLoggers, this is [zapcore.InvalidLevel].
@@ -122,6 +137,12 @@ func (s *SugaredLogger) Level() zapcore.Level {
return zapcore.LevelOf(s.base.core)
}
// Log logs the provided arguments at provided level.
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) {
s.log(lvl, "", args, nil)
}
// Debug logs the provided arguments at [DebugLevel].
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Debug(args ...interface{}) {
@@ -165,6 +186,12 @@ func (s *SugaredLogger) Fatal(args ...interface{}) {
s.log(FatalLevel, "", args, nil)
}
// Logf formats the message according to the format specifier
// and logs it at provided level.
func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) {
s.log(lvl, template, args, nil)
}
// Debugf formats the message according to the format specifier
// and logs it at [DebugLevel].
func (s *SugaredLogger) Debugf(template string, args ...interface{}) {
@@ -208,6 +235,12 @@ func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
s.log(FatalLevel, template, args, nil)
}
// Logw logs a message with some additional context. The variadic key-value
// pairs are treated as they are in With.
func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) {
s.log(lvl, msg, nil, keysAndValues)
}
// Debugw logs a message with some additional context. The variadic key-value
// pairs are treated as they are in With.
//
@@ -255,6 +288,12 @@ func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) {
s.log(FatalLevel, msg, nil, keysAndValues)
}
// Logln logs a message at provided level.
// Spaces are always added between arguments.
func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) {
s.logln(lvl, args, nil)
}
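Taken together, the new level-parameterized methods mirror the fixed-level ones; a quick sketch:

sugar := zap.NewExample().Sugar()
sugar.Log(zap.InfoLevel, "plain ", "args")
sugar.Logf(zap.WarnLevel, "formatted: %d", 42)
sugar.Logw(zap.ErrorLevel, "with context", "key", "value")
sugar.Logln(zap.DebugLevel, "spaced", "args")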
// Debugln logs a message at [DebugLevel].
// Spaces are always added between arguments.
func (s *SugaredLogger) Debugln(args ...interface{}) {

View File

@@ -77,7 +77,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
// If this ever becomes a performance bottleneck, we can implement
// ArrayEncoder for our plain-text format.
arr := getSliceEncoder()
if c.TimeKey != "" && c.EncodeTime != nil {
if c.TimeKey != "" && c.EncodeTime != nil && !ent.Time.IsZero() {
c.EncodeTime(ent.Time, arr)
}
if c.LevelKey != "" && c.EncodeLevel != nil {

View File

@@ -37,6 +37,9 @@ const DefaultLineEnding = "\n"
const OmitKey = ""
// A LevelEncoder serializes a Level to a primitive type.
//
// This function must make exactly one call
// to a PrimitiveArrayEncoder's Append* method.
type LevelEncoder func(Level, PrimitiveArrayEncoder)
// LowercaseLevelEncoder serializes a Level to a lowercase string. For example,
@@ -90,6 +93,9 @@ func (e *LevelEncoder) UnmarshalText(text []byte) error {
}
// A TimeEncoder serializes a time.Time to a primitive type.
//
// This function must make exactly one call
// to a PrimitiveArrayEncoder's Append* method.
type TimeEncoder func(time.Time, PrimitiveArrayEncoder)
// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds
@@ -219,6 +225,9 @@ func (e *TimeEncoder) UnmarshalJSON(data []byte) error {
}
// A DurationEncoder serializes a time.Duration to a primitive type.
//
// This function must make exactly one call
// to a PrimitiveArrayEncoder's Append* method.
type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)
// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed.
@@ -262,6 +271,9 @@ func (e *DurationEncoder) UnmarshalText(text []byte) error {
}
// A CallerEncoder serializes an EntryCaller to a primitive type.
//
// This function must make exactly one call
// to a PrimitiveArrayEncoder's Append* method.
type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder)
// FullCallerEncoder serializes a caller in /full/path/to/package/file:line
@@ -292,6 +304,9 @@ func (e *CallerEncoder) UnmarshalText(text []byte) error {
// A NameEncoder serializes a period-separated logger name to a primitive
// type.
//
// This function must make exactly one call
// to a PrimitiveArrayEncoder's Append* method.
type NameEncoder func(string, PrimitiveArrayEncoder)
// FullNameEncoder serializes the logger name as-is.

View File

@@ -47,7 +47,7 @@ const (
ByteStringType
// Complex128Type indicates that the field carries a complex128.
Complex128Type
// Complex64Type indicates that the field carries a complex128.
// Complex64Type indicates that the field carries a complex64.
Complex64Type
// DurationType indicates that the field carries a time.Duration.
DurationType

View File

@@ -372,7 +372,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
final.AppendString(ent.Level.String())
}
}
if final.TimeKey != "" {
if final.TimeKey != "" && !ent.Time.IsZero() {
final.AddTime(final.TimeKey, ent.Time)
}
if ent.LoggerName != "" && final.NameKey != "" {

View File

@@ -36,16 +36,14 @@ const (
grpcLvlFatal
)
var (
// _grpcToZapLevel maps gRPC log levels to zap log levels.
// See https://pkg.go.dev/go.uber.org/zap@v1.16.0/zapcore#Level
_grpcToZapLevel = map[int]zapcore.Level{
// _grpcToZapLevel maps gRPC log levels to zap log levels.
// See https://pkg.go.dev/go.uber.org/zap@v1.16.0/zapcore#Level
var _grpcToZapLevel = map[int]zapcore.Level{
grpcLvlInfo: zapcore.InfoLevel,
grpcLvlWarn: zapcore.WarnLevel,
grpcLvlError: zapcore.ErrorLevel,
grpcLvlFatal: zapcore.FatalLevel,
}
)
}
// An Option overrides a Logger's default configuration.
type Option interface {

View File

@@ -308,6 +308,7 @@ var laxGoVersionRE = lazyregexp.New(`^v?(([1-9][0-9]*)\.(0|[1-9][0-9]*))([^0-9].
// Toolchains must be named beginning with `go1`,
// like "go1.20.3" or "go1.20.3-gccgo". As a special case, "default" is also permitted.
// TODO(samthanawalla): Replace regex with https://pkg.go.dev/go/version#IsValid in 1.23+
var ToolchainRE = lazyregexp.New(`^default$|^go1($|\.)`)
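For reference, the regexp accepts exactly the forms named in the updated error messages below:

modfile.ToolchainRE.MatchString("go1.23.0")       // true
modfile.ToolchainRE.MatchString("go1.20.3-gccgo") // true
modfile.ToolchainRE.MatchString("default")        // true
modfile.ToolchainRE.MatchString("local")          // false (hence the message fix below)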
func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, args []string, fix VersionFixer, strict bool) {
@@ -384,7 +385,7 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a
errorf("toolchain directive expects exactly one argument")
return
} else if strict && !ToolchainRE.MatchString(args[0]) {
errorf("invalid toolchain version '%s': must match format go1.23.0 or local", args[0])
errorf("invalid toolchain version '%s': must match format go1.23.0 or default", args[0])
return
}
f.Toolchain = &Toolchain{Syntax: line}
@@ -630,7 +631,7 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string,
errorf("go directive expects exactly one argument")
return
} else if !GoVersionRE.MatchString(args[0]) {
errorf("invalid go version '%s': must match format 1.23", args[0])
errorf("invalid go version '%s': must match format 1.23.0", args[0])
return
}
@@ -646,7 +647,7 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string,
errorf("toolchain directive expects exactly one argument")
return
} else if !ToolchainRE.MatchString(args[0]) {
errorf("invalid toolchain version '%s': must match format go1.23 or local", args[0])
errorf("invalid toolchain version '%s': must match format go1.23.0 or default", args[0])
return
}

View File

@@ -2911,6 +2911,15 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
fl = &cs.flow
}
if !fl.add(int32(f.Increment)) {
// For a stream-level flow-control error, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR
if cs != nil {
rl.endStreamError(cs, StreamError{
StreamID: f.StreamID,
Code: ErrCodeFlowControl,
})
return nil
}
return ConnectionError(ErrCodeFlowControl)
}
cc.cond.Broadcast()

View File

@@ -6,10 +6,12 @@ package websocket
import (
"bufio"
"context"
"io"
"net"
"net/http"
"net/url"
"time"
)
// DialError is an error that occurs while dialling a websocket server.
@@ -79,28 +81,59 @@ func parseAuthority(location *url.URL) string {
// DialConfig opens a new client connection to a WebSocket with a config.
func DialConfig(config *Config) (ws *Conn, err error) {
var client net.Conn
return config.DialContext(context.Background())
}
// DialContext opens a new client connection to a WebSocket, with context support for timeouts/cancellation.
func (config *Config) DialContext(ctx context.Context) (*Conn, error) {
if config.Location == nil {
return nil, &DialError{config, ErrBadWebSocketLocation}
}
if config.Origin == nil {
return nil, &DialError{config, ErrBadWebSocketOrigin}
}
dialer := config.Dialer
if dialer == nil {
dialer = &net.Dialer{}
}
client, err = dialWithDialer(dialer, config)
client, err := dialWithDialer(ctx, dialer, config)
if err != nil {
goto Error
return nil, &DialError{config, err}
}
// Cleanup the connection if we fail to create the websocket successfully
success := false
defer func() {
if !success {
_ = client.Close()
}
}()
var ws *Conn
var wsErr error
doneConnecting := make(chan struct{})
go func() {
defer close(doneConnecting)
ws, err = NewClient(config, client)
if err != nil {
client.Close()
goto Error
wsErr = &DialError{config, err}
}
return
}()
Error:
return nil, &DialError{config, err}
// The websocket.NewClient() function can block indefinitely, so make sure that we
// respect the deadlines specified by the context.
select {
case <-ctx.Done():
// Force the pending operations to fail, terminating the pending connection attempt
_ = client.SetDeadline(time.Now())
<-doneConnecting // Wait for the goroutine that tries to establish the connection to finish
return nil, &DialError{config, ctx.Err()}
case <-doneConnecting:
if wsErr == nil {
success = true // Disarm the deferred connection cleanup
}
return ws, wsErr
}
}
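A hypothetical client using the new context-aware entry point (example.invalid is a placeholder host):

ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
config, err := websocket.NewConfig("ws://example.invalid/ws", "http://example.invalid/")
if err != nil {
	log.Fatal(err)
}
conn, err := config.DialContext(ctx) // on timeout, returns a DialError wrapping ctx.Err()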

View File

@@ -5,18 +5,23 @@
package websocket
import (
"context"
"crypto/tls"
"net"
)
func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) {
func dialWithDialer(ctx context.Context, dialer *net.Dialer, config *Config) (conn net.Conn, err error) {
switch config.Location.Scheme {
case "ws":
conn, err = dialer.Dial("tcp", parseAuthority(config.Location))
conn, err = dialer.DialContext(ctx, "tcp", parseAuthority(config.Location))
case "wss":
conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig)
tlsDialer := &tls.Dialer{
NetDialer: dialer,
Config: config.TlsConfig,
}
conn, err = tlsDialer.DialContext(ctx, "tcp", parseAuthority(config.Location))
default:
err = ErrBadScheme
}

View File

@@ -22,7 +22,7 @@ import (
const (
adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc"
universeDomainDefault = "googleapis.com"
defaultUniverseDomain = "googleapis.com"
)
// Credentials holds Google credentials, including "Application Default Credentials".
@@ -58,7 +58,7 @@ type Credentials struct {
// See also [The attached service account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa).
func (c *Credentials) UniverseDomain() string {
if c.universeDomain == "" {
return universeDomainDefault
return defaultUniverseDomain
}
return c.universeDomain
}
@@ -89,7 +89,7 @@ func (c *Credentials) GetUniverseDomain() (string, error) {
// computeUniverseDomain that did not set universeDomain, set the default
// universe domain.
if c.universeDomain == "" {
c.universeDomain = universeDomainDefault
c.universeDomain = defaultUniverseDomain
}
return c.universeDomain, nil
}
@@ -103,7 +103,7 @@ func (c *Credentials) computeUniverseDomain() error {
if err != nil {
if _, ok := err.(metadata.NotDefinedError); ok {
// http.StatusNotFound (404)
c.universeDomain = universeDomainDefault
c.universeDomain = defaultUniverseDomain
return nil
} else {
return err
@@ -287,7 +287,7 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params
}
// Authorized user credentials are only supported in the googleapis.com universe.
if f.Type == userCredentialsKey {
universeDomain = universeDomainDefault
universeDomain = defaultUniverseDomain
}
ts, err := f.tokenSource(ctx, params)

View File

@@ -22,91 +22,9 @@
// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or
// create an http.Client.
//
// # Workload Identity Federation
// # Workload and Workforce Identity Federation
//
// Using workload identity federation, your application can access Google Cloud
// resources from Amazon Web Services (AWS), Microsoft Azure or any identity
// provider that supports OpenID Connect (OIDC) or SAML 2.0.
// Traditionally, applications running outside Google Cloud have used service
// account keys to access Google Cloud resources. Using identity federation,
// you can allow your workload to impersonate a service account.
// This lets you access Google Cloud resources directly, eliminating the
// maintenance and security burden associated with service account keys.
//
// Follow the detailed instructions on how to configure Workload Identity Federation
// in various platforms:
//
// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#aws
// Microsoft Azure: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#azure
// OIDC identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#oidc
// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#saml
//
// For OIDC and SAML providers, the library can retrieve tokens in three ways:
// from a local file location (file-sourced credentials), from a server
// (URL-sourced credentials), or from a local executable (executable-sourced
// credentials).
// For file-sourced credentials, a background process needs to be continuously
// refreshing the file location with a new OIDC/SAML token prior to expiration.
// For tokens with one hour lifetimes, the token needs to be updated in the file
// every hour. The token can be stored directly as plain text or in JSON format.
// For URL-sourced credentials, a local server needs to host a GET endpoint to
// return the OIDC/SAML token. The response can be in plain text or JSON.
// Additional required request headers can also be specified.
// For executable-sourced credentials, an application needs to be available to
// output the OIDC/SAML token and other information in a JSON format.
// For more information on how these work (and how to implement
// executable-sourced credentials), please check out:
// https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create_a_credential_configuration
//
// Note that this library does not perform any validation on the token_url, token_info_url,
// or service_account_impersonation_url fields of the credential configuration.
// It is not recommended to use a credential configuration that you did not generate with
// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain.
//
// # Workforce Identity Federation
//
// Workforce identity federation lets you use an external identity provider (IdP) to
// authenticate and authorize a workforce—a group of users, such as employees, partners,
// and contractors—using IAM, so that the users can access Google Cloud services.
// Workforce identity federation extends Google Cloud's identity capabilities to support
// syncless, attribute-based single sign on.
//
// With workforce identity federation, your workforce can access Google Cloud resources
// using an external identity provider (IdP) that supports OpenID Connect (OIDC) or
// SAML 2.0 such as Azure Active Directory (Azure AD), Active Directory Federation
// Services (AD FS), Okta, and others.
//
// Follow the detailed instructions on how to configure Workload Identity Federation
// in various platforms:
//
// Azure AD: https://cloud.google.com/iam/docs/workforce-sign-in-azure-ad
// Okta: https://cloud.google.com/iam/docs/workforce-sign-in-okta
// OIDC identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#oidc
// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#saml
//
// For workforce identity federation, the library can retrieve tokens in three ways:
// from a local file location (file-sourced credentials), from a server
// (URL-sourced credentials), or from a local executable (executable-sourced
// credentials).
// For file-sourced credentials, a background process needs to be continuously
// refreshing the file location with a new OIDC/SAML token prior to expiration.
// For tokens with one-hour lifetimes, the token needs to be updated in the file
// every hour. The token can be stored directly as plain text or in JSON format.
// For URL-sourced credentials, a local server needs to host a GET endpoint to
// return the OIDC/SAML token. The response can be in plain text or JSON.
// Additional required request headers can also be specified.
// For executable-sourced credentials, an application needs to be available to
// output the OIDC/SAML token and other information in a JSON format.
// For more information on how these work (and how to implement
// executable-sourced credentials), please check out:
// https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in
//
// # Security considerations
//
// Note that this library does not perform any validation on the token_url, token_info_url,
// or service_account_impersonation_url fields of the credential configuration.
// It is not recommended to use a credential configuration that you did not generate with
// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain.
// For information on how to use Workload and Workforce Identity Federation, see [golang.org/x/oauth2/google/externalaccount].
//
// # Credentials
//

View File

@@ -26,22 +26,28 @@ import (
"golang.org/x/oauth2"
)
type awsSecurityCredentials struct {
// AwsSecurityCredentials models AWS security credentials.
type AwsSecurityCredentials struct {
// AccessKeyId is the AWS Access Key ID - Required.
AccessKeyID string `json:"AccessKeyID"`
// SecretAccessKey is the AWS Secret Access Key - Required.
SecretAccessKey string `json:"SecretAccessKey"`
SecurityToken string `json:"Token"`
// SessionToken is the AWS Session token. This should be provided for temporary AWS security credentials - Optional.
SessionToken string `json:"Token"`
}
// awsRequestSigner is a utility type that signs HTTP requests using an AWS V4 signature.
type awsRequestSigner struct {
RegionName string
AwsSecurityCredentials awsSecurityCredentials
AwsSecurityCredentials *AwsSecurityCredentials
}
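// For illustration, a signer for static credentials might be constructed
// as below; the region and key values are placeholders:
//
//	signer := &awsRequestSigner{
//		RegionName: "us-east-1",
//		AwsSecurityCredentials: &AwsSecurityCredentials{
//			AccessKeyID:     "AKIDEXAMPLE",
//			SecretAccessKey: "secret",
//		},
//	}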
// getenv aliases os.Getenv for testing
var getenv = os.Getenv
const (
defaultRegionalCredentialVerificationUrl = "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15"
// AWS Signature Version 4 signing algorithm identifier.
awsAlgorithm = "AWS4-HMAC-SHA256"
@@ -197,8 +203,8 @@ func (rs *awsRequestSigner) SignRequest(req *http.Request) error {
signedRequest.Header.Add("host", requestHost(req))
if rs.AwsSecurityCredentials.SecurityToken != "" {
signedRequest.Header.Add(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SecurityToken)
if rs.AwsSecurityCredentials.SessionToken != "" {
signedRequest.Header.Add(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SessionToken)
}
if signedRequest.Header.Get("date") == "" {
@@ -251,16 +257,18 @@ func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp
}
type awsCredentialSource struct {
EnvironmentID string
RegionURL string
RegionalCredVerificationURL string
CredVerificationURL string
IMDSv2SessionTokenURL string
TargetResource string
environmentID string
regionURL string
regionalCredVerificationURL string
credVerificationURL string
imdsv2SessionTokenURL string
targetResource string
requestSigner *awsRequestSigner
region string
ctx context.Context
client *http.Client
awsSecurityCredentialsSupplier AwsSecurityCredentialsSupplier
supplierOptions SupplierOptions
}
type awsRequestHeader struct {
@@ -292,18 +300,25 @@ func canRetrieveSecurityCredentialFromEnvironment() bool {
return getenv(awsAccessKeyId) != "" && getenv(awsSecretAccessKey) != ""
}
func shouldUseMetadataServer() bool {
return !canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment()
func (cs awsCredentialSource) shouldUseMetadataServer() bool {
return cs.awsSecurityCredentialsSupplier == nil && (!canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment())
}
func (cs awsCredentialSource) credentialSourceType() string {
if cs.awsSecurityCredentialsSupplier != nil {
return "programmatic"
}
return "aws"
}
func (cs awsCredentialSource) subjectToken() (string, error) {
// Set Defaults
if cs.regionalCredVerificationURL == "" {
cs.regionalCredVerificationURL = defaultRegionalCredentialVerificationUrl
}
if cs.requestSigner == nil {
headers := make(map[string]string)
if shouldUseMetadataServer() {
if cs.shouldUseMetadataServer() {
awsSessionToken, err := cs.getAWSSessionToken()
if err != nil {
return "", err
@@ -318,8 +333,8 @@ func (cs awsCredentialSource) subjectToken() (string, error) {
if err != nil {
return "", err
}
if cs.region, err = cs.getRegion(headers); err != nil {
cs.region, err = cs.getRegion(headers)
if err != nil {
return "", err
}
@@ -331,7 +346,7 @@ func (cs awsCredentialSource) subjectToken() (string, error) {
// Generate the signed request to AWS STS GetCallerIdentity API.
// Use the required regional endpoint. Otherwise, the request will fail.
req, err := http.NewRequest("POST", strings.Replace(cs.RegionalCredVerificationURL, "{region}", cs.region, 1), nil)
req, err := http.NewRequest("POST", strings.Replace(cs.regionalCredVerificationURL, "{region}", cs.region, 1), nil)
if err != nil {
return "", err
}
@@ -339,8 +354,8 @@ func (cs awsCredentialSource) subjectToken() (string, error) {
// provider, with or without the HTTPS prefix.
// Including this header as part of the signature is recommended to
// ensure data integrity.
if cs.TargetResource != "" {
req.Header.Add("x-goog-cloud-target-resource", cs.TargetResource)
if cs.targetResource != "" {
req.Header.Add("x-goog-cloud-target-resource", cs.targetResource)
}
cs.requestSigner.SignRequest(req)
@@ -387,11 +402,11 @@ func (cs awsCredentialSource) subjectToken() (string, error) {
}
func (cs *awsCredentialSource) getAWSSessionToken() (string, error) {
if cs.IMDSv2SessionTokenURL == "" {
if cs.imdsv2SessionTokenURL == "" {
return "", nil
}
req, err := http.NewRequest("PUT", cs.IMDSv2SessionTokenURL, nil)
req, err := http.NewRequest("PUT", cs.imdsv2SessionTokenURL, nil)
if err != nil {
return "", err
}
@@ -410,25 +425,29 @@ func (cs *awsCredentialSource) getAWSSessionToken() (string, error) {
}
if resp.StatusCode != 200 {
return "", fmt.Errorf("oauth2/google: unable to retrieve AWS session token - %s", string(respBody))
return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS session token - %s", string(respBody))
}
return string(respBody), nil
}
func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, error) {
if cs.awsSecurityCredentialsSupplier != nil {
return cs.awsSecurityCredentialsSupplier.AwsRegion(cs.ctx, cs.supplierOptions)
}
if canRetrieveRegionFromEnvironment() {
if envAwsRegion := getenv(awsRegion); envAwsRegion != "" {
cs.region = envAwsRegion
return envAwsRegion, nil
}
return getenv("AWS_DEFAULT_REGION"), nil
}
if cs.RegionURL == "" {
return "", errors.New("oauth2/google: unable to determine AWS region")
if cs.regionURL == "" {
return "", errors.New("oauth2/google/externalaccount: unable to determine AWS region")
}
req, err := http.NewRequest("GET", cs.RegionURL, nil)
req, err := http.NewRequest("GET", cs.regionURL, nil)
if err != nil {
return "", err
}
@@ -449,7 +468,7 @@ func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, err
}
if resp.StatusCode != 200 {
return "", fmt.Errorf("oauth2/google: unable to retrieve AWS region - %s", string(respBody))
return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS region - %s", string(respBody))
}
// This endpoint will return the region in format: us-east-2b.
@@ -461,12 +480,15 @@ func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, err
return string(respBody[:respBodyEnd]), nil
}
func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result awsSecurityCredentials, err error) {
func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result *AwsSecurityCredentials, err error) {
if cs.awsSecurityCredentialsSupplier != nil {
return cs.awsSecurityCredentialsSupplier.AwsSecurityCredentials(cs.ctx, cs.supplierOptions)
}
if canRetrieveSecurityCredentialFromEnvironment() {
return awsSecurityCredentials{
return &AwsSecurityCredentials{
AccessKeyID: getenv(awsAccessKeyId),
SecretAccessKey: getenv(awsSecretAccessKey),
SecurityToken: getenv(awsSessionToken),
SessionToken: getenv(awsSessionToken),
}, nil
}
@@ -481,20 +503,20 @@ func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string)
}
if credentials.AccessKeyID == "" {
return result, errors.New("oauth2/google: missing AccessKeyId credential")
return result, errors.New("oauth2/google/externalaccount: missing AccessKeyId credential")
}
if credentials.SecretAccessKey == "" {
return result, errors.New("oauth2/google: missing SecretAccessKey credential")
return result, errors.New("oauth2/google/externalaccount: missing SecretAccessKey credential")
}
return credentials, nil
return &credentials, nil
}
func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, headers map[string]string) (awsSecurityCredentials, error) {
var result awsSecurityCredentials
func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, headers map[string]string) (AwsSecurityCredentials, error) {
var result AwsSecurityCredentials
req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", cs.CredVerificationURL, roleName), nil)
req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", cs.credVerificationURL, roleName), nil)
if err != nil {
return result, err
}
@@ -516,7 +538,7 @@ func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, h
}
if resp.StatusCode != 200 {
return result, fmt.Errorf("oauth2/google: unable to retrieve AWS security credentials - %s", string(respBody))
return result, fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS security credentials - %s", string(respBody))
}
err = json.Unmarshal(respBody, &result)
@@ -524,11 +546,11 @@ func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, h
}
func (cs *awsCredentialSource) getMetadataRoleName(headers map[string]string) (string, error) {
if cs.CredVerificationURL == "" {
return "", errors.New("oauth2/google: unable to determine the AWS metadata server security credentials endpoint")
if cs.credVerificationURL == "" {
return "", errors.New("oauth2/google/externalaccount: unable to determine the AWS metadata server security credentials endpoint")
}
req, err := http.NewRequest("GET", cs.CredVerificationURL, nil)
req, err := http.NewRequest("GET", cs.credVerificationURL, nil)
if err != nil {
return "", err
}
@@ -549,7 +571,7 @@ func (cs *awsCredentialSource) getMetadataRoleName(headers map[string]string) (s
}
if resp.StatusCode != 200 {
return "", fmt.Errorf("oauth2/google: unable to retrieve AWS role name - %s", string(respBody))
return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS role name - %s", string(respBody))
}
return string(respBody), nil

View File

@@ -0,0 +1,484 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package externalaccount provides support for creating workload identity
federation and workforce identity federation token sources that can be
used to access Google Cloud resources from external identity providers.
# Workload Identity Federation
Using workload identity federation, your application can access Google Cloud
resources from Amazon Web Services (AWS), Microsoft Azure or any identity
provider that supports OpenID Connect (OIDC) or SAML 2.0.
Traditionally, applications running outside Google Cloud have used service
account keys to access Google Cloud resources. Using identity federation,
you can allow your workload to impersonate a service account.
This lets you access Google Cloud resources directly, eliminating the
maintenance and security burden associated with service account keys.
Follow the detailed instructions on how to configure Workload Identity Federation
in various platforms:
Amazon Web Services (AWS): https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#aws
Microsoft Azure: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#azure
OIDC identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#oidc
SAML 2.0 identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#saml
For OIDC and SAML providers, the library can retrieve tokens in four ways:
from a local file location (file-sourced credentials), from a server
(URL-sourced credentials), from a local executable (executable-sourced
credentials), or from a user defined function that returns an OIDC or SAML token.
For file-sourced credentials, a background process needs to be continuously
refreshing the file location with a new OIDC/SAML token prior to expiration.
For tokens with one-hour lifetimes, the token needs to be updated in the file
every hour. The token can be stored directly as plain text or in JSON format.
For URL-sourced credentials, a local server needs to host a GET endpoint to
return the OIDC/SAML token. The response can be in plain text or JSON.
Additional required request headers can also be specified.
For executable-sourced credentials, an application needs to be available to
output the OIDC/SAML token and other information in a JSON format.
For more information on how these work (and how to implement
executable-sourced credentials), please check out:
https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create_a_credential_configuration
To use a custom function to supply the token, define a struct that implements the [SubjectTokenSupplier] interface for OIDC/SAML providers,
or one that implements [AwsSecurityCredentialsSupplier] for AWS providers. This can then be used when building a [Config].
The [golang.org/x/oauth2.TokenSource] created from the config using [NewTokenSource] can then be used to access Google
Cloud resources. For instance, you can create a new client from the
[cloud.google.com/go/storage] package and pass in option.WithTokenSource(yourTokenSource).
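A minimal sketch of the supplier route follows; the audience, token type,
ctx, and subjectToken values are illustrative placeholders:

	type staticSupplier struct{ token string }

	func (s staticSupplier) SubjectToken(ctx context.Context, opts externalaccount.SupplierOptions) (string, error) {
		// A real supplier should mint or fetch a fresh OIDC/SAML token here.
		return s.token, nil
	}

	conf := externalaccount.Config{
		Audience:             "//iam.googleapis.com/projects/123/locations/global/workloadIdentityPools/pool/providers/provider",
		SubjectTokenType:     "urn:ietf:params:oauth:token-type:jwt",
		SubjectTokenSupplier: staticSupplier{token: subjectToken},
	}
	ts, err := externalaccount.NewTokenSource(ctx, conf)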
Note that this library does not perform any validation on the token_url, token_info_url,
or service_account_impersonation_url fields of the credential configuration.
It is not recommended to use a credential configuration that you did not generate with
the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain.
# Workforce Identity Federation
Workforce identity federation lets you use an external identity provider (IdP) to
authenticate and authorize a workforce—a group of users, such as employees, partners,
and contractors—using IAM, so that the users can access Google Cloud services.
Workforce identity federation extends Google Cloud's identity capabilities to support
syncless, attribute-based single sign-on.
With workforce identity federation, your workforce can access Google Cloud resources
using an external identity provider (IdP) that supports OpenID Connect (OIDC) or
SAML 2.0 such as Azure Active Directory (Azure AD), Active Directory Federation
Services (AD FS), Okta, and others.
Follow the detailed instructions on how to configure Workforce Identity Federation
in various platforms:
Azure AD: https://cloud.google.com/iam/docs/workforce-sign-in-azure-ad
Okta: https://cloud.google.com/iam/docs/workforce-sign-in-okta
OIDC identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#oidc
SAML 2.0 identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#saml
For workforce identity federation, the library can retrieve tokens in four ways:
from a local file location (file-sourced credentials), from a server
(URL-sourced credentials), from a local executable (executable-sourced
credentials), or from a user supplied function that returns an OIDC or SAML token.
For file-sourced credentials, a background process needs to be continuously
refreshing the file location with a new OIDC/SAML token prior to expiration.
For tokens with one-hour lifetimes, the token needs to be updated in the file
every hour. The token can be stored directly as plain text or in JSON format.
For URL-sourced credentials, a local server needs to host a GET endpoint to
return the OIDC/SAML token. The response can be in plain text or JSON.
Additional required request headers can also be specified.
For executable-sourced credentials, an application needs to be available to
output the OIDC/SAML token and other information in a JSON format.
For more information on how these work (and how to implement
executable-sourced credentials), please check out:
https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in
To use a custom function to supply the token, define a struct that implements the [SubjectTokenSupplier] interface for OIDC/SAML providers.
This can then be used when building a [Config].
The [golang.org/x/oauth2.TokenSource] created from the config using [NewTokenSource] can then be used to access Google
Cloud resources. For instance, you can create a new client from the
[cloud.google.com/go/storage] package and pass in option.WithTokenSource(yourTokenSource).
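A comparable workforce sketch, assuming mySupplier implements
[SubjectTokenSupplier] and that the pool, provider, and project values are
placeholders:

	conf := externalaccount.Config{
		Audience:                 "//iam.googleapis.com/locations/global/workforcePools/pool/providers/provider",
		SubjectTokenType:         "urn:ietf:params:oauth:token-type:id-token",
		SubjectTokenSupplier:     mySupplier{},
		WorkforcePoolUserProject: "123456", // project used for billing/quota
	}
	ts, err := externalaccount.NewTokenSource(ctx, conf)
	if err != nil {
		// handle error
	}
	client, err := storage.NewClient(ctx, option.WithTokenSource(ts))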
# Security considerations
Note that this library does not perform any validation on the token_url, token_info_url,
or service_account_impersonation_url fields of the credential configuration.
It is not recommended to use a credential configuration that you did not generate with
the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain.
*/
package externalaccount
import (
"context"
"fmt"
"net/http"
"regexp"
"strconv"
"strings"
"time"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google/internal/impersonate"
"golang.org/x/oauth2/google/internal/stsexchange"
)
const (
universeDomainPlaceholder = "UNIVERSE_DOMAIN"
defaultTokenURL = "https://sts.UNIVERSE_DOMAIN/v1/token"
defaultUniverseDomain = "googleapis.com"
)
// now aliases time.Now for testing
var now = func() time.Time {
return time.Now().UTC()
}
// Config stores the configuration for fetching tokens with external credentials.
type Config struct {
// Audience is the Secure Token Service (STS) audience which contains the resource name for the workload
// identity pool or the workforce pool and the provider identifier in that pool. Required.
Audience string
// SubjectTokenType is the STS token type based on the Oauth2.0 token exchange spec.
// Expected values include:
// “urn:ietf:params:oauth:token-type:jwt”
// “urn:ietf:params:oauth:token-type:id-token”
// “urn:ietf:params:oauth:token-type:saml2”
// “urn:ietf:params:aws:token-type:aws4_request”
// Required.
SubjectTokenType string
// TokenURL is the STS token exchange endpoint. If not provided, will default to
// https://sts.UNIVERSE_DOMAIN/v1/token, with UNIVERSE_DOMAIN set to the
// default service domain googleapis.com unless UniverseDomain is set.
// Optional.
TokenURL string
// TokenInfoURL is the token_info endpoint used to retrieve the account-related information
// (user attributes like the account identifier, e.g. email, username, uid). This is
// needed for gCloud session account identification. Optional.
TokenInfoURL string
// ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only
// required for workload identity pools when APIs to be accessed have not integrated with UberMint. Optional.
ServiceAccountImpersonationURL string
// ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation
// token will be valid for. If not provided, it will default to 3600. Optional.
ServiceAccountImpersonationLifetimeSeconds int
// ClientSecret is currently only required if the token_info endpoint also
// needs to be called with the generated GCP access token. When provided, STS will be
// called with additional basic authentication using ClientId as username and ClientSecret as password. Optional.
ClientSecret string
// ClientID is only required in conjunction with ClientSecret, as described above. Optional.
ClientID string
// CredentialSource contains the necessary information to retrieve the token itself, as well
// as some environmental information. One of SubjectTokenSupplier, AWSSecurityCredentialSupplier or
// CredentialSource must be provided. Optional.
CredentialSource *CredentialSource
// QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries
// will set the x-goog-user-project header which overrides the project associated with the credentials. Optional.
QuotaProjectID string
// Scopes contains the desired scopes for the returned access token. Optional.
Scopes []string
// WorkforcePoolUserProject is the workforce pool user project number when the credential
// corresponds to a workforce pool and not a workload identity pool.
// The underlying principal must still have serviceusage.services.use IAM
// permission to use the project for billing/quota. Optional.
WorkforcePoolUserProject string
// SubjectTokenSupplier is an optional token supplier for OIDC/SAML credentials.
// One of SubjectTokenSupplier, AwsSecurityCredentialsSupplier or CredentialSource must be provided. Optional.
SubjectTokenSupplier SubjectTokenSupplier
// AwsSecurityCredentialsSupplier is an AWS Security Credential supplier for AWS credentials.
// One of SubjectTokenSupplier, AwsSecurityCredentialsSupplier or CredentialSource must be provided. Optional.
AwsSecurityCredentialsSupplier AwsSecurityCredentialsSupplier
// UniverseDomain is the default service domain for a given Cloud universe.
// This value will be used in the default STS token URL. The default value
// is "googleapis.com". It will not be used if TokenURL is set. Optional.
UniverseDomain string
}
var (
validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`)
)
func validateWorkforceAudience(input string) bool {
return validWorkforceAudiencePattern.MatchString(input)
}
// NewTokenSource returns an external account TokenSource using the provided external account config.
func NewTokenSource(ctx context.Context, conf Config) (oauth2.TokenSource, error) {
if conf.Audience == "" {
return nil, fmt.Errorf("oauth2/google/externalaccount: Audience must be set")
}
if conf.SubjectTokenType == "" {
return nil, fmt.Errorf("oauth2/google/externalaccount: Subject token type must be set")
}
if conf.WorkforcePoolUserProject != "" {
valid := validateWorkforceAudience(conf.Audience)
if !valid {
return nil, fmt.Errorf("oauth2/google/externalaccount: Workforce pool user project should not be set for non-workforce pool credentials")
}
}
count := 0
if conf.CredentialSource != nil {
count++
}
if conf.SubjectTokenSupplier != nil {
count++
}
if conf.AwsSecurityCredentialsSupplier != nil {
count++
}
if count == 0 {
return nil, fmt.Errorf("oauth2/google/externalaccount: One of CredentialSource, SubjectTokenSupplier, or AwsSecurityCredentialsSupplier must be set")
}
if count > 1 {
return nil, fmt.Errorf("oauth2/google/externalaccount: Only one of CredentialSource, SubjectTokenSupplier, or AwsSecurityCredentialsSupplier must be set")
}
return conf.tokenSource(ctx, "https")
}
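// For example, a file-sourced configuration (audience and path are
// placeholders) would be wired up as:
//
//	ts, err := NewTokenSource(ctx, Config{
//		Audience:         "//iam.googleapis.com/projects/123/locations/global/workloadIdentityPools/pool/providers/provider",
//		SubjectTokenType: "urn:ietf:params:oauth:token-type:jwt",
//		CredentialSource: &CredentialSource{File: "/var/run/oidc/token"},
//	})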
// tokenSource is a private function that's directly called by some of the tests,
// because the unit test URLs are mocked, and would otherwise fail the
// validity check.
func (c *Config) tokenSource(ctx context.Context, scheme string) (oauth2.TokenSource, error) {
ts := tokenSource{
ctx: ctx,
conf: c,
}
if c.ServiceAccountImpersonationURL == "" {
return oauth2.ReuseTokenSource(nil, ts), nil
}
scopes := c.Scopes
ts.conf.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"}
imp := impersonate.ImpersonateTokenSource{
Ctx: ctx,
URL: c.ServiceAccountImpersonationURL,
Scopes: scopes,
Ts: oauth2.ReuseTokenSource(nil, ts),
TokenLifetimeSeconds: c.ServiceAccountImpersonationLifetimeSeconds,
}
return oauth2.ReuseTokenSource(nil, imp), nil
}
// Subject token file types.
const (
fileTypeText = "text"
fileTypeJSON = "json"
)
// Format contains information needed to retrieve a subject token for URL or File sourced credentials.
type Format struct {
// Type should be either "text" or "json". This determines whether the file or URL sourced credentials
// expect a simple text subject token or if the subject token will be contained in a JSON object.
// When not provided, "text" type is assumed.
Type string `json:"type"`
// SubjectTokenFieldName is only required for JSON format. This is the field name that the credentials will check
// for the subject token in the file or URL response. This would be "access_token" for azure.
SubjectTokenFieldName string `json:"subject_token_field_name"`
}
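// For example, a URL or file response shaped like Azure's
// {"access_token": "..."} would be described by:
//
//	format := Format{
//		Type:                  "json",
//		SubjectTokenFieldName: "access_token",
//	}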
// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange.
type CredentialSource struct {
// File is the location for file sourced credentials.
// One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question.
File string `json:"file"`
// Url is the URL to call for URL sourced credentials.
// One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question.
URL string `json:"url"`
// Headers are the headers to attach to the request for URL sourced credentials.
Headers map[string]string `json:"headers"`
// Executable is the configuration object for executable sourced credentials.
// One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question.
Executable *ExecutableConfig `json:"executable"`
// EnvironmentID is the EnvironmentID used for AWS sourced credentials. This should start with "AWS".
// One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question.
EnvironmentID string `json:"environment_id"`
// RegionURL is the metadata URL to retrieve the region from for EC2 AWS credentials.
RegionURL string `json:"region_url"`
// RegionalCredVerificationURL is the AWS regional credential verification URL, will default to
// "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" if not provided."
RegionalCredVerificationURL string `json:"regional_cred_verification_url"`
// IMDSv2SessionTokenURL is the URL to retrieve the session token when using IMDSv2 in AWS.
IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"`
// Format is the format type for the subject token. Used for File and URL sourced credentials. Expected values are "text" or "json".
Format Format `json:"format"`
}
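// For example, a file-sourced credential source (the path is a
// placeholder) looks like:
//
//	cs := CredentialSource{
//		File:   "/var/run/secrets/oidc/token",
//		Format: Format{Type: "text"},
//	}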
// ExecutableConfig contains information needed for executable sourced credentials.
type ExecutableConfig struct {
// Command is the full command to run to retrieve the subject token.
// This can include arguments. Must be an absolute path for the program. Required.
Command string `json:"command"`
// TimeoutMillis is the timeout duration, in milliseconds. Defaults to 30000 milliseconds when not provided. Optional.
TimeoutMillis *int `json:"timeout_millis"`
// OutputFile is the absolute path to the output file where the executable will cache the response.
// If specified, the auth libraries will first check this location before running the executable. Optional.
OutputFile string `json:"output_file"`
}
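// For example (the command is a placeholder; it must be an absolute path,
// optionally followed by arguments):
//
//	timeout := 30000
//	ec := &ExecutableConfig{
//		Command:       "/usr/local/bin/get-subject-token --json",
//		TimeoutMillis: &timeout,
//	}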
// SubjectTokenSupplier can be used to supply a subject token to exchange for a GCP access token.
type SubjectTokenSupplier interface {
// SubjectToken should return a valid subject token or an error.
// The external account token source does not cache the returned subject token, so caching
// logic should be implemented in the supplier to prevent multiple requests for the same subject token.
SubjectToken(ctx context.Context, options SupplierOptions) (string, error)
}
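// A caching supplier might look like this sketch, where mintToken is a
// hypothetical helper returning a fresh token and its expiry:
//
//	type cachingSupplier struct {
//		mu    sync.Mutex
//		token string
//		exp   time.Time
//	}
//
//	func (s *cachingSupplier) SubjectToken(ctx context.Context, opts SupplierOptions) (string, error) {
//		s.mu.Lock()
//		defer s.mu.Unlock()
//		if s.token != "" && time.Until(s.exp) > time.Minute {
//			return s.token, nil
//		}
//		tok, exp, err := mintToken(ctx)
//		if err != nil {
//			return "", err
//		}
//		s.token, s.exp = tok, exp
//		return tok, nil
//	}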
// AwsSecurityCredentialsSupplier can be used to supply AwsSecurityCredentials and an AWS Region to
// exchange for a GCP access token.
type AwsSecurityCredentialsSupplier interface {
// AwsRegion should return the AWS region or an error.
AwsRegion(ctx context.Context, options SupplierOptions) (string, error)
// AwsSecurityCredentials should return a valid set of AwsSecurityCredentials or an error.
// The external account token source does not cache the returned security credentials, so caching
// logic should be implemented in the supplier to prevent multiple requests for the same security credentials.
AwsSecurityCredentials(ctx context.Context, options SupplierOptions) (*AwsSecurityCredentials, error)
}
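// A static supplier, for instance (region and credential values are
// placeholders):
//
//	type staticAWSSupplier struct{ creds *AwsSecurityCredentials }
//
//	func (s staticAWSSupplier) AwsRegion(ctx context.Context, opts SupplierOptions) (string, error) {
//		return "us-east-1", nil
//	}
//
//	func (s staticAWSSupplier) AwsSecurityCredentials(ctx context.Context, opts SupplierOptions) (*AwsSecurityCredentials, error) {
//		return s.creds, nil
//	}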
// SupplierOptions contains information about the requested subject token or AWS security credentials from the
// Google external account credential.
type SupplierOptions struct {
// Audience is the requested audience for the external account credential.
Audience string
// SubjectTokenType is the requested subject token type for the external account credential. Expected values include:
// “urn:ietf:params:oauth:token-type:jwt”
// “urn:ietf:params:oauth:token-type:id-token”
// “urn:ietf:params:oauth:token-type:saml2”
// “urn:ietf:params:aws:token-type:aws4_request”
SubjectTokenType string
}
// tokenURL returns the default STS token endpoint with the configured universe
// domain.
func (c *Config) tokenURL() string {
if c.UniverseDomain == "" {
return strings.Replace(defaultTokenURL, universeDomainPlaceholder, defaultUniverseDomain, 1)
}
return strings.Replace(defaultTokenURL, universeDomainPlaceholder, c.UniverseDomain, 1)
}
// parse determines the type of CredentialSource needed.
func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) {
//set Defaults
if c.TokenURL == "" {
c.TokenURL = c.tokenURL()
}
supplierOptions := SupplierOptions{Audience: c.Audience, SubjectTokenType: c.SubjectTokenType}
if c.AwsSecurityCredentialsSupplier != nil {
awsCredSource := awsCredentialSource{
awsSecurityCredentialsSupplier: c.AwsSecurityCredentialsSupplier,
targetResource: c.Audience,
supplierOptions: supplierOptions,
ctx: ctx,
}
return awsCredSource, nil
} else if c.SubjectTokenSupplier != nil {
return programmaticRefreshCredentialSource{subjectTokenSupplier: c.SubjectTokenSupplier, supplierOptions: supplierOptions, ctx: ctx}, nil
} else if len(c.CredentialSource.EnvironmentID) > 3 && c.CredentialSource.EnvironmentID[:3] == "aws" {
if awsVersion, err := strconv.Atoi(c.CredentialSource.EnvironmentID[3:]); err == nil {
if awsVersion != 1 {
return nil, fmt.Errorf("oauth2/google/externalaccount: aws version '%d' is not supported in the current build", awsVersion)
}
awsCredSource := awsCredentialSource{
environmentID: c.CredentialSource.EnvironmentID,
regionURL: c.CredentialSource.RegionURL,
regionalCredVerificationURL: c.CredentialSource.RegionalCredVerificationURL,
credVerificationURL: c.CredentialSource.URL,
targetResource: c.Audience,
ctx: ctx,
}
if c.CredentialSource.IMDSv2SessionTokenURL != "" {
awsCredSource.imdsv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL
}
return awsCredSource, nil
}
} else if c.CredentialSource.File != "" {
return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil
} else if c.CredentialSource.URL != "" {
return urlCredentialSource{URL: c.CredentialSource.URL, Headers: c.CredentialSource.Headers, Format: c.CredentialSource.Format, ctx: ctx}, nil
} else if c.CredentialSource.Executable != nil {
return createExecutableCredential(ctx, c.CredentialSource.Executable, c)
}
return nil, fmt.Errorf("oauth2/google/externalaccount: unable to parse credential source")
}
type baseCredentialSource interface {
credentialSourceType() string
subjectToken() (string, error)
}
// tokenSource is the source that handles external credentials. It is used to retrieve Tokens.
type tokenSource struct {
ctx context.Context
conf *Config
}
func getMetricsHeaderValue(conf *Config, credSource baseCredentialSource) string {
return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t",
goVersion(),
"unknown",
credSource.credentialSourceType(),
conf.ServiceAccountImpersonationURL != "",
conf.ServiceAccountImpersonationLifetimeSeconds != 0)
}
// Token allows tokenSource to conform to the oauth2.TokenSource interface.
func (ts tokenSource) Token() (*oauth2.Token, error) {
conf := ts.conf
credSource, err := conf.parse(ts.ctx)
if err != nil {
return nil, err
}
subjectToken, err := credSource.subjectToken()
if err != nil {
return nil, err
}
stsRequest := stsexchange.TokenExchangeRequest{
GrantType: "urn:ietf:params:oauth:grant-type:token-exchange",
Audience: conf.Audience,
Scope: conf.Scopes,
RequestedTokenType: "urn:ietf:params:oauth:token-type:access_token",
SubjectToken: subjectToken,
SubjectTokenType: conf.SubjectTokenType,
}
header := make(http.Header)
header.Add("Content-Type", "application/x-www-form-urlencoded")
header.Add("x-goog-api-client", getMetricsHeaderValue(conf, credSource))
clientAuth := stsexchange.ClientAuthentication{
AuthStyle: oauth2.AuthStyleInHeader,
ClientID: conf.ClientID,
ClientSecret: conf.ClientSecret,
}
var options map[string]interface{}
// Do not pass workforce_pool_user_project when client authentication is used.
// The client ID is sufficient for determining the user project.
if conf.WorkforcePoolUserProject != "" && conf.ClientID == "" {
options = map[string]interface{}{
"userProject": conf.WorkforcePoolUserProject,
}
}
stsResp, err := stsexchange.ExchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options)
if err != nil {
return nil, err
}
accessToken := &oauth2.Token{
AccessToken: stsResp.AccessToken,
TokenType: stsResp.TokenType,
}
if stsResp.ExpiresIn < 0 {
return nil, fmt.Errorf("oauth2/google/externalaccount: got invalid expiry from security token service")
}
accessToken.Expiry = now().Add(time.Duration(stsResp.ExpiresIn) * time.Second)
if stsResp.RefreshToken != "" {
accessToken.RefreshToken = stsResp.RefreshToken
}
return accessToken, nil
}

View File

@@ -19,7 +19,7 @@ import (
"time"
)
var serviceAccountImpersonationRE = regexp.MustCompile("https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken")
var serviceAccountImpersonationRE = regexp.MustCompile("https://iamcredentials\\..+/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken")
const (
executableSupportedMaxVersion = 1
@@ -39,51 +39,51 @@ func (nce nonCacheableError) Error() string {
}
func missingFieldError(source, field string) error {
return fmt.Errorf("oauth2/google: %v missing `%q` field", source, field)
return fmt.Errorf("oauth2/google/externalaccount: %v missing `%q` field", source, field)
}
func jsonParsingError(source, data string) error {
return fmt.Errorf("oauth2/google: unable to parse %v\nResponse: %v", source, data)
return fmt.Errorf("oauth2/google/externalaccount: unable to parse %v\nResponse: %v", source, data)
}
func malformedFailureError() error {
return nonCacheableError{"oauth2/google: response must include `error` and `message` fields when unsuccessful"}
return nonCacheableError{"oauth2/google/externalaccount: response must include `error` and `message` fields when unsuccessful"}
}
func userDefinedError(code, message string) error {
return nonCacheableError{fmt.Sprintf("oauth2/google: response contains unsuccessful response: (%v) %v", code, message)}
return nonCacheableError{fmt.Sprintf("oauth2/google/externalaccount: response contains unsuccessful response: (%v) %v", code, message)}
}
func unsupportedVersionError(source string, version int) error {
return fmt.Errorf("oauth2/google: %v contains unsupported version: %v", source, version)
return fmt.Errorf("oauth2/google/externalaccount: %v contains unsupported version: %v", source, version)
}
func tokenExpiredError() error {
return nonCacheableError{"oauth2/google: the token returned by the executable is expired"}
return nonCacheableError{"oauth2/google/externalaccount: the token returned by the executable is expired"}
}
func tokenTypeError(source string) error {
return fmt.Errorf("oauth2/google: %v contains unsupported token type", source)
return fmt.Errorf("oauth2/google/externalaccount: %v contains unsupported token type", source)
}
func exitCodeError(exitCode int) error {
return fmt.Errorf("oauth2/google: executable command failed with exit code %v", exitCode)
return fmt.Errorf("oauth2/google/externalaccount: executable command failed with exit code %v", exitCode)
}
func executableError(err error) error {
return fmt.Errorf("oauth2/google: executable command failed: %v", err)
return fmt.Errorf("oauth2/google/externalaccount: executable command failed: %v", err)
}
func executablesDisallowedError() error {
return errors.New("oauth2/google: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run")
return errors.New("oauth2/google/externalaccount: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run")
}
func timeoutRangeError() error {
return errors.New("oauth2/google: invalid `timeout_millis` field — executable timeout must be between 5 and 120 seconds")
return errors.New("oauth2/google/externalaccount: invalid `timeout_millis` field — executable timeout must be between 5 and 120 seconds")
}
func commandMissingError() error {
return errors.New("oauth2/google: missing `command` field — executable command must be provided")
return errors.New("oauth2/google/externalaccount: missing `command` field — executable command must be provided")
}
type environment interface {
@@ -146,7 +146,7 @@ type executableCredentialSource struct {
// CreateExecutableCredential creates an executableCredentialSource given an ExecutableConfig.
// It also performs defaulting and type conversions.
func CreateExecutableCredential(ctx context.Context, ec *ExecutableConfig, config *Config) (executableCredentialSource, error) {
func createExecutableCredential(ctx context.Context, ec *ExecutableConfig, config *Config) (executableCredentialSource, error) {
if ec.Command == "" {
return executableCredentialSource{}, commandMissingError()
}

View File

@@ -16,7 +16,7 @@ import (
type fileCredentialSource struct {
File string
Format format
Format Format
}
func (cs fileCredentialSource) credentialSourceType() string {
@@ -26,12 +26,12 @@ func (cs fileCredentialSource) credentialSourceType() string {
func (cs fileCredentialSource) subjectToken() (string, error) {
tokenFile, err := os.Open(cs.File)
if err != nil {
return "", fmt.Errorf("oauth2/google: failed to open credential file %q", cs.File)
return "", fmt.Errorf("oauth2/google/externalaccount: failed to open credential file %q", cs.File)
}
defer tokenFile.Close()
tokenBytes, err := ioutil.ReadAll(io.LimitReader(tokenFile, 1<<20))
if err != nil {
return "", fmt.Errorf("oauth2/google: failed to read credential file: %v", err)
return "", fmt.Errorf("oauth2/google/externalaccount: failed to read credential file: %v", err)
}
tokenBytes = bytes.TrimSpace(tokenBytes)
switch cs.Format.Type {
@@ -39,15 +39,15 @@ func (cs fileCredentialSource) subjectToken() (string, error) {
jsonData := make(map[string]interface{})
err = json.Unmarshal(tokenBytes, &jsonData)
if err != nil {
return "", fmt.Errorf("oauth2/google: failed to unmarshal subject token file: %v", err)
return "", fmt.Errorf("oauth2/google/externalaccount: failed to unmarshal subject token file: %v", err)
}
val, ok := jsonData[cs.Format.SubjectTokenFieldName]
if !ok {
return "", errors.New("oauth2/google: provided subject_token_field_name not found in credentials")
return "", errors.New("oauth2/google/externalaccount: provided subject_token_field_name not found in credentials")
}
token, ok := val.(string)
if !ok {
return "", errors.New("oauth2/google: improperly formatted subject token")
return "", errors.New("oauth2/google/externalaccount: improperly formatted subject token")
}
return token, nil
case "text":
@@ -55,7 +55,7 @@ func (cs fileCredentialSource) subjectToken() (string, error) {
case "":
return string(tokenBytes), nil
default:
return "", errors.New("oauth2/google: invalid credential_source file format type")
return "", errors.New("oauth2/google/externalaccount: invalid credential_source file format type")
}
}

View File

@@ -0,0 +1,21 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package externalaccount
import "context"
type programmaticRefreshCredentialSource struct {
supplierOptions SupplierOptions
subjectTokenSupplier SubjectTokenSupplier
ctx context.Context
}
func (cs programmaticRefreshCredentialSource) credentialSourceType() string {
return "programmatic"
}
func (cs programmaticRefreshCredentialSource) subjectToken() (string, error) {
return cs.subjectTokenSupplier.SubjectToken(cs.ctx, cs.supplierOptions)
}

View File

@@ -19,7 +19,7 @@ import (
type urlCredentialSource struct {
URL string
Headers map[string]string
Format format
Format Format
ctx context.Context
}
@@ -31,7 +31,7 @@ func (cs urlCredentialSource) subjectToken() (string, error) {
client := oauth2.NewClient(cs.ctx, nil)
req, err := http.NewRequest("GET", cs.URL, nil)
if err != nil {
return "", fmt.Errorf("oauth2/google: HTTP request for URL-sourced credential failed: %v", err)
return "", fmt.Errorf("oauth2/google/externalaccount: HTTP request for URL-sourced credential failed: %v", err)
}
req = req.WithContext(cs.ctx)
@@ -40,16 +40,16 @@ func (cs urlCredentialSource) subjectToken() (string, error) {
}
resp, err := client.Do(req)
if err != nil {
return "", fmt.Errorf("oauth2/google: invalid response when retrieving subject token: %v", err)
return "", fmt.Errorf("oauth2/google/externalaccount: invalid response when retrieving subject token: %v", err)
}
defer resp.Body.Close()
respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
if err != nil {
return "", fmt.Errorf("oauth2/google: invalid body in subject token URL query: %v", err)
return "", fmt.Errorf("oauth2/google/externalaccount: invalid body in subject token URL query: %v", err)
}
if c := resp.StatusCode; c < 200 || c > 299 {
return "", fmt.Errorf("oauth2/google: status code %d: %s", c, respBody)
return "", fmt.Errorf("oauth2/google/externalaccount: status code %d: %s", c, respBody)
}
switch cs.Format.Type {
@@ -57,15 +57,15 @@ func (cs urlCredentialSource) subjectToken() (string, error) {
jsonData := make(map[string]interface{})
err = json.Unmarshal(respBody, &jsonData)
if err != nil {
return "", fmt.Errorf("oauth2/google: failed to unmarshal subject token file: %v", err)
return "", fmt.Errorf("oauth2/google/externalaccount: failed to unmarshal subject token file: %v", err)
}
val, ok := jsonData[cs.Format.SubjectTokenFieldName]
if !ok {
return "", errors.New("oauth2/google: provided subject_token_field_name not found in credentials")
return "", errors.New("oauth2/google/externalaccount: provided subject_token_field_name not found in credentials")
}
token, ok := val.(string)
if !ok {
return "", errors.New("oauth2/google: improperly formatted subject token")
return "", errors.New("oauth2/google/externalaccount: improperly formatted subject token")
}
return token, nil
case "text":
@@ -73,7 +73,7 @@ func (cs urlCredentialSource) subjectToken() (string, error) {
case "":
return string(respBody), nil
default:
return "", errors.New("oauth2/google: invalid credential_source file format type")
return "", errors.New("oauth2/google/externalaccount: invalid credential_source file format type")
}
}

Some files were not shown because too many files have changed in this diff.