diff --git a/go.mod b/go.mod index 51d11502..393ac610 100644 --- a/go.mod +++ b/go.mod @@ -3,33 +3,33 @@ module github.com/datarhei/core/v16 go 1.18 require ( - github.com/99designs/gqlgen v0.17.42 + github.com/99designs/gqlgen v0.17.44 github.com/Masterminds/semver/v3 v3.2.1 github.com/atrox/haikunatorgo/v2 v2.0.1 github.com/caddyserver/certmagic v0.20.0 - github.com/datarhei/gosrt v0.5.5 - github.com/datarhei/joy4 v0.0.0-20240103155326-704dff2c27fb - github.com/go-playground/validator/v10 v10.16.0 + github.com/datarhei/gosrt v0.5.7 + github.com/datarhei/joy4 v0.0.0-20240229100136-43bcaf8ef5e7 + github.com/go-playground/validator/v10 v10.18.0 github.com/gobwas/glob v0.2.3 github.com/golang-jwt/jwt/v5 v5.2.0 - github.com/google/uuid v1.5.0 + github.com/google/uuid v1.6.0 github.com/invopop/jsonschema v0.4.0 github.com/joho/godotenv v1.5.1 github.com/labstack/echo-jwt v0.0.0-20221127215225-c84d41a71003 github.com/labstack/echo/v4 v4.11.4 github.com/lithammer/shortuuid/v4 v4.0.0 github.com/mattn/go-isatty v0.0.20 - github.com/minio/minio-go/v7 v7.0.66 + github.com/minio/minio-go/v7 v7.0.67 github.com/prep/average v0.0.0-20200506183628-d26c465f48c3 - github.com/prometheus/client_golang v1.18.0 - github.com/shirou/gopsutil/v3 v3.23.12 + github.com/prometheus/client_golang v1.19.0 + github.com/shirou/gopsutil/v3 v3.24.1 github.com/stretchr/testify v1.8.4 github.com/swaggo/echo-swagger v1.4.1 - github.com/swaggo/swag v1.16.2 - github.com/vektah/gqlparser/v2 v2.5.10 + github.com/swaggo/swag v1.16.3 + github.com/vektah/gqlparser/v2 v2.5.11 github.com/xeipuuv/gojsonschema v1.2.0 - go.uber.org/zap v1.26.0 - golang.org/x/mod v0.14.0 + go.uber.org/zap v1.27.0 + golang.org/x/mod v0.15.0 ) require ( @@ -47,7 +47,7 @@ require ( github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect github.com/go-openapi/spec v0.20.14 // indirect - github.com/go-openapi/swag v0.22.7 // indirect + github.com/go-openapi/swag v0.22.9 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect @@ -57,26 +57,25 @@ require ( github.com/iancoleman/orderedmap v0.2.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.4 // indirect - github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/klauspost/compress v1.17.7 // indirect + github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/labstack/gommon v0.4.2 // indirect - github.com/leodido/go-urn v1.2.4 // indirect + github.com/leodido/go-urn v1.4.0 // indirect github.com/libdns/libdns v0.2.1 // indirect - github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/mholt/acmez v1.2.0 // indirect - github.com/miekg/dns v1.1.57 // indirect + github.com/miekg/dns v1.1.58 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - 
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect - github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.45.0 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/prometheus/client_model v0.6.0 // indirect + github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/rs/xid v1.5.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect @@ -86,21 +85,21 @@ require ( github.com/swaggo/files/v2 v2.0.0 // indirect github.com/tklauser/go-sysconf v0.3.13 // indirect github.com/tklauser/numcpus v0.7.0 // indirect - github.com/urfave/cli/v2 v2.25.5 // indirect + github.com/urfave/cli/v2 v2.27.1 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.2 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect - github.com/yusufpapurcu/wmi v1.2.3 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zeebo/blake3 v0.2.3 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.18.0 // indirect - golang.org/x/net v0.20.0 // indirect - golang.org/x/sys v0.16.0 // indirect + golang.org/x/crypto v0.20.0 // indirect + golang.org/x/net v0.21.0 // indirect + golang.org/x/sys v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.17.0 // indirect + golang.org/x/tools v0.18.0 // indirect google.golang.org/protobuf v1.32.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 1033f36c..9dc29f8a 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -github.com/99designs/gqlgen v0.17.42 h1:BVWDOb2VVHQC5k3m6oa0XhDnxltLLrU4so7x/u39Zu4= -github.com/99designs/gqlgen v0.17.42/go.mod h1:GQ6SyMhwFbgHR0a8r2Wn8fYgEwPxxmndLFPhU63+cJE= +github.com/99designs/gqlgen v0.17.44 h1:OS2wLk/67Y+vXM75XHbwRnNYJcbuJd4OBL76RX3NQQA= +github.com/99designs/gqlgen v0.17.44/go.mod h1:UTCu3xpK2mLI5qcMNw+HKDiEL77it/1XtAjisC4sLwM= github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= @@ -23,10 +23,10 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/datarhei/gosrt v0.5.5 h1:4Xx4v7pn/rz6EaWhZRE37fROW+rry0y4yBHZBkq5d8g= -github.com/datarhei/gosrt v0.5.5/go.mod h1:In1zba2/999S1d+ENIJ0h9s3alHO3FvCNrIpykxYbEE= -github.com/datarhei/joy4 v0.0.0-20240103155326-704dff2c27fb h1:zzF0wtKJKMIDlqHfjBl5WgTFO3LeKBoJNInyOrkJZPc= -github.com/datarhei/joy4 v0.0.0-20240103155326-704dff2c27fb/go.mod h1:Jcw/6jZDQQmPx8A7INEkXmuEF7E9jjBbSTfVSLwmiQw= +github.com/datarhei/gosrt v0.5.7 h1:1COeDgF0D0v0poWu0yKDC72d29x16Ma6VFR1icx+3Xc= +github.com/datarhei/gosrt v0.5.7/go.mod h1:ZicbsY9T2rXtWgQVBTR9ilnEkSYVSIb36hG9Lj7XCKM= +github.com/datarhei/joy4 v0.0.0-20240229100136-43bcaf8ef5e7 
h1:MG5XQMTTDPcuvvRzc1c37QbwgDbYPhKmPFo9gSaPdBE= +github.com/datarhei/joy4 v0.0.0-20240229100136-43bcaf8ef5e7/go.mod h1:Jcw/6jZDQQmPx8A7INEkXmuEF7E9jjBbSTfVSLwmiQw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -47,15 +47,15 @@ github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdX github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= github.com/go-openapi/spec v0.20.14 h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/Do= github.com/go-openapi/spec v0.20.14/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw= -github.com/go-openapi/swag v0.22.7 h1:JWrc1uc/P9cSomxfnsFSVWoE1FW6bNbrVPmpQYpCcR8= -github.com/go-openapi/swag v0.22.7/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0= +github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= +github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.16.0 h1:x+plE831WK4vaKHO/jpgUGsvLKIqRRkz6M78GuJAfGE= -github.com/go-playground/validator/v10 v10.16.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-playground/validator/v10 v10.18.0 h1:BvolUXjp4zuvkZ5YN5t7ebzbhlUtPsPm2S9NAZ5nl9U= +github.com/go-playground/validator/v10 v10.18.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= @@ -70,8 +70,8 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -87,12 +87,12 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go 
v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= +github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= +github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/labstack/echo-jwt v0.0.0-20221127215225-c84d41a71003 h1:FyalHKl9hnJvhNbrABJXXjC2hG7gvIF0ioW9i0xHNQU= @@ -101,15 +101,15 @@ github.com/labstack/echo/v4 v4.11.4 h1:vDZmA+qNeh1pd/cCkEicDMrjtrnMGQ1QFI9gWN1zG github.com/labstack/echo/v4 v4.11.4/go.mod h1:noh7EvLwqDsmh/X/HWKPUl1AjzJrhyptRyEbQJfxen8= github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0= github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU= -github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= -github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/libdns/libdns v0.2.1 h1:Wu59T7wSHRgtA0cfxC+n1c/e+O3upJGWytknkmFEDis= github.com/libdns/libdns v0.2.1/go.mod h1:yQCXzk1lEZmmCPa857bnk4TsOiqYasqpyOEeSObbb40= github.com/lithammer/shortuuid/v4 v4.0.0 h1:QRbbVkfgNippHOS8PXDkti4NaWeyYfcBTHtw7k08o4c= github.com/lithammer/shortuuid/v4 v4.0.0/go.mod h1:Zs8puNcrvf2rV9rTH51ZLLcj7ZXqQI3lv67aw4KiB1Y= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed h1:036IscGBfJsFIgJQzlui7nK1Ncm0tp2ktmPj8xO4N/0= -github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= @@ -117,16 +117,14 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 
h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/mholt/acmez v1.2.0 h1:1hhLxSgY5FvH5HCnGUuwbKY2VQVo8IU7rxXKSnZ7F30= github.com/mholt/acmez v1.2.0/go.mod h1:VT9YwH1xgNX1kmYY89gY8xPJC84BFAisjo8Egigt4kE= -github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= -github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= +github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= +github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.66 h1:bnTOXOHjOqv/gcMuiVbN9o2ngRItvqE774dG9nq0Dzw= -github.com/minio/minio-go/v7 v7.0.66/go.mod h1:DHAgmyQEGdW3Cif0UooKOyrT3Vxs82zNdV6tkKhRtbs= +github.com/minio/minio-go/v7 v7.0.67 h1:BeBvZWAS+kRJm1vGTMJYVjKUNoo0FoEt/wUWdUtfmh8= +github.com/minio/minio-go/v7 v7.0.67/go.mod h1:+UXocnUeZ3wHvVh5s95gcrA4YjMIbccT6ubB+1m054A= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -140,16 +138,16 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig= -github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prep/average v0.0.0-20200506183628-d26c465f48c3 h1:Y7qCvg282QmlyrVQuL2fgGwebuw7zvfnRym09r+dUGc= github.com/prep/average v0.0.0-20200506183628-d26c465f48c3/go.mod h1:0ZE5gcyWKS151WBDIpmLshHY0l+3edpuKnBUWVVbWKk= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= 
+github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= +github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= @@ -158,8 +156,8 @@ github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= -github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= -github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shirou/gopsutil/v3 v3.24.1 h1:R3t6ondCEvmARp3wxODhXMTLC/klMa87h2PHUw5m7QI= +github.com/shirou/gopsutil/v3 v3.24.1/go.mod h1:UU7a2MSBQa+kW1uuDq8DeEBS8kmrnQwsv2b5O513rwU= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -176,29 +174,28 @@ github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/swaggo/echo-swagger v1.4.1 h1:Yf0uPaJWp1uRtDloZALyLnvdBeoEL5Kc7DtnjzO/TUk= github.com/swaggo/echo-swagger v1.4.1/go.mod h1:C8bSi+9yH2FLZsnhqMZLIZddpUxZdBYuNHbtaS1Hljc= github.com/swaggo/files/v2 v2.0.0 h1:hmAt8Dkynw7Ssz46F6pn8ok6YmGZqHSVLZ+HQM7i0kw= github.com/swaggo/files/v2 v2.0.0/go.mod h1:24kk2Y9NYEJ5lHuCra6iVwkMjIekMCaFq/0JQj66kyM= -github.com/swaggo/swag v1.16.2 h1:28Pp+8DkQoV+HLzLx8RGJZXNGKbFqnuvSbAAtoxiY04= -github.com/swaggo/swag v1.16.2/go.mod h1:6YzXnDcpr0767iOejs318CwYkCQqyGer6BizOg03f+E= +github.com/swaggo/swag v1.16.3 h1:PnCYjPCah8FK4I26l2F/KQ4yz3sILcVUN3cTlBFA9Pg= +github.com/swaggo/swag v1.16.3/go.mod h1:DImHIuOFXKpMFAQjcC7FG4m3Dg4+QuUgUzJmKjI/gRk= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4= github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr4= github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY= -github.com/urfave/cli/v2 v2.25.5 h1:d0NIAyhh5shGscroL7ek/Ya9QYQE0KNabJgiUinIQkc= -github.com/urfave/cli/v2 v2.25.5/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc= +github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho= 
+github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/vektah/gqlparser/v2 v2.5.10 h1:6zSM4azXC9u4Nxy5YmdmGu4uKamfwsdKTwp5zsEealU= -github.com/vektah/gqlparser/v2 v2.5.10/go.mod h1:1rCcfwB2ekJofmluGWXMSEnPMZgbxzwj6FaZ/4OT8Cc= +github.com/vektah/gqlparser/v2 v2.5.11 h1:JJxLtXIoN7+3x6MBdtIP59TP1RANnY7pXOaDnADQSf8= +github.com/vektah/gqlparser/v2 v2.5.11/go.mod h1:1rCcfwB2ekJofmluGWXMSEnPMZgbxzwj6FaZ/4OT8Cc= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -208,25 +205,26 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= -github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ= github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto 
v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg= +golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -237,15 +235,15 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= diff --git a/vendor/github.com/99designs/gqlgen/.gitignore b/vendor/github.com/99designs/gqlgen/.gitignore index cbc986fb..22af48fb 100644 --- a/vendor/github.com/99designs/gqlgen/.gitignore +++ b/vendor/github.com/99designs/gqlgen/.gitignore @@ -16,3 +16,5 @@ *.out gqlgen *.exe + +node_modules diff --git a/vendor/github.com/99designs/gqlgen/CHANGELOG.md b/vendor/github.com/99designs/gqlgen/CHANGELOG.md index 03976bab..6357842b 100644 --- a/vendor/github.com/99designs/gqlgen/CHANGELOG.md +++ b/vendor/github.com/99designs/gqlgen/CHANGELOG.md @@ -5,10 +5,81 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [Unreleased](https://github.com/99designs/gqlgen/compare/v0.17.41...HEAD) +## [Unreleased](https://github.com/99designs/gqlgen/compare/v0.17.42...HEAD) + +## [v0.17.42](https://github.com/99designs/gqlgen/compare/v0.17.41...v0.17.42) - 2023-12-29 +- 7bf0c223 release v0.17.42 + +
c811d47e fix: avoid panic from tracing on bad request (#2871) + +This fixes a panic which arises from the tracing components when a request has some defect which results in an error when creating the operation context. The transports consistently handle this by calling `DispatchError(graphql.WithOperationContext(ctx, rc), err)` where `rc` is the OperationContext which was not correctly constructed. This seems dangerous, because middleware may assume that if there is an `OperationContext` in the `context.Context` then they are being invoked on a normal codepath and can assume their other interceptors have been invoked in the normal order. Also, using a value returned by a function which also returned a non-nil error is very unusual. However, I have no idea what the impact of changing that dangerous behavior in the transports would be, so I opted to make the tracing component more resilient instead. +
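Not part of the upstream diff: a minimal sketch of the defensive pattern the entry above describes, for a custom tracing extension. The `SafeTracer` name and package are hypothetical; the sketch only relies on gqlgen's public `graphql` package and skips tracing when the operation context is missing or incomplete, rather than assuming the normal interceptor order ran.

```go
package safetrace

import (
	"context"

	"github.com/99designs/gqlgen/graphql"
)

// SafeTracer is a hypothetical tracing extension that tolerates requests
// whose operation context was never fully constructed (e.g. bad requests
// dispatched through DispatchError).
type SafeTracer struct{}

func (SafeTracer) ExtensionName() string { return "SafeTracer" }

func (SafeTracer) Validate(graphql.ExecutableSchema) error { return nil }

func (SafeTracer) InterceptResponse(ctx context.Context, next graphql.ResponseHandler) *graphql.Response {
	// Bad requests may reach middleware without any operation context.
	if !graphql.HasOperationContext(ctx) {
		return next(ctx)
	}

	oc := graphql.GetOperationContext(ctx)
	if oc.Operation == nil {
		// The request failed before the operation was parsed; skip tracing.
		return next(ctx)
	}

	// ... start/finish a span for oc.OperationName / oc.RawQuery here ...
	return next(ctx)
}
```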
+ +- 13bb4152 fix for entity interface code gen with related test (#2868) + +- 0354649c Remove archived dependency appdash (#2866) + +- 0d43599c Update examples go.mod with appdash replacements (#2863) + +- 7dd971c8 Use defer wg.Done() in FieldSet Dispatch (#2861) +
24ea195c vikstrous/dataloadgen replaces recommended dataloader package in example docs (#2770) + +* update example for dataloadgen + +* improved example with link to example repo + +* undo unnecessary changes + +* fix wrong signature + +* fix creation of loader + +* Update docs/content/reference/dataloaders.md + +
+ +
42f6e39d Allow fields that return root level definitions (#2858) + +* generate structs for root level definitions to support fields that return Query, Mutation or Subscription + +* removed unnecessary comment + +* re-ran go generate + +--------- + +
+ +- 682a58dd Add go generate for examples so contributors never forget (#2859) + +
e080a96d Modify to prevent unreachable code from occurring (#2846) + +* fix: add sorting by the number of types + +* revert + +* fix: keep the superclass from coming first in the case statement + +* add testdata + +* fix: Added sorting by number of types. +* fix: Prevent superclass from appearing at the beginning of case statement +
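Not part of the upstream diff: an illustrative Go sketch of the type-switch ordering problem this entry addresses, using hypothetical `Animal`/`Dog` types. In a Go type switch, a case for an interface listed before a concrete type that implements it makes the concrete case unreachable, which is why the generated cases are now sorted so the broader type comes last.

```go
package main

import "fmt"

// Hypothetical types for illustration only.
type Animal interface{ Sound() string }

type Dog struct{}

func (Dog) Sound() string { return "woof" }

// describe mimics a generated type switch in which the interface
// ("superclass") case is listed first.
func describe(v interface{}) string {
	switch v.(type) {
	case Animal:
		return "some animal"
	case Dog: // unreachable: Dog values already matched the Animal case above
		return "a dog"
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(describe(Dog{})) // prints "some animal", never "a dog"
}
```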
+ +- 68744ad2 Bump changelog + +- e4cf21d2 v0.17.41 postrelease bump + + + + + ## [v0.17.41](https://github.com/99designs/gqlgen/compare/v0.17.40...v0.17.41) - 2023-12-03 - fe60938c release v0.17.41 diff --git a/vendor/github.com/99designs/gqlgen/api/generate.go b/vendor/github.com/99designs/gqlgen/api/generate.go index 8aecc4ca..8193f2fb 100644 --- a/vendor/github.com/99designs/gqlgen/api/generate.go +++ b/vendor/github.com/99designs/gqlgen/api/generate.go @@ -27,14 +27,18 @@ func Generate(cfg *config.Config, option ...Option) error { if cfg.Federation.IsDefined() { if cfg.Federation.Version == 0 { // default to using the user's choice of version, but if unset, try to sort out which federation version to use urlRegex := regexp.MustCompile(`(?s)@link.*\(.*url:.*?"(.*?)"[^)]+\)`) // regex to grab the url of a link directive, should it exist - + versionRegex := regexp.MustCompile(`v(\d+).(\d+)$`) // regex to grab the version number from a url // check the sources, and if one is marked as federation v2, we mark the entirety to be generated using that format for _, v := range cfg.Sources { cfg.Federation.Version = 1 urlString := urlRegex.FindStringSubmatch(v.Input) - if urlString != nil && urlString[1] == "https://specs.apollo.dev/federation/v2.0" { - cfg.Federation.Version = 2 - break + // e.g. urlString[1] == "https://specs.apollo.dev/federation/v2.7" + if urlString != nil { + matches := versionRegex.FindStringSubmatch(urlString[1]) + if matches[1] == "2" { + cfg.Federation.Version = 2 + break + } } } } diff --git a/vendor/github.com/99designs/gqlgen/codegen/config/config.go b/vendor/github.com/99designs/gqlgen/codegen/config/config.go index 6238b770..5cbc6e24 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/config/config.go +++ b/vendor/github.com/99designs/gqlgen/codegen/config/config.go @@ -38,6 +38,7 @@ type Config struct { OmitComplexity bool `yaml:"omit_complexity,omitempty"` OmitGQLGenFileNotice bool `yaml:"omit_gqlgen_file_notice,omitempty"` OmitGQLGenVersionInFileNotice bool `yaml:"omit_gqlgen_version_in_file_notice,omitempty"` + OmitRootModels bool `yaml:"omit_root_models,omitempty"` StructFieldsAlwaysPointers bool `yaml:"struct_fields_always_pointers,omitempty"` ReturnPointersInUmarshalInput bool `yaml:"return_pointers_in_unmarshalinput,omitempty"` ResolversAlwaysReturnPointers bool `yaml:"resolvers_always_return_pointers,omitempty"` diff --git a/vendor/github.com/99designs/gqlgen/generate_examples.sh b/vendor/github.com/99designs/gqlgen/generate_examples.sh new file mode 100644 index 00000000..814a8d85 --- /dev/null +++ b/vendor/github.com/99designs/gqlgen/generate_examples.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env sh +cd ./_examples +go generate ./... 
|| return 0 diff --git a/vendor/github.com/99designs/gqlgen/graphql/uint.go b/vendor/github.com/99designs/gqlgen/graphql/uint.go index 26e6aa6b..8730d900 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/uint.go +++ b/vendor/github.com/99designs/gqlgen/graphql/uint.go @@ -2,6 +2,7 @@ package graphql import ( "encoding/json" + "errors" "fmt" "io" "strconv" @@ -19,8 +20,16 @@ func UnmarshalUint(v interface{}) (uint, error) { u64, err := strconv.ParseUint(v, 10, 64) return uint(u64), err case int: + if v < 0 { + return 0, errors.New("cannot convert negative numbers to uint") + } + return uint(v), nil case int64: + if v < 0 { + return 0, errors.New("cannot convert negative numbers to uint") + } + return uint(v), nil case json.Number: u64, err := strconv.ParseUint(string(v), 10, 64) @@ -41,8 +50,16 @@ func UnmarshalUint64(v interface{}) (uint64, error) { case string: return strconv.ParseUint(v, 10, 64) case int: + if v < 0 { + return 0, errors.New("cannot convert negative numbers to uint64") + } + return uint64(v), nil case int64: + if v < 0 { + return 0, errors.New("cannot convert negative numbers to uint64") + } + return uint64(v), nil case json.Number: return strconv.ParseUint(string(v), 10, 64) @@ -66,8 +83,16 @@ func UnmarshalUint32(v interface{}) (uint32, error) { } return uint32(iv), nil case int: + if v < 0 { + return 0, errors.New("cannot convert negative numbers to uint32") + } + return uint32(v), nil case int64: + if v < 0 { + return 0, errors.New("cannot convert negative numbers to uint32") + } + return uint32(v), nil case json.Number: iv, err := strconv.ParseUint(string(v), 10, 32) diff --git a/vendor/github.com/99designs/gqlgen/graphql/version.go b/vendor/github.com/99designs/gqlgen/graphql/version.go index b987f6ed..c8d052f9 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/version.go +++ b/vendor/github.com/99designs/gqlgen/graphql/version.go @@ -1,3 +1,3 @@ package graphql -const Version = "v0.17.42" +const Version = "v0.17.44" diff --git a/vendor/github.com/99designs/gqlgen/main.go b/vendor/github.com/99designs/gqlgen/main.go index f3b9f440..a50f1e27 100644 --- a/vendor/github.com/99designs/gqlgen/main.go +++ b/vendor/github.com/99designs/gqlgen/main.go @@ -1,6 +1,6 @@ package main -//go:generate go generate ./_examples/... 
+//go:generate sh generate_examples.sh import ( "bytes" diff --git a/vendor/github.com/99designs/gqlgen/plugin/federation/federation.go b/vendor/github.com/99designs/gqlgen/plugin/federation/federation.go index 7d988503..574e49b1 100644 --- a/vendor/github.com/99designs/gqlgen/plugin/federation/federation.go +++ b/vendor/github.com/99designs/gqlgen/plugin/federation/federation.go @@ -62,6 +62,9 @@ func (f *federation) MutateConfig(cfg *config.Config) error { "federation__Scope": { Model: config.StringList{"github.com/99designs/gqlgen/graphql.String"}, }, + "federation__Policy": { + Model: config.StringList{"github.com/99designs/gqlgen/graphql.String"}, + }, } for typeName, entry := range builtins { @@ -85,6 +88,7 @@ func (f *federation) MutateConfig(cfg *config.Config) error { cfg.Directives["inaccessible"] = config.DirectiveConfig{SkipRuntime: true} cfg.Directives["authenticated"] = config.DirectiveConfig{SkipRuntime: true} cfg.Directives["requiresScopes"] = config.DirectiveConfig{SkipRuntime: true} + cfg.Directives["policy"] = config.DirectiveConfig{SkipRuntime: true} cfg.Directives["interfaceObject"] = config.DirectiveConfig{SkipRuntime: true} cfg.Directives["composeDirective"] = config.DirectiveConfig{SkipRuntime: true} } @@ -126,7 +130,13 @@ func (f *federation) InjectSourceEarly() *ast.Source { | UNION directive @interfaceObject on OBJECT directive @link(import: [String!], url: String!) repeatable on SCHEMA - directive @override(from: String!) on FIELD_DEFINITION + directive @override(from: String!, label: String) on FIELD_DEFINITION + directive @policy(policies: [[federation__Policy!]!]!) on + | FIELD_DEFINITION + | OBJECT + | INTERFACE + | SCALAR + | ENUM directive @provides(fields: FieldSet!) on FIELD_DEFINITION directive @requires(fields: FieldSet!) on FIELD_DEFINITION directive @requiresScopes(scopes: [[federation__Scope!]!]!) 
on @@ -149,6 +159,7 @@ func (f *federation) InjectSourceEarly() *ast.Source { | UNION scalar _Any scalar FieldSet + scalar federation__Policy scalar federation__Scope ` } diff --git a/vendor/github.com/99designs/gqlgen/plugin/federation/federation.gotpl b/vendor/github.com/99designs/gqlgen/plugin/federation/federation.gotpl index 7cf84287..288189d7 100644 --- a/vendor/github.com/99designs/gqlgen/plugin/federation/federation.gotpl +++ b/vendor/github.com/99designs/gqlgen/plugin/federation/federation.gotpl @@ -132,7 +132,13 @@ func (ec *executionContext) __resolve_entities(ctx context.Context, representati {{ range $_, $entity := .Entities }} {{ if and .Resolvers .Multi -}} case "{{.Def.Name}}": - {{range $i, $_ := .Resolvers -}} + resolverName, err := entityResolverNameFor{{.Def.Name}}(ctx, reps[0]) + if err != nil { + return fmt.Errorf(`finding resolver for Entity "{{.Def.Name}}": %w`, err) + } + switch resolverName { + {{ range $i, $resolver := .Resolvers }} + case "{{.ResolverName}}": _reps := make([]*{{.LookupInputType}}, len(reps)) for i, rep := range reps { @@ -166,6 +172,9 @@ func (ec *executionContext) __resolve_entities(ctx context.Context, representati } return nil {{ end }} + default: + return fmt.Errorf("unknown resolver: %s", resolverName) + } {{ end }} {{- end }} default: diff --git a/vendor/github.com/99designs/gqlgen/plugin/modelgen/models.go b/vendor/github.com/99designs/gqlgen/plugin/modelgen/models.go index c7a27988..fc37c7e2 100644 --- a/vendor/github.com/99designs/gqlgen/plugin/modelgen/models.go +++ b/vendor/github.com/99designs/gqlgen/plugin/modelgen/models.go @@ -139,10 +139,12 @@ func (m *Plugin) MutateConfig(cfg *config.Config) error { b.Interfaces = append(b.Interfaces, it) case ast.Object, ast.InputObject: if cfg.IsRoot(schemaType) { - b.Models = append(b.Models, &Object{ - Description: schemaType.Description, - Name: schemaType.Name, - }) + if !cfg.OmitRootModels { + b.Models = append(b.Models, &Object{ + Description: schemaType.Description, + Name: schemaType.Name, + }) + } continue } diff --git a/vendor/github.com/datarhei/gosrt/connection.go b/vendor/github.com/datarhei/gosrt/connection.go index a14cf125..966a1441 100644 --- a/vendor/github.com/datarhei/gosrt/connection.go +++ b/vendor/github.com/datarhei/gosrt/connection.go @@ -281,9 +281,9 @@ func newSRTConn(config srtConnConfig) *srtConn { c.ctx, c.cancelCtx = context.WithCancel(context.Background()) - go c.networkQueueReader() - go c.writeQueueReader() - go c.ticker() + go c.networkQueueReader(c.ctx) + go c.writeQueueReader(c.ctx) + go c.ticker(c.ctx) c.debug.expectedRcvPacketSequenceNumber = c.initialPacketSequenceNumber c.debug.expectedReadPacketSequenceNumber = c.initialPacketSequenceNumber @@ -311,11 +311,19 @@ func newSRTConn(config srtConnConfig) *srtConn { } func (c *srtConn) LocalAddr() net.Addr { + if c.localAddr == nil { + return nil + } + addr, _ := net.ResolveUDPAddr("udp", c.localAddr.String()) return addr } func (c *srtConn) RemoteAddr() net.Addr { + if c.remoteAddr == nil { + return nil + } + addr, _ := net.ResolveUDPAddr("udp", c.remoteAddr.String()) return addr } @@ -338,7 +346,7 @@ func (c *srtConn) Version() uint32 { // ticker invokes the congestion control in regular intervals with // the current connection time. 
-func (c *srtConn) ticker() { +func (c *srtConn) ticker(ctx context.Context) { ticker := time.NewTicker(c.tick) defer ticker.Stop() defer func() { @@ -347,7 +355,7 @@ func (c *srtConn) ticker() { for { select { - case <-c.ctx.Done(): + case <-ctx.Done(): return case t := <-ticker.C: tickTime := uint64(t.Sub(c.start).Microseconds()) @@ -429,7 +437,7 @@ func (c *srtConn) Write(b []byte) (int, error) { return 0, err } - p := packet.NewPacket(nil, nil) + p := packet.NewPacket(nil) p.SetData(c.writeData[:n]) @@ -526,14 +534,14 @@ func (c *srtConn) pop(p packet.Packet) { } // networkQueueReader reads the packets from the network queue in order to process them. -func (c *srtConn) networkQueueReader() { +func (c *srtConn) networkQueueReader(ctx context.Context) { defer func() { c.log("connection:close", func() string { return "left network queue reader loop" }) }() for { select { - case <-c.ctx.Done(): + case <-ctx.Done(): return case p := <-c.networkQueue: c.handlePacket(p) @@ -543,14 +551,14 @@ func (c *srtConn) networkQueueReader() { // writeQueueReader reads the packets from the write queue and puts them into congestion // control for sending. -func (c *srtConn) writeQueueReader() { +func (c *srtConn) writeQueueReader(ctx context.Context) { defer func() { c.log("connection:close", func() string { return "left write queue reader loop" }) }() for { select { - case <-c.ctx.Done(): + case <-ctx.Done(): return case p := <-c.writeQueue: // Put the packet into the send congestion control @@ -1133,7 +1141,7 @@ func (c *srtConn) handleKMResponse(p packet.Packet) { // sendShutdown sends a shutdown packet to the peer. func (c *srtConn) sendShutdown() { - p := packet.NewPacket(c.remoteAddr, nil) + p := packet.NewPacket(c.remoteAddr) p.Header().IsControlPacket = true @@ -1156,7 +1164,7 @@ func (c *srtConn) sendShutdown() { // sendNAK sends a NAK to the peer with the given range of sequence numbers. func (c *srtConn) sendNAK(from, to circular.Number) { - p := packet.NewPacket(c.remoteAddr, nil) + p := packet.NewPacket(c.remoteAddr) p.Header().IsControlPacket = true @@ -1182,7 +1190,7 @@ func (c *srtConn) sendNAK(from, to circular.Number) { // sendACK sends an ACK to the peer with the given sequence number. func (c *srtConn) sendACK(seq circular.Number, lite bool) { - p := packet.NewPacket(c.remoteAddr, nil) + p := packet.NewPacket(c.remoteAddr) p.Header().IsControlPacket = true @@ -1233,7 +1241,7 @@ func (c *srtConn) sendACK(seq circular.Number, lite bool) { // sendACKACK sends an ACKACK to the peer with the given ACK sequence. 
func (c *srtConn) sendACKACK(ackSequence uint32) { - p := packet.NewPacket(c.remoteAddr, nil) + p := packet.NewPacket(c.remoteAddr) p.Header().IsControlPacket = true @@ -1280,7 +1288,7 @@ func (c *srtConn) sendHSRequest() { SendTSBPDDelay: uint16(c.config.ReceiverLatency.Milliseconds()), } - p := packet.NewPacket(c.remoteAddr, nil) + p := packet.NewPacket(c.remoteAddr) p.Header().IsControlPacket = true @@ -1319,7 +1327,7 @@ func (c *srtConn) sendKMRequest(key packet.PacketEncryption) { c.crypto.MarshalKM(cif, c.config.Passphrase, key) - p := packet.NewPacket(c.remoteAddr, nil) + p := packet.NewPacket(c.remoteAddr) p.Header().IsControlPacket = true diff --git a/vendor/github.com/datarhei/gosrt/dial.go b/vendor/github.com/datarhei/gosrt/dial.go index c0c71c92..8feea5fb 100644 --- a/vendor/github.com/datarhei/gosrt/dial.go +++ b/vendor/github.com/datarhei/gosrt/dial.go @@ -157,8 +157,8 @@ func Dial(network, address string, config Config) (Conn, error) { return } - p := packet.NewPacket(dl.remoteAddr, buffer[:n]) - if p == nil { + p, err := packet.NewPacketFromData(dl.remoteAddr, buffer[:n]) + if err != nil { continue } @@ -532,7 +532,7 @@ func (dl *dialer) handleHandshake(p packet.Packet) { } func (dl *dialer) sendInduction() { - p := packet.NewPacket(dl.remoteAddr, nil) + p := packet.NewPacket(dl.remoteAddr) p.Header().IsControlPacket = true @@ -567,7 +567,7 @@ func (dl *dialer) sendInduction() { } func (dl *dialer) sendShutdown(peerSocketId uint32) { - p := packet.NewPacket(dl.remoteAddr, nil) + p := packet.NewPacket(dl.remoteAddr) data := [4]byte{} binary.BigEndian.PutUint32(data[0:], 0) @@ -588,26 +588,68 @@ func (dl *dialer) sendShutdown(peerSocketId uint32) { } func (dl *dialer) LocalAddr() net.Addr { + dl.connLock.RLock() + defer dl.connLock.RUnlock() + + if dl.conn == nil { + return nil + } + return dl.conn.LocalAddr() } func (dl *dialer) RemoteAddr() net.Addr { + dl.connLock.RLock() + defer dl.connLock.RUnlock() + + if dl.conn == nil { + return nil + } + return dl.conn.RemoteAddr() } func (dl *dialer) SocketId() uint32 { + dl.connLock.RLock() + defer dl.connLock.RUnlock() + + if dl.conn == nil { + return 0 + } + return dl.conn.SocketId() } func (dl *dialer) PeerSocketId() uint32 { + dl.connLock.RLock() + defer dl.connLock.RUnlock() + + if dl.conn == nil { + return 0 + } + return dl.conn.PeerSocketId() } func (dl *dialer) StreamId() string { + dl.connLock.RLock() + defer dl.connLock.RUnlock() + + if dl.conn == nil { + return "" + } + return dl.conn.StreamId() } func (dl *dialer) Version() uint32 { + dl.connLock.RLock() + defer dl.connLock.RUnlock() + + if dl.conn == nil { + return 0 + } + return dl.conn.Version() } @@ -652,6 +694,10 @@ func (dl *dialer) Read(p []byte) (n int, err error) { dl.connLock.RLock() defer dl.connLock.RUnlock() + if dl.conn == nil { + return 0, fmt.Errorf("no connection") + } + return dl.conn.Read(p) } @@ -663,6 +709,10 @@ func (dl *dialer) readPacket() (packet.Packet, error) { dl.connLock.RLock() defer dl.connLock.RUnlock() + if dl.conn == nil { + return nil, fmt.Errorf("no connection") + } + return dl.conn.readPacket() } @@ -674,6 +724,10 @@ func (dl *dialer) Write(p []byte) (n int, err error) { dl.connLock.RLock() defer dl.connLock.RUnlock() + if dl.conn == nil { + return 0, fmt.Errorf("no connection") + } + return dl.conn.Write(p) } @@ -685,14 +739,32 @@ func (dl *dialer) writePacket(p packet.Packet) error { dl.connLock.RLock() defer dl.connLock.RUnlock() + if dl.conn == nil { + return fmt.Errorf("no connection") + } + return dl.conn.writePacket(p) } func 
(dl *dialer) SetDeadline(t time.Time) error { return dl.conn.SetDeadline(t) } func (dl *dialer) SetReadDeadline(t time.Time) error { return dl.conn.SetReadDeadline(t) } func (dl *dialer) SetWriteDeadline(t time.Time) error { return dl.conn.SetWriteDeadline(t) } -func (dl *dialer) Stats(s *Statistics) { dl.conn.Stats(s) } + +func (dl *dialer) Stats(s *Statistics) { + dl.connLock.RLock() + defer dl.connLock.RUnlock() + + if dl.conn == nil { + return + } + + dl.conn.Stats(s) +} func (dl *dialer) log(topic string, message func() string) { + if dl.config.Logger == nil { + return + } + dl.config.Logger.Print(topic, dl.socketId, 2, message) } diff --git a/vendor/github.com/datarhei/gosrt/internal/crypto/crypto.go b/vendor/github.com/datarhei/gosrt/internal/crypto/crypto.go index a4c14474..a77df9e6 100644 --- a/vendor/github.com/datarhei/gosrt/internal/crypto/crypto.go +++ b/vendor/github.com/datarhei/gosrt/internal/crypto/crypto.go @@ -113,13 +113,24 @@ func (c *crypto) generateSEK(keyLength int) ([]byte, error) { var ErrInvalidKey = errors.New("crypto: invalid key for encryption. Must be even, odd, or both") // ErrInvalidWrap is returned when the packet encryption indicates a different length of the wrapped key -var ErrInvalidWrap = errors.New("crypto: the unwrapped key has the wrong length") +var ErrInvalidWrap = errors.New("crypto: the un/wrapped key has the wrong length") func (c *crypto) UnmarshalKM(km *packet.CIFKeyMaterialExtension, passphrase string) error { if km.KeyBasedEncryption == packet.UnencryptedPacket || !km.KeyBasedEncryption.IsValid() { return ErrInvalidKey } + n := 1 + if km.KeyBasedEncryption == packet.EvenAndOddKey { + n = 2 + } + + wrapLength := n * c.keyLength + + if len(km.Wrap)-8 != wrapLength { + return ErrInvalidWrap + } + if len(km.Salt) != 0 { copy(c.salt, km.Salt) } @@ -131,12 +142,7 @@ func (c *crypto) UnmarshalKM(km *packet.CIFKeyMaterialExtension, passphrase stri return err } - n := 1 - if km.KeyBasedEncryption == packet.EvenAndOddKey { - n = 2 - } - - if len(unwrap) != n*c.keyLength { + if len(unwrap) != wrapLength { return ErrInvalidWrap } @@ -223,6 +229,10 @@ func (c *crypto) EncryptOrDecryptPayload(data []byte, key packet.PacketEncryptio // // CTR = (MSB(112, Salt) XOR psn) << 16 + if len(c.salt) != 16 { + return fmt.Errorf("crypto: invalid salt. 
Must be of length 16 bytes") + } + ctr := make([]byte, 16) binary.BigEndian.PutUint32(ctr[10:], packetSequenceNumber) diff --git a/vendor/github.com/datarhei/gosrt/internal/net/ip.go b/vendor/github.com/datarhei/gosrt/internal/net/ip.go index 32dd5faa..c5303c4e 100644 --- a/vendor/github.com/datarhei/gosrt/internal/net/ip.go +++ b/vendor/github.com/datarhei/gosrt/internal/net/ip.go @@ -12,11 +12,11 @@ type IP struct { } func (i *IP) setDefault() { - i.ip = net.ParseIP("127.0.0.1") + i.ip = net.IPv4(127, 0, 0, 1) } func (i *IP) isValid() bool { - if i.ip.String() == "" || i.ip.IsUnspecified() { + if i.ip == nil || i.ip.String() == "" || i.ip.IsUnspecified() { return false } @@ -24,11 +24,22 @@ func (i *IP) isValid() bool { } func (i IP) String() string { + if i.ip == nil { + return "" + } + return i.ip.String() } func (i *IP) Parse(ip string) { - i.ip = net.ParseIP(ip) + i.setDefault() + + iip := net.ParseIP(ip) + if iip == nil { + return + } + + i.ip = iip if !i.isValid() { i.setDefault() @@ -36,7 +47,16 @@ func (i *IP) Parse(ip string) { } func (i *IP) FromNetIP(ip net.IP) { - i.ip = net.ParseIP(ip.String()) + if ip == nil { + return + } + + iip := net.ParseIP(ip.String()) + if iip == nil { + return + } + + i.ip = iip if !i.isValid() { i.setDefault() @@ -44,12 +64,22 @@ func (i *IP) FromNetIP(ip net.IP) { } func (i *IP) FromNetAddr(addr net.Addr) { + if addr == nil { + i.setDefault() + return + } + if addr.Network() != "udp" { i.setDefault() + return } if a, err := net.ResolveUDPAddr("udp", addr.String()); err == nil { - i.ip = a.IP + if a == nil || a.IP == nil { + i.setDefault() + } else { + i.ip = a.IP + } } else { i.setDefault() } @@ -85,7 +115,12 @@ func (i *IP) Unmarshal(data []byte) error { fmt.Fprintf(&b, "%04x:", (ip3&0xffff0000)>>16) fmt.Fprintf(&b, "%04x", ip3&0x0000ffff) - i.ip = net.ParseIP(b.String()) + iip := net.ParseIP(b.String()) + if iip == nil { + return fmt.Errorf("invalid ip") + } + + i.ip = iip } } @@ -98,6 +133,14 @@ func (i *IP) Unmarshal(data []byte) error { // Marshal converts an IP to 16 byte host byte order func (i *IP) Marshal(data []byte) { + if i.ip == nil || !i.isValid() { + i.setDefault() + } + + if len([]byte(i.ip)) == 4 { + i.ip = net.IPv4(i.ip[0], i.ip[1], i.ip[2], i.ip[3]) + } + if len(data) < 16 { return } diff --git a/vendor/github.com/datarhei/gosrt/internal/packet/packet.go b/vendor/github.com/datarhei/gosrt/internal/packet/packet.go index 940759f6..a31160da 100644 --- a/vendor/github.com/datarhei/gosrt/internal/packet/packet.go +++ b/vendor/github.com/datarhei/gosrt/internal/packet/packet.go @@ -9,6 +9,7 @@ import ( "io" "net" "sort" + "strconv" "strings" "sync" @@ -78,26 +79,6 @@ const ( HSTYPE_INDUCTION HandshakeType = 0x00000001 ) -// Table 7: Handshake Rejection Reason Codes -const ( - REJ_UNKNOWN HandshakeType = 1000 - REJ_SYSTEM HandshakeType = 1001 - REJ_PEER HandshakeType = 1002 - REJ_RESOURCE HandshakeType = 1003 - REJ_ROGUE HandshakeType = 1004 - REJ_BACKLOG HandshakeType = 1005 - REJ_IPE HandshakeType = 1006 - REJ_CLOSE HandshakeType = 1007 - REJ_VERSION HandshakeType = 1008 - REJ_RDVCOOKIE HandshakeType = 1009 - REJ_BADSECRET HandshakeType = 1010 - REJ_UNSECURE HandshakeType = 1011 - REJ_MESSAGEAPI HandshakeType = 1012 - REJ_CONGESTION HandshakeType = 1013 - REJ_FILTER HandshakeType = 1014 - REJ_GROUP HandshakeType = 1015 -) - func (h HandshakeType) String() string { switch h { case HSTYPE_DONE: @@ -110,45 +91,9 @@ func (h HandshakeType) String() string { return "WAVEHAND" case HSTYPE_INDUCTION: return "INDUCTION" - case REJ_UNKNOWN: - 
return "REJ_UNKNOWN (unknown reason)" - case REJ_SYSTEM: - return "REJ_SYSTEM (system function error)" - case REJ_PEER: - return "REJ_PEER (rejected by peer)" - case REJ_RESOURCE: - return "REJ_RESOURCE (resource allocation problem)" - case REJ_ROGUE: - return "REJ_ROGUE (incorrect data in handshake)" - case REJ_BACKLOG: - return "REJ_BACKLOG (listener's backlog exceeded)" - case REJ_IPE: - return "REJ_IPE (internal program error)" - case REJ_CLOSE: - return "REJ_CLOSE (socket is closing)" - case REJ_VERSION: - return "REJ_VERSION (peer is older version than agent's min)" - case REJ_RDVCOOKIE: - return "REJ_RDVCOOKIE (rendezvous cookie collision)" - case REJ_BADSECRET: - return "REJ_BADSECRET (wrong password)" - case REJ_UNSECURE: - return "REJ_UNSECURE (password required or unexpected)" - case REJ_MESSAGEAPI: - return "REJ_MESSAGEAPI (stream flag collision)" - case REJ_CONGESTION: - return "REJ_CONGESTION (incompatible congestion-controller type)" - case REJ_FILTER: - return "REJ_FILTER (incompatible packet filter)" - case REJ_GROUP: - return "REJ_GROUP (incompatible group)" } - return "unknown" -} - -func (h HandshakeType) IsUnknown() bool { - return h.String() == "unknown" + return "REJECT (" + strconv.FormatUint(uint64(h), 32) + ")" } func (h HandshakeType) IsHandshake() bool { @@ -166,13 +111,7 @@ func (h HandshakeType) IsHandshake() bool { } func (h HandshakeType) IsRejection() bool { - if h.IsUnknown() { - return false - } else if h.IsHandshake() { - return false - } - - return true + return !h.IsHandshake() } func (h HandshakeType) Val() uint32 { @@ -311,7 +250,20 @@ func (p *pool) Put(b *bytes.Buffer) { var payloadPool *pool = newPool() -func NewPacket(addr net.Addr, rawdata []byte) Packet { +func NewPacketFromData(addr net.Addr, rawdata []byte) (Packet, error) { + p := NewPacket(addr) + + if len(rawdata) != 0 { + if err := p.Unmarshal(rawdata); err != nil { + p.Decommission() + return nil, fmt.Errorf("invalid data: %w", err) + } + } + + return p, nil +} + +func NewPacket(addr net.Addr) Packet { p := &pkt{ header: PacketHeader{ Addr: addr, @@ -324,13 +276,6 @@ func NewPacket(addr net.Addr, rawdata []byte) Packet { payload: payloadPool.Get(), } - if len(rawdata) != 0 { - if err := p.Unmarshal(rawdata); err != nil { - p.Decommission() - return nil - } - } - return p } @@ -424,6 +369,10 @@ func (p *pkt) Unmarshal(data []byte) error { } func (p *pkt) Marshal(w io.Writer) error { + if w == nil { + return fmt.Errorf("invalid writer") + } + var buffer [16]byte if p.payload == nil { @@ -685,6 +634,10 @@ func (c *CIFHandshake) Unmarshal(data []byte) error { } func (c *CIFHandshake) Marshal(w io.Writer) { + if w == nil { + return + } + var buffer [128]byte if len(c.StreamId) == 0 { @@ -856,6 +809,10 @@ func (c *CIFHandshakeExtension) Unmarshal(data []byte) error { } func (c *CIFHandshakeExtension) Marshal(w io.Writer) { + if w == nil { + return + } + var buffer [12]byte binary.BigEndian.PutUint32(buffer[0:], c.SRTVersion) @@ -1050,6 +1007,10 @@ func (c *CIFKeyMaterialExtension) Unmarshal(data []byte) error { } func (c *CIFKeyMaterialExtension) Marshal(w io.Writer) { + if w == nil { + return + } + var buffer [128]byte b := byte(0) @@ -1170,6 +1131,10 @@ func (c *CIFACK) Unmarshal(data []byte) error { } func (c *CIFACK) Marshal(w io.Writer) { + if w == nil { + return + } + var buffer [28]byte binary.BigEndian.PutUint32(buffer[0:], c.LastACKPacketSequenceNumber.Val()) @@ -1257,6 +1222,10 @@ func (c *CIFNAK) Unmarshal(data []byte) error { } func (c *CIFNAK) Marshal(w io.Writer) { + if w == nil 
{ + return + } + if len(c.LostPacketSequenceNumber)%2 != 0 { return } @@ -1294,6 +1263,10 @@ func (c *CIFShutdown) Unmarshal(data []byte) error { } func (c *CIFShutdown) Marshal(w io.Writer) { + if w == nil { + return + } + var buffer [4]byte binary.BigEndian.PutUint32(buffer[0:], 0) diff --git a/vendor/github.com/datarhei/gosrt/listen.go b/vendor/github.com/datarhei/gosrt/listen.go index 94b911a4..bbe426c8 100644 --- a/vendor/github.com/datarhei/gosrt/listen.go +++ b/vendor/github.com/datarhei/gosrt/listen.go @@ -39,6 +39,52 @@ const ( SUBSCRIBE // This connection is meant to read data from a PUBLISHed stream ) +// RejectionReason are the rejection reasons that can be returned from the AcceptFunc in order to send +// another reason than the default one (REJ_PEER) to the client. +type RejectionReason uint32 + +// Table 7: Handshake Rejection Reason Codes +const ( + REJ_UNKNOWN RejectionReason = 1000 // unknown reason + REJ_SYSTEM RejectionReason = 1001 // system function error + REJ_PEER RejectionReason = 1002 // rejected by peer + REJ_RESOURCE RejectionReason = 1003 // resource allocation problem + REJ_ROGUE RejectionReason = 1004 // incorrect data in handshake + REJ_BACKLOG RejectionReason = 1005 // listener's backlog exceeded + REJ_IPE RejectionReason = 1006 // internal program error + REJ_CLOSE RejectionReason = 1007 // socket is closing + REJ_VERSION RejectionReason = 1008 // peer is older version than agent's min + REJ_RDVCOOKIE RejectionReason = 1009 // rendezvous cookie collision + REJ_BADSECRET RejectionReason = 1010 // wrong password + REJ_UNSECURE RejectionReason = 1011 // password required or unexpected + REJ_MESSAGEAPI RejectionReason = 1012 // stream flag collision + REJ_CONGESTION RejectionReason = 1013 // incompatible congestion-controller type + REJ_FILTER RejectionReason = 1014 // incompatible packet filter + REJ_GROUP RejectionReason = 1015 // incompatible group +) + +// These are the extended rejection reasons that may be less well supported +// Codes & their meanings taken from https://github.com/Haivision/srt/blob/f477af533562505abf5295f059cf2156b17be740/srtcore/access_control.h +const ( + REJX_BAD_REQUEST RejectionReason = 1400 // General syntax error in the SocketID specification (also a fallback code for undefined cases) + REJX_UNAUTHORIZED RejectionReason = 1401 // Authentication failed, provided that the user was correctly identified and access to the required resource would be granted + REJX_OVERLOAD RejectionReason = 1402 // The server is too heavily loaded, or you have exceeded credits for accessing the service and the resource. + REJX_FORBIDDEN RejectionReason = 1403 // Access denied to the resource by any kind of reason. + REJX_NOTFOUND RejectionReason = 1404 // Resource not found at this time. + REJX_BAD_MODE RejectionReason = 1405 // The mode specified in `m` key in StreamID is not supported for this request. + REJX_UNACCEPTABLE RejectionReason = 1406 // The requested parameters specified in SocketID cannot be satisfied for the requested resource. Also when m=publish and the data format is not acceptable. + REJX_CONFLICT RejectionReason = 1407 // The resource being accessed is already locked for modification. This is in case of m=publish and the specified resource is currently read-only. + REJX_NOTSUP_MEDIA RejectionReason = 1415 // The media type is not supported by the application. This is the `t` key that specifies the media type as stream, file and auth, possibly extended by the application. 
+ REJX_LOCKED RejectionReason = 1423 // The resource being accessed is locked for any access. + REJX_FAILED_DEPEND RejectionReason = 1424 // The request failed because it specified a dependent session ID that has been disconnected. + REJX_ISE RejectionReason = 1500 // Unexpected internal server error + REJX_UNIMPLEMENTED RejectionReason = 1501 // The request was recognized, but the current version doesn't support it. + REJX_GW RejectionReason = 1502 // The server acts as a gateway and the target endpoint rejected the connection. + REJX_DOWN RejectionReason = 1503 // The service has been temporarily taken over by a stub reporting this error. The real service can be down for maintenance or crashed. + REJX_VERSION RejectionReason = 1505 // SRT version not supported. This might be either unsupported backward compatibility, or an upper value of a version. + REJX_NOROOM RejectionReason = 1507 // The data stream cannot be archived due to lacking storage space. This is in case when the request type was to send a file or the live stream to be archived. +) + // ConnRequest is an incoming connection request type ConnRequest interface { // RemoteAddr returns the address of the peer. The returned net.Addr @@ -63,6 +109,10 @@ type ConnRequest interface { // data. Returns an error if the passphrase did not work or the connection // is not encrypted. SetPassphrase(p string) error + + // SetRejectionReason sets the rejection reason for the connection. If + // no set, REJ_PEER will be used. + SetRejectionReason(r RejectionReason) } // connRequest implements the ConnRequest interface @@ -72,9 +122,11 @@ type connRequest struct { socketId uint32 timestamp uint32 - handshake *packet.CIFHandshake - crypto crypto.Crypto - passphrase string + config Config + handshake *packet.CIFHandshake + crypto crypto.Crypto + passphrase string + rejectionReason RejectionReason } func (req *connRequest) RemoteAddr() net.Addr { @@ -110,6 +162,10 @@ func (req *connRequest) SetPassphrase(passphrase string) error { return nil } +func (req *connRequest) SetRejectionReason(reason RejectionReason) { + req.rejectionReason = reason +} + // ErrListenerClosed is returned when the listener is about to shutdown. 
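Editor's note: with the new `RejectionReason` type and `ConnRequest.SetRejectionReason`, an application's accept callback can tell the peer why it was turned away instead of always sending the default `REJ_PEER`. The sketch below shows how a caller might use it, assuming the public gosrt API surface visible in this diff (`Listen`, `AcceptFunc`, the `PUBLISH`/`SUBSCRIBE`/`REJECT` connection types); `DefaultConfig`, `streamExists`, and `handle` are assumptions/placeholders, not part of this change.

```go
package main

import (
	"log"

	srt "github.com/datarhei/gosrt"
)

// streamExists and handle are placeholders for application logic.
func streamExists(id string) bool { return id == "live/abc" }

func handle(conn srt.Conn, mode srt.ConnType) { conn.Close() }

func main() {
	// DefaultConfig is assumed to exist in the vendored gosrt version.
	ln, err := srt.Listen("srt", ":6001", srt.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()

	for {
		conn, mode, err := ln.Accept(func(req srt.ConnRequest) srt.ConnType {
			if !streamExists(req.StreamId()) {
				// Send a specific code to the caller instead of the default REJ_PEER.
				req.SetRejectionReason(srt.REJX_NOTFOUND)
				return srt.REJECT
			}
			return srt.SUBSCRIBE
		})
		if err != nil {
			log.Println(err)
			return
		}
		if conn == nil {
			continue // the request was rejected by the callback
		}
		go handle(conn, mode)
	}
}
```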
var ErrListenerClosed = errors.New("srt: listener closed") @@ -160,7 +216,9 @@ type listener struct { stopReader context.CancelFunc - doneChan chan error + doneChan chan struct{} + doneErr error + doneOnce sync.Once } // Listen returns a new listener on the SRT protocol on the address with @@ -203,6 +261,9 @@ func Listen(network, address string, config Config) (Listener, error) { ln.pc = pc ln.addr = pc.LocalAddr() + if ln.addr == nil { + return nil, fmt.Errorf("listen: no local address") + } ln.conns = make(map[uint32]*srtConn) @@ -210,13 +271,14 @@ func Listen(network, address string, config Config) (Listener, error) { ln.rcvQueue = make(chan packet.Packet, 2048) - ln.syncookie, err = srtnet.NewSYNCookie(ln.addr.String(), nil) + syncookie, err := srtnet.NewSYNCookie(ln.addr.String(), nil) if err != nil { ln.Close() return nil, err } + ln.syncookie = syncookie - ln.doneChan = make(chan error) + ln.doneChan = make(chan struct{}) ln.start = time.Now() @@ -229,7 +291,7 @@ func Listen(network, address string, config Config) (Listener, error) { for { if ln.isShutdown() { - ln.doneChan <- ErrListenerClosed + ln.markDone(ErrListenerClosed) return } @@ -241,16 +303,16 @@ func Listen(network, address string, config Config) (Listener, error) { } if ln.isShutdown() { - ln.doneChan <- ErrListenerClosed + ln.markDone(ErrListenerClosed) return } - ln.doneChan <- err + ln.markDone(err) return } - p := packet.NewPacket(addr, buffer[:n]) - if p == nil { + p, err := packet.NewPacketFromData(addr, buffer[:n]) + if err != nil { continue } @@ -272,22 +334,27 @@ func (ln *listener) Accept(acceptFn AcceptFunc) (Conn, ConnType, error) { } select { - case err := <-ln.doneChan: - return nil, REJECT, err + case <-ln.doneChan: + return nil, REJECT, ln.error() case request := <-ln.backlog: if acceptFn == nil { - ln.reject(request, packet.REJ_PEER) + ln.reject(request, REJ_PEER) break } mode := acceptFn(&request) if mode != PUBLISH && mode != SUBSCRIBE { - ln.reject(request, packet.REJ_PEER) + // Figure out the reason + reason := REJ_PEER + if request.rejectionReason > 0 { + reason = request.rejectionReason + } + ln.reject(request, reason) break } if request.crypto != nil && len(request.passphrase) == 0 { - ln.reject(request, packet.REJ_BADSECRET) + ln.reject(request, REJ_BADSECRET) break } @@ -295,8 +362,8 @@ func (ln *listener) Accept(acceptFn AcceptFunc) (Conn, ConnType, error) { socketId := uint32(time.Since(ln.start).Microseconds()) // Select the largest TSBPD delay advertised by the caller, but at least 120ms - recvTsbpdDelay := uint16(ln.config.ReceiverLatency.Milliseconds()) - sendTsbpdDelay := uint16(ln.config.PeerLatency.Milliseconds()) + recvTsbpdDelay := uint16(request.config.ReceiverLatency.Milliseconds()) + sendTsbpdDelay := uint16(request.config.PeerLatency.Milliseconds()) if request.handshake.Version == 5 { if request.handshake.SRTHS.SendTSBPDDelay > recvTsbpdDelay { @@ -307,17 +374,17 @@ func (ln *listener) Accept(acceptFn AcceptFunc) (Conn, ConnType, error) { sendTsbpdDelay = request.handshake.SRTHS.RecvTSBPDDelay } - ln.config.StreamId = request.handshake.StreamId + request.config.StreamId = request.handshake.StreamId } - ln.config.Passphrase = request.passphrase + request.config.Passphrase = request.passphrase // Create a new connection conn := newSRTConn(srtConnConfig{ version: request.handshake.Version, localAddr: ln.addr, remoteAddr: request.addr, - config: ln.config, + config: request.config, start: request.start, socketId: socketId, peerSocketId: request.handshake.SRTSocketId, @@ -329,7 +396,7 
@@ func (ln *listener) Accept(acceptFn AcceptFunc) (Conn, ConnType, error) { keyBaseEncryption: packet.EvenKeyEncrypted, onSend: ln.send, onShutdown: ln.handleShutdown, - logger: ln.config.Logger, + logger: request.config.Logger, }) ln.log("connection:new", func() string { return fmt.Sprintf("%#08x (%s) %s", conn.SocketId(), conn.StreamId(), mode) }) @@ -365,14 +432,33 @@ func (ln *listener) Accept(acceptFn AcceptFunc) (Conn, ConnType, error) { return nil, REJECT, nil } +// markDone marks the listener as done by closing +// the done channel & sets the error +func (ln *listener) markDone(err error) { + ln.doneOnce.Do(func() { + ln.lock.Lock() + defer ln.lock.Unlock() + ln.doneErr = err + close(ln.doneChan) + }) +} + +// error returns the error that caused the listener to be done +// if it's nil then the listener is not done +func (ln *listener) error() error { + ln.lock.Lock() + defer ln.lock.Unlock() + return ln.doneErr +} + func (ln *listener) handleShutdown(socketId uint32) { ln.lock.Lock() delete(ln.conns, socketId) ln.lock.Unlock() } -func (ln *listener) reject(request connRequest, reason packet.HandshakeType) { - p := packet.NewPacket(request.addr, nil) +func (ln *listener) reject(request connRequest, reason RejectionReason) { + p := packet.NewPacket(request.addr) p.Header().IsControlPacket = true p.Header().ControlType = packet.CTRLTYPE_HANDSHAKE @@ -382,7 +468,7 @@ func (ln *listener) reject(request connRequest, reason packet.HandshakeType) { p.Header().Timestamp = uint32(time.Since(ln.start).Microseconds()) p.Header().DestinationSocketId = request.socketId - request.handshake.HandshakeType = reason + request.handshake.HandshakeType = packet.HandshakeType(reason) p.MarshalCIF(request.handshake) @@ -393,7 +479,7 @@ func (ln *listener) reject(request connRequest, reason packet.HandshakeType) { } func (ln *listener) accept(request connRequest) { - p := packet.NewPacket(request.addr, nil) + p := packet.NewPacket(request.addr) p.Header().IsControlPacket = true @@ -440,7 +526,12 @@ func (ln *listener) Close() { } func (ln *listener) Addr() net.Addr { - addr, _ := net.ResolveUDPAddr("udp", ln.addr.String()) + addrString := "0.0.0.0:0" + if ln.addr != nil { + addrString = ln.addr.String() + } + + addr, _ := net.ResolveUDPAddr("udp", addrString) return addr } @@ -533,6 +624,9 @@ func (ln *listener) handleHandshake(p packet.Packet) { cif.PeerIP.FromNetAddr(ln.addr) + // Create a copy of the configuration for the connection + config := ln.config + if cif.HandshakeType == packet.HSTYPE_INDUCTION { // cif cif.Version = 5 @@ -553,7 +647,7 @@ func (ln *listener) handleHandshake(p packet.Packet) { } else if cif.HandshakeType == packet.HSTYPE_CONCLUSION { // Verify the SYN cookie if !ln.syncookie.Verify(cif.SynCookie, p.Header().Addr.String()) { - cif.HandshakeType = packet.REJ_ROGUE + cif.HandshakeType = packet.HandshakeType(REJ_ROGUE) ln.log("handshake:recv:error", func() string { return "invalid SYN cookie" }) p.MarshalCIF(cif) ln.log("handshake:send:dump", func() string { return p.Dump() }) @@ -565,7 +659,7 @@ func (ln *listener) handleHandshake(p packet.Packet) { // Peer is advertising a too big MSS if cif.MaxTransmissionUnitSize > MAX_MSS_SIZE { - cif.HandshakeType = packet.REJ_ROGUE + cif.HandshakeType = packet.HandshakeType(REJ_ROGUE) ln.log("handshake:recv:error", func() string { return fmt.Sprintf("MTU is too big (%d bytes)", cif.MaxTransmissionUnitSize) }) p.MarshalCIF(cif) ln.log("handshake:send:dump", func() string { return p.Dump() }) @@ -576,13 +670,13 @@ func (ln *listener) 
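Editor's note: the `markDone`/`error` pair above replaces the old unbuffered `doneChan chan error` send, which could block the reader goroutine when nobody was sitting in `Accept`. The underlying idiom is close-the-channel-once-and-record-the-first-error; here is a minimal standalone sketch of that pattern (not the library's code):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// doneState closes its channel exactly once and keeps the first error.
type doneState struct {
	mu   sync.Mutex
	once sync.Once
	err  error
	done chan struct{}
}

func newDoneState() *doneState {
	return &doneState{done: make(chan struct{})}
}

func (d *doneState) markDone(err error) {
	d.once.Do(func() {
		d.mu.Lock()
		defer d.mu.Unlock()
		d.err = err
		close(d.done) // closing never blocks, unlike a channel send
	})
}

func (d *doneState) error() error {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.err
}

func main() {
	d := newDoneState()
	go d.markDone(errors.New("listener closed"))
	<-d.done // receiving from a closed channel returns immediately
	fmt.Println(d.error())
}
```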
handleHandshake(p packet.Packet) { } // If the peer has a smaller MTU size, adjust to it - if cif.MaxTransmissionUnitSize < ln.config.MSS { - ln.config.MSS = cif.MaxTransmissionUnitSize - ln.config.PayloadSize = ln.config.MSS - SRT_HEADER_SIZE - UDP_HEADER_SIZE + if cif.MaxTransmissionUnitSize < config.MSS { + config.MSS = cif.MaxTransmissionUnitSize + config.PayloadSize = config.MSS - SRT_HEADER_SIZE - UDP_HEADER_SIZE - if ln.config.PayloadSize < MIN_PAYLOAD_SIZE { - cif.HandshakeType = packet.REJ_ROGUE - ln.log("handshake:recv:error", func() string { return fmt.Sprintf("payload size is too small (%d bytes)", ln.config.PayloadSize) }) + if config.PayloadSize < MIN_PAYLOAD_SIZE { + cif.HandshakeType = packet.HandshakeType(REJ_ROGUE) + ln.log("handshake:recv:error", func() string { return fmt.Sprintf("payload size is too small (%d bytes)", config.PayloadSize) }) p.MarshalCIF(cif) ln.log("handshake:send:dump", func() string { return p.Dump() }) ln.log("handshake:send:cif", func() string { return cif.String() }) @@ -594,7 +688,7 @@ func (ln *listener) handleHandshake(p packet.Packet) { if cif.Version == 4 { // Check if the type (encryption field + extension field) has the value 2 if cif.EncryptionField != 0 || cif.ExtensionField != 2 { - cif.HandshakeType = packet.REJ_ROGUE + cif.HandshakeType = packet.HandshakeType(REJ_ROGUE) ln.log("handshake:recv:error", func() string { return "invalid type, expecting a value of 2 (UDT_DGRAM)" }) p.MarshalCIF(cif) ln.log("handshake:send:dump", func() string { return p.Dump() }) @@ -605,10 +699,10 @@ func (ln *listener) handleHandshake(p packet.Packet) { } } else if cif.Version == 5 { // Check if the peer version is sufficient - if cif.SRTHS.SRTVersion < ln.config.MinVersion { - cif.HandshakeType = packet.REJ_VERSION + if cif.SRTHS.SRTVersion < config.MinVersion { + cif.HandshakeType = packet.HandshakeType(REJ_VERSION) ln.log("handshake:recv:error", func() string { - return fmt.Sprintf("peer version insufficient (%#06x), expecting at least %#06x", cif.SRTHS.SRTVersion, ln.config.MinVersion) + return fmt.Sprintf("peer version insufficient (%#06x), expecting at least %#06x", cif.SRTHS.SRTVersion, config.MinVersion) }) p.MarshalCIF(cif) ln.log("handshake:send:dump", func() string { return p.Dump() }) @@ -620,7 +714,7 @@ func (ln *listener) handleHandshake(p packet.Packet) { // Check the required SRT flags if !cif.SRTHS.SRTFlags.TSBPDSND || !cif.SRTHS.SRTFlags.TSBPDRCV || !cif.SRTHS.SRTFlags.TLPKTDROP || !cif.SRTHS.SRTFlags.PERIODICNAK || !cif.SRTHS.SRTFlags.REXMITFLG { - cif.HandshakeType = packet.REJ_ROGUE + cif.HandshakeType = packet.HandshakeType(REJ_ROGUE) ln.log("handshake:recv:error", func() string { return "not all required flags are set" }) p.MarshalCIF(cif) ln.log("handshake:send:dump", func() string { return p.Dump() }) @@ -632,7 +726,7 @@ func (ln *listener) handleHandshake(p packet.Packet) { // We only support live streaming if cif.SRTHS.SRTFlags.STREAM { - cif.HandshakeType = packet.REJ_MESSAGEAPI + cif.HandshakeType = packet.HandshakeType(REJ_MESSAGEAPI) ln.log("handshake:recv:error", func() string { return "only live streaming is supported" }) p.MarshalCIF(cif) ln.log("handshake:send:dump", func() string { return p.Dump() }) @@ -642,7 +736,7 @@ func (ln *listener) handleHandshake(p packet.Packet) { return } } else { - cif.HandshakeType = packet.REJ_ROGUE + cif.HandshakeType = packet.HandshakeType(REJ_ROGUE) ln.log("handshake:recv:error", func() string { return fmt.Sprintf("only HSv4 and HSv5 are supported (got HSv%d)", cif.Version) }) 
p.MarshalCIF(cif) ln.log("handshake:send:dump", func() string { return p.Dump() }) @@ -659,6 +753,7 @@ func (ln *listener) handleHandshake(p packet.Packet) { start: time.Now(), socketId: cif.SRTSocketId, timestamp: p.Header().Timestamp, + config: config, handshake: cif, } @@ -666,7 +761,7 @@ func (ln *listener) handleHandshake(p packet.Packet) { if cif.SRTKM != nil { cr, err := crypto.New(int(cif.SRTKM.KLen)) if err != nil { - cif.HandshakeType = packet.REJ_ROGUE + cif.HandshakeType = packet.HandshakeType(REJ_ROGUE) ln.log("handshake:recv:error", func() string { return fmt.Sprintf("crypto: %s", err) }) p.MarshalCIF(cif) ln.log("handshake:send:dump", func() string { return p.Dump() }) @@ -683,7 +778,7 @@ func (ln *listener) handleHandshake(p packet.Packet) { select { case ln.backlog <- c: default: - cif.HandshakeType = packet.REJ_BACKLOG + cif.HandshakeType = packet.HandshakeType(REJ_BACKLOG) ln.log("handshake:recv:error", func() string { return "backlog is full" }) p.MarshalCIF(cif) ln.log("handshake:send:dump", func() string { return p.Dump() }) @@ -700,5 +795,9 @@ func (ln *listener) handleHandshake(p packet.Packet) { } func (ln *listener) log(topic string, message func() string) { + if ln.config.Logger == nil { + return + } + ln.config.Logger.Print(topic, 0, 2, message) } diff --git a/vendor/github.com/datarhei/joy4/format/rtmp/rtmp.go b/vendor/github.com/datarhei/joy4/format/rtmp/rtmp.go index 961b2146..ebaedaf9 100644 --- a/vendor/github.com/datarhei/joy4/format/rtmp/rtmp.go +++ b/vendor/github.com/datarhei/joy4/format/rtmp/rtmp.go @@ -250,6 +250,8 @@ type Conn struct { metadata flvio.AMFMap eventtype uint16 + + start time.Time } type txrxcount struct { @@ -282,11 +284,15 @@ func NewConn(netconn net.Conn) *Conn { conn.bufw = bufio.NewWriterSize(conn.txrxcount, pio.RecommendBufioSize) conn.writebuf = make([]byte, 4096) conn.readbuf = make([]byte, 4096) + conn.start = time.Now() return conn } type chunkStream struct { timenow uint32 + prevtimenow uint32 + tscount int + gentimenow bool timedelta uint32 hastimeext bool msgsid uint32 @@ -429,16 +435,22 @@ func createURL(tcurl, app, play string) (*url.URL, error) { var CodecTypes = flv.CodecTypes func (conn *Conn) writeBasicConf() (err error) { - // > SetChunkSize - if err = conn.writeSetChunkSize(1024 * 1024 * 1); err != nil { - return - } // > WindowAckSize - if err = conn.writeWindowAckSize(1024 * 1024 * 3); err != nil { + if err = conn.writeWindowAckSize(2500000, false); err != nil { return } // > SetPeerBandwidth - if err = conn.writeSetPeerBandwidth(1024*1024*3, 0); err != nil { + if err = conn.writeSetPeerBandwidth(2500000, 2, true); err != nil { + return + } + if conn.isserver { + // > StreamBegin + if err = conn.writeStreamBegin(0, true); err != nil { + return + } + } + // > SetChunkSize + if err = conn.writeSetChunkSize(1024*64, true); err != nil { return } return @@ -460,7 +472,7 @@ func (conn *Conn) readConnect() (err error) { return } - //fmt.Printf("readConnect: %+v\n", self.commandobj) + //fmt.Printf("readConnect: %+v\n", conn.commandobj) var ok bool var _app, _tcurl interface{} @@ -484,22 +496,32 @@ func (conn *Conn) readConnect() (err error) { return } + objectEncoding, ok := connectparams["objectEncoding"] + if !ok { + objectEncoding = 3 + } + // > _result("NetConnection.Connect.Success") - if err = conn.writeCommandMsg(3, 0, "_result", conn.commandtransid, + if err = conn.writeCommandMsg(false, 3, 0, "_result", conn.commandtransid, flvio.AMFMap{ - "fmtVer": "FMS/3,0,1,123", + "fmsVer": "FMS/3,0,1,123", "capabilities": 31, 
}, flvio.AMFMap{ "level": "status", "code": "NetConnection.Connect.Success", "description": "Connection succeeded.", - "objectEncoding": 3, + "objectEncoding": objectEncoding, }, ); err != nil { return } + // > onBWDone() + if err = conn.writeCommandMsg(true, 3, 0, "onBWDone", 0, nil, 8192); err != nil { + return + } + if err = conn.flushWrite(); err != nil { return } @@ -515,7 +537,7 @@ func (conn *Conn) readConnect() (err error) { case "createStream": conn.avmsgsid = uint32(1) // > _result(streamid) - if err = conn.writeCommandMsg(3, 0, "_result", conn.commandtransid, nil, conn.avmsgsid); err != nil { + if err = conn.writeCommandMsg(false, 3, 0, "_result", conn.commandtransid, nil, conn.avmsgsid); err != nil { return } if err = conn.flushWrite(); err != nil { @@ -540,7 +562,7 @@ func (conn *Conn) readConnect() (err error) { } // > onStatus() - if err = conn.writeCommandMsg(5, conn.avmsgsid, + if err = conn.writeCommandMsg(false, 5, conn.avmsgsid, "onStatus", conn.commandtransid, nil, flvio.AMFMap{ "level": "status", @@ -584,12 +606,12 @@ func (conn *Conn) readConnect() (err error) { playpath, _ := conn.commandparams[0].(string) // > streamBegin(streamid) - if err = conn.writeStreamBegin(conn.avmsgsid); err != nil { + if err = conn.writeStreamBegin(conn.avmsgsid, false); err != nil { return } // > onStatus() - if err = conn.writeCommandMsg(5, conn.avmsgsid, + if err = conn.writeCommandMsg(false, 5, conn.avmsgsid, "onStatus", conn.commandtransid, nil, flvio.AMFMap{ "level": "status", @@ -690,13 +712,11 @@ func (conn *Conn) writeConnect(path string) (err error) { return } - fmt.Printf("writeConnect: app: %s\n", path) - // > connect("app") if Debug { fmt.Printf("rtmp: > connect('%s') host=%s\n", path, conn.URL.Host) } - if err = conn.writeCommandMsg(3, 0, "connect", 1, + if err = conn.writeCommandMsg(false, 3, 0, "connect", 1, flvio.AMFMap{ "app": path, "flashVer": "MAC 22,0,0,192", @@ -762,7 +782,7 @@ func (conn *Conn) connectPublish() (err error) { if Debug { fmt.Printf("rtmp: > createStream()\n") } - if err = conn.writeCommandMsg(3, 0, "createStream", transid, nil); err != nil { + if err = conn.writeCommandMsg(false, 3, 0, "createStream", transid, nil); err != nil { return } transid++ @@ -792,7 +812,7 @@ func (conn *Conn) connectPublish() (err error) { if Debug { fmt.Printf("rtmp: > publish('%s')\n", publishpath) } - if err = conn.writeCommandMsg(8, conn.avmsgsid, "publish", transid, nil, publishpath); err != nil { + if err = conn.writeCommandMsg(false, 8, conn.avmsgsid, "publish", transid, nil, publishpath); err != nil { return } transid++ @@ -818,12 +838,12 @@ func (conn *Conn) connectPlay() (err error) { if Debug { fmt.Printf("rtmp: > createStream()\n") } - if err = conn.writeCommandMsg(3, 0, "createStream", 2, nil); err != nil { + if err = conn.writeCommandMsg(false, 3, 0, "createStream", 2, nil); err != nil { return } // > SetBufferLength 0,100ms - if err = conn.writeSetBufferLength(0, 100); err != nil { + if err = conn.writeSetBufferLength(0, 100, false); err != nil { return } @@ -852,7 +872,7 @@ func (conn *Conn) connectPlay() (err error) { if Debug { fmt.Printf("rtmp: > play('%s')\n", playpath) } - if err = conn.writeCommandMsg(8, conn.avmsgsid, "play", 0, nil, playpath); err != nil { + if err = conn.writeCommandMsg(false, 8, conn.avmsgsid, "play", 0, nil, playpath); err != nil { return } if err = conn.flushWrite(); err != nil { @@ -1026,37 +1046,49 @@ func (conn *Conn) tmpwbuf(n int) []byte { return conn.writebuf } -func (conn *Conn) writeSetChunkSize(size int) (err error) { 
+func (conn *Conn) writeSetChunkSize(size int, append bool) (err error) { conn.writeMaxChunkSize = size - b := conn.tmpwbuf(chunkHeaderLength + 4) - n := conn.fillChunkHeader(b, 2, 0, msgtypeidSetChunkSize, 0, 4) + var b []byte + var n int + + b = conn.tmpwbuf(chunkHeaderLength + 4) + n = conn.fillChunkHeader(append, b, 2, 0, msgtypeidSetChunkSize, 0, 4) pio.PutU32BE(b[n:], uint32(size)) n += 4 _, err = conn.bufw.Write(b[:n]) return } -func (conn *Conn) writeAck(seqnum uint32) (err error) { - b := conn.tmpwbuf(chunkHeaderLength + 4) - n := conn.fillChunkHeader(b, 2, 0, msgtypeidAck, 0, 4) +func (conn *Conn) writeAck(seqnum uint32, append bool) (err error) { + var b []byte + var n int + + b = conn.tmpwbuf(chunkHeaderLength + 4) + n = conn.fillChunkHeader(append, b, 2, 0, msgtypeidAck, 0, 4) pio.PutU32BE(b[n:], seqnum) n += 4 _, err = conn.bufw.Write(b[:n]) return } -func (conn *Conn) writeWindowAckSize(size uint32) (err error) { - b := conn.tmpwbuf(chunkHeaderLength + 4) - n := conn.fillChunkHeader(b, 2, 0, msgtypeidWindowAckSize, 0, 4) +func (conn *Conn) writeWindowAckSize(size uint32, append bool) (err error) { + var b []byte + var n int + + b = conn.tmpwbuf(chunkHeaderLength + 4) + n = conn.fillChunkHeader(append, b, 2, 0, msgtypeidWindowAckSize, 0, 4) pio.PutU32BE(b[n:], size) n += 4 _, err = conn.bufw.Write(b[:n]) return } -func (conn *Conn) writeSetPeerBandwidth(acksize uint32, limittype uint8) (err error) { - b := conn.tmpwbuf(chunkHeaderLength + 5) - n := conn.fillChunkHeader(b, 2, 0, msgtypeidSetPeerBandwidth, 0, 5) +func (conn *Conn) writeSetPeerBandwidth(acksize uint32, limittype uint8, append bool) (err error) { + var b []byte + var n int + + b = conn.tmpwbuf(chunkHeaderLength + 5) + n = conn.fillChunkHeader(append, b, 2, 0, msgtypeidSetPeerBandwidth, 0, 5) pio.PutU32BE(b[n:], acksize) n += 4 b[n] = limittype @@ -1065,22 +1097,22 @@ func (conn *Conn) writeSetPeerBandwidth(acksize uint32, limittype uint8) (err er return } -func (conn *Conn) writeCommandMsg(csid, msgsid uint32, args ...interface{}) (err error) { - return conn.writeAMF0Msg(msgtypeidCommandMsgAMF0, csid, msgsid, args...) +func (conn *Conn) writeCommandMsg(append bool, csid, msgsid uint32, args ...interface{}) (err error) { + return conn.writeAMF0Msg(append, msgtypeidCommandMsgAMF0, csid, msgsid, args...) } func (conn *Conn) writeDataMsg(csid, msgsid uint32, args ...interface{}) (err error) { - return conn.writeAMF0Msg(msgtypeidDataMsgAMF0, csid, msgsid, args...) + return conn.writeAMF0Msg(false, msgtypeidDataMsgAMF0, csid, msgsid, args...) 
} -func (conn *Conn) writeAMF0Msg(msgtypeid uint8, csid, msgsid uint32, args ...interface{}) (err error) { +func (conn *Conn) writeAMF0Msg(append bool, msgtypeid uint8, csid, msgsid uint32, args ...interface{}) (err error) { size := 0 for _, arg := range args { size += flvio.LenAMF0Val(arg) } b := conn.tmpwbuf(chunkHeaderLength + size) - n := conn.fillChunkHeader(b, csid, 0, msgtypeid, msgsid, size) + n := conn.fillChunkHeader(append, b, csid, 0, msgtypeid, msgsid, size) for _, arg := range args { n += flvio.FillAMF0Val(b[n:], arg) } @@ -1113,31 +1145,60 @@ func (conn *Conn) writeAVTag(tag flvio.Tag, ts int32) (err error) { b := conn.tmpwbuf(actualChunkHeaderLength + flvio.MaxTagSubHeaderLength) hdrlen := tag.FillHeader(b[actualChunkHeaderLength:]) - conn.fillChunkHeader(b, csid, ts, msgtypeid, conn.avmsgsid, hdrlen+len(data)) - n := hdrlen + actualChunkHeaderLength - - if n+len(data) > conn.writeMaxChunkSize { - if err = conn.writeSetChunkSize(n + len(data)); err != nil { - return - } - } + conn.fillChunkHeader(false, b, csid, ts, msgtypeid, conn.avmsgsid, hdrlen+len(data)) + n := actualChunkHeaderLength + hdrlen if _, err = conn.bufw.Write(b[:n]); err != nil { return } - if _, err = conn.bufw.Write(data); err != nil { + chunksize := conn.writeMaxChunkSize + msgdataleft := len(data) + + n = msgdataleft + if msgdataleft > chunksize-hdrlen { + n = chunksize - hdrlen + } + + if _, err = conn.bufw.Write(data[:n]); err != nil { return } + data = data[n:] + msgdataleft -= n + + for { + if msgdataleft == 0 { + break + } + + n = conn.fillChunkHeader3(b, csid, ts) + + if _, err = conn.bufw.Write(b[:n]); err != nil { + return + } + + n = msgdataleft + if msgdataleft > chunksize { + n = chunksize + } + + if _, err = conn.bufw.Write(data[:n]); err != nil { + return + } + + data = data[n:] + msgdataleft -= n + } + err = conn.bufw.Flush() return } -func (conn *Conn) writeStreamBegin(msgsid uint32) (err error) { +func (conn *Conn) writeStreamBegin(msgsid uint32, append bool) (err error) { b := conn.tmpwbuf(chunkHeaderLength + 6) - n := conn.fillChunkHeader(b, 2, 0, msgtypeidUserControl, 0, 6) + n := conn.fillChunkHeader(append, b, 2, 0, msgtypeidUserControl, 0, 6) pio.PutU16BE(b[n:], eventtypeStreamBegin) n += 2 pio.PutU32BE(b[n:], msgsid) @@ -1146,9 +1207,9 @@ func (conn *Conn) writeStreamBegin(msgsid uint32) (err error) { return } -func (conn *Conn) writeSetBufferLength(msgsid uint32, timestamp uint32) (err error) { +func (conn *Conn) writeSetBufferLength(msgsid uint32, timestamp uint32, append bool) (err error) { b := conn.tmpwbuf(chunkHeaderLength + 10) - n := conn.fillChunkHeader(b, 2, 0, msgtypeidUserControl, 0, 10) + n := conn.fillChunkHeader(append, b, 2, 0, msgtypeidUserControl, 0, 10) pio.PutU16BE(b[n:], eventtypeSetBufferLength) n += 2 pio.PutU32BE(b[n:], msgsid) @@ -1159,9 +1220,9 @@ func (conn *Conn) writeSetBufferLength(msgsid uint32, timestamp uint32) (err err return } -func (conn *Conn) writePingResponse(timestamp uint32) (err error) { +func (conn *Conn) writePingResponse(timestamp uint32, append bool) (err error) { b := conn.tmpwbuf(chunkHeaderLength + 10) - n := conn.fillChunkHeader(b, 2, 0, msgtypeidUserControl, 0, 6) + n := conn.fillChunkHeader(append, b, 2, 0, msgtypeidUserControl, 0, 6) pio.PutU16BE(b[n:], eventtypePingResponse) n += 2 pio.PutU32BE(b[n:], timestamp) @@ -1173,41 +1234,74 @@ func (conn *Conn) writePingResponse(timestamp uint32) (err error) { const chunkHeaderLength = 12 const FlvTimestampMax = 0xFFFFFF -func (conn *Conn) fillChunkHeader(b []byte, csid uint32, 
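Editor's note: the rewritten `writeAVTag` above no longer bumps the peer's chunk size to fit a whole message; it splits the payload across chunks of `writeMaxChunkSize` bytes, where the first chunk carries the full header plus the FLV tag sub-header and every continuation chunk is introduced by a one-byte type 3 basic header, `(csid & 0x3f) | 3<<6` (see `fillChunkHeader3`). A standalone sketch of that split with illustrative values, not the library's code:

```go
package main

import "fmt"

// splitPayload mimics the loop in writeAVTag: the first slice is what fits
// after the tag header in the first chunk, each following slice is at most
// chunkSize bytes and would be preceded on the wire by a type 3 basic header.
func splitPayload(data []byte, chunkSize, firstHeaderLen int) [][]byte {
	var parts [][]byte

	n := chunkSize - firstHeaderLen // assumes firstHeaderLen < chunkSize
	if n > len(data) {
		n = len(data)
	}
	parts = append(parts, data[:n])
	data = data[n:]

	for len(data) > 0 {
		n = chunkSize
		if n > len(data) {
			n = len(data)
		}
		parts = append(parts, data[:n])
		data = data[n:]
	}

	return parts
}

func main() {
	payload := make([]byte, 150_000) // e.g. one large video tag
	parts := splitPayload(payload, 64*1024, 16)
	fmt.Println(len(parts)) // 3 chunks at a 64 KiB chunk size
}
```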
timestamp int32, msgtypeid uint8, msgsid uint32, msgdatalen int) (n int) { - // 0 1 2 3 - // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - // | timestamp |message length | - // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - // | message length (cont) |message type id| msg stream id | - // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - // | message stream id (cont) | - // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - // - // Figure 9 Chunk Message Header – Type 0 +func (conn *Conn) fillChunkHeader(append bool, b []byte, csid uint32, timestamp int32, msgtypeid uint8, msgsid uint32, msgdatalen int) (n int) { + if !append { + // 0 1 2 3 + // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + // | timestamp |message length | + // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + // | message length (cont) |message type id| msg stream id | + // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + // | message stream id (cont) | + // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + // + // Figure 9 Chunk Message Header – Type 0 - b[n] = byte(csid) & 0x3f - n++ - if uint32(timestamp) <= FlvTimestampMax { - pio.PutU24BE(b[n:], uint32(timestamp)) - } else { - pio.PutU24BE(b[n:], FlvTimestampMax) - } - n += 3 - pio.PutU24BE(b[n:], uint32(msgdatalen)) - n += 3 - b[n] = msgtypeid - n++ - pio.PutU32LE(b[n:], msgsid) - n += 4 - if uint32(timestamp) > FlvTimestampMax { - pio.PutU32BE(b[n:], uint32(timestamp)) + b[n] = byte(csid) & 0x3f + n++ + if uint32(timestamp) <= FlvTimestampMax { + pio.PutU24BE(b[n:], uint32(timestamp)) + } else { + pio.PutU24BE(b[n:], FlvTimestampMax) + } + n += 3 + pio.PutU24BE(b[n:], uint32(msgdatalen)) + n += 3 + b[n] = msgtypeid + n++ + pio.PutU32LE(b[n:], msgsid) n += 4 + if uint32(timestamp) > FlvTimestampMax { + pio.PutU32BE(b[n:], uint32(timestamp)) + n += 4 + } + } else { + // 0 1 2 3 + // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + // | timestamp delta |message length | + // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + // | message length (cont) |message type id| + // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + // + // Figure 10 Chunk Message Header – Type 1 + + b[n] = 1 << 6 + b[n] += byte(csid) & 0x3f + n++ + pio.PutU24BE(b[n:], 0) + n += 3 + pio.PutU24BE(b[n:], uint32(msgdatalen)) + n += 3 + b[n] = msgtypeid + n++ } if Debug { fmt.Printf("rtmp: write chunk msgdatalen=%d msgsid=%d\n", msgdatalen, msgsid) - fmt.Print(hex.Dump(b[:msgdatalen])) + fmt.Print(hex.Dump(b[:n])) + } + + return +} + +func (c *Conn) fillChunkHeader3(b []byte, csid uint32, timestamp int32) (n int) { + pio.PutU8(b, (uint8(csid)&0x3f)|3<<6) + n++ + if uint32(timestamp) >= FlvTimestampMax { + pio.PutU32BE(b[n:], uint32(timestamp)) + n += 4 } return @@ -1424,15 +1518,37 @@ func (conn *Conn) readChunk() (err error) { fmt.Print(hex.Dump(cs.msgdata)) } - if err = conn.handleMsg(cs.timenow, cs.msgsid, cs.msgtypeid, cs.msgdata); err != nil { + timestamp = cs.timenow + + if cs.msgtypeid == msgtypeidVideoMsg || cs.msgtypeid == msgtypeidAudioMsg { + if !cs.gentimenow { + if cs.prevtimenow >= cs.timenow { + cs.tscount++ + } else { + cs.tscount = 0 + } + + if cs.tscount > 3 { + cs.gentimenow = true + } + } + + if cs.gentimenow { + 
timestamp = uint32((time.Since(conn.start).Milliseconds() % 0xFFFFFFFF) & 0xFFFFFFFF) + } + } + + if err = conn.handleMsg(timestamp, cs.msgsid, cs.msgtypeid, cs.msgdata); err != nil { return fmt.Errorf("handleMsg: %w", err) } + + cs.prevtimenow = cs.timenow } conn.ackn += uint32(n) if conn.readAckSize != 0 && conn.ackn > conn.readAckSize { - if err = conn.writeAck(conn.ackn); err != nil { + if err = conn.writeAck(conn.ackn, false); err != nil { return fmt.Errorf("writeACK: %w", err) } conn.flushWrite() @@ -1518,7 +1634,7 @@ func (conn *Conn) handleMsg(timestamp uint32, msgsid uint32, msgtypeid uint8, ms return } pingtimestamp := pio.U32BE(msgdata[2:]) - conn.writePingResponse(pingtimestamp) + conn.writePingResponse(pingtimestamp, false) conn.flushWrite() } @@ -1593,13 +1709,11 @@ func (conn *Conn) handleMsg(timestamp uint32, msgsid uint32, msgtypeid uint8, ms return } conn.readMaxChunkSize = int(pio.U32BE(msgdata)) - return case msgtypeidWindowAckSize: if len(conn.msgdata) != 4 { return fmt.Errorf("invalid packet of WindowAckSize") } conn.readAckSize = pio.U32BE(conn.msgdata) - return default: if Debug { fmt.Printf("rtmp: unhandled msg: %d\n", msgtypeid) diff --git a/vendor/github.com/go-openapi/swag/BENCHMARK.md b/vendor/github.com/go-openapi/swag/BENCHMARK.md new file mode 100644 index 00000000..e7f28ed6 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/BENCHMARK.md @@ -0,0 +1,52 @@ +# Benchmarks + +## Name mangling utilities + +```bash +go test -bench XXX -run XXX -benchtime 30s +``` + +### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op +BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op +BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op +``` + +### Benchmarks after PR #79 + +~ x10 performance improvement and ~ /100 memory allocations. 
+ +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op +``` + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: AMD Ryzen 7 5800X 8-Core Processor +BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op +``` diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go index 03555184..2b2e4631 100644 --- a/vendor/github.com/go-openapi/swag/initialism_index.go +++ b/vendor/github.com/go-openapi/swag/initialism_index.go @@ -16,9 +16,130 @@ package swag import ( "sort" + "strings" "sync" ) +var ( + // commonInitialisms are common acronyms that are kept as whole uppercased words. + commonInitialisms *indexOfInitialisms + + // initialisms is a slice of sorted initialisms + initialisms []string + + // a copy of initialisms pre-baked as []rune + initialismsRunes [][]rune + initialismsUpperCased [][]rune + + isInitialism func(string) bool + + maxAllocMatches int +) + +func init() { + // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 + configuredInitialisms := map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTPS": true, + "HTTP": true, + "ID": true, + "IP": true, + "IPv4": true, + "IPv6": true, + "JSON": true, + "LHS": true, + "OAI": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, + } + + // a thread-safe index of initialisms + commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) + maxAllocMatches = maxAllocHeuristic(initialismsRunes) + + // a test function + isInitialism = commonInitialisms.isInitialism +} + +func asRunes(in []string) [][]rune { + out := make([][]rune, len(in)) + for i, initialism := range in { + out[i] = []rune(initialism) + } + + return out +} + +func asUpperCased(in []string) [][]rune { + out := make([][]rune, len(in)) + + for i, initialism := range in { + out[i] = []rune(upper(trim(initialism))) + } + + return out +} + +func maxAllocHeuristic(in [][]rune) int { + heuristic := make(map[rune]int) + for _, initialism := range in { + heuristic[initialism[0]]++ + } + + var maxAlloc int 
+ for _, val := range heuristic { + if val > maxAlloc { + maxAlloc = val + } + } + + return maxAlloc +} + +// AddInitialisms add additional initialisms +func AddInitialisms(words ...string) { + for _, word := range words { + // commonInitialisms[upper(word)] = true + commonInitialisms.add(upper(word)) + } + // sort again + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) +} + // indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. // Since go1.9, this may be implemented with sync.Map. type indexOfInitialisms struct { @@ -63,3 +184,19 @@ func (m *indexOfInitialisms) sorted() (result []string) { sort.Sort(sort.Reverse(byInitialism(result))) return } + +type byInitialism []string + +func (s byInitialism) Len() int { + return len(s) +} +func (s byInitialism) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s byInitialism) Less(i, j int) bool { + if len(s[i]) != len(s[j]) { + return len(s[i]) < len(s[j]) + } + + return strings.Compare(s[i], s[j]) > 0 +} diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go index aa7f6a9b..8bb64ac3 100644 --- a/vendor/github.com/go-openapi/swag/name_lexem.go +++ b/vendor/github.com/go-openapi/swag/name_lexem.go @@ -14,74 +14,80 @@ package swag -import "unicode" +import ( + "unicode" + "unicode/utf8" +) type ( - nameLexem interface { - GetUnsafeGoName() string - GetOriginal() string - IsInitialism() bool - } + lexemKind uint8 - initialismNameLexem struct { + nameLexem struct { original string matchedInitialism string - } - - casualNameLexem struct { - original string + kind lexemKind } ) -func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem { - return &initialismNameLexem{ +const ( + lexemKindCasualName lexemKind = iota + lexemKindInitialismName +) + +func newInitialismNameLexem(original, matchedInitialism string) nameLexem { + return nameLexem{ + kind: lexemKindInitialismName, original: original, matchedInitialism: matchedInitialism, } } -func newCasualNameLexem(original string) *casualNameLexem { - return &casualNameLexem{ +func newCasualNameLexem(original string) nameLexem { + return nameLexem{ + kind: lexemKindCasualName, original: original, } } -func (l *initialismNameLexem) GetUnsafeGoName() string { - return l.matchedInitialism -} +func (l nameLexem) GetUnsafeGoName() string { + if l.kind == lexemKindInitialismName { + return l.matchedInitialism + } + + var ( + first rune + rest string + ) -func (l *casualNameLexem) GetUnsafeGoName() string { - var first rune - var rest string for i, orig := range l.original { if i == 0 { first = orig continue } + if i > 0 { rest = l.original[i:] break } } + if len(l.original) > 1 { - return string(unicode.ToUpper(first)) + lower(rest) + b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest)) + defer func() { + poolOfBuffers.RedeemBuffer(b) + }() + b.WriteRune(unicode.ToUpper(first)) + b.WriteString(lower(rest)) + return b.String() } return l.original } -func (l *initialismNameLexem) GetOriginal() string { +func (l nameLexem) GetOriginal() string { return l.original } -func (l *casualNameLexem) GetOriginal() string { - return l.original -} - -func (l *initialismNameLexem) IsInitialism() bool { - return true -} - -func (l *casualNameLexem) IsInitialism() bool { - return false +func (l nameLexem) IsInitialism() bool { + return l.kind == lexemKindInitialismName } diff --git 
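Editor's note: `AddInitialisms` (moved here from util.go) now also refreshes the pre-baked rune caches, but for callers the behaviour is unchanged: registering an acronym still affects all name-mangling helpers. A short illustration using the exported go-openapi/swag API; the commented outputs are what the default rules typically produce and are meant as examples, not guarantees:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// Without registering the acronym, "grpc" is treated as a plain word.
	fmt.Println(swag.ToGoName("grpc_stream_id")) // e.g. GrpcStreamID

	// After registering it, the whole acronym is kept upper-cased.
	swag.AddInitialisms("GRPC")
	fmt.Println(swag.ToGoName("grpc_stream_id")) // e.g. GRPCStreamID
}
```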
a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go index a1825fb7..274727a8 100644 --- a/vendor/github.com/go-openapi/swag/split.go +++ b/vendor/github.com/go-openapi/swag/split.go @@ -15,124 +15,269 @@ package swag import ( + "bytes" + "sync" "unicode" + "unicode/utf8" ) -var nameReplaceTable = map[rune]string{ - '@': "At ", - '&': "And ", - '|': "Pipe ", - '$': "Dollar ", - '!': "Bang ", - '-': "", - '_': "", -} - type ( splitter struct { - postSplitInitialismCheck bool initialisms []string + initialismsRunes [][]rune + initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version + postSplitInitialismCheck bool } - splitterOption func(*splitter) *splitter + splitterOption func(*splitter) + + initialismMatch struct { + body []rune + start, end int + complete bool + } + initialismMatches []initialismMatch ) -// split calls the splitter; splitter provides more control and post options -func split(str string) []string { - lexems := newSplitter().split(str) - result := make([]string, 0, len(lexems)) +type ( + // memory pools of temporary objects. + // + // These are used to recycle temporarily allocated objects + // and relieve the GC from undue pressure. - for _, lexem := range lexems { + matchesPool struct { + *sync.Pool + } + + buffersPool struct { + *sync.Pool + } + + lexemsPool struct { + *sync.Pool + } + + splittersPool struct { + *sync.Pool + } +) + +var ( + // poolOfMatches holds temporary slices for recycling during the initialism match process + poolOfMatches = matchesPool{ + Pool: &sync.Pool{ + New: func() any { + s := make(initialismMatches, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfBuffers = buffersPool{ + Pool: &sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, + }, + } + + poolOfLexems = lexemsPool{ + Pool: &sync.Pool{ + New: func() any { + s := make([]nameLexem, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfSplitters = splittersPool{ + Pool: &sync.Pool{ + New: func() any { + s := newSplitter() + + return &s + }, + }, + } +) + +// nameReplaceTable finds a word representation for special characters. +func nameReplaceTable(r rune) (string, bool) { + switch r { + case '@': + return "At ", true + case '&': + return "And ", true + case '|': + return "Pipe ", true + case '$': + return "Dollar ", true + case '!': + return "Bang ", true + case '-': + return "", true + case '_': + return "", true + default: + return "", false + } +} + +// split calls the splitter. 
+// +// Use newSplitter for more control and options +func split(str string) []string { + s := poolOfSplitters.BorrowSplitter() + lexems := s.split(str) + result := make([]string, 0, len(*lexems)) + + for _, lexem := range *lexems { result = append(result, lexem.GetOriginal()) } + poolOfLexems.RedeemLexems(lexems) + poolOfSplitters.RedeemSplitter(s) return result } -func (s *splitter) split(str string) []nameLexem { - return s.toNameLexems(str) -} - -func newSplitter(options ...splitterOption) *splitter { - splitter := &splitter{ +func newSplitter(options ...splitterOption) splitter { + s := splitter{ postSplitInitialismCheck: false, initialisms: initialisms, + initialismsRunes: initialismsRunes, + initialismsUpperCased: initialismsUpperCased, } for _, option := range options { - splitter = option(splitter) + option(&s) } - return splitter -} - -// withPostSplitInitialismCheck allows to catch initialisms after main split process -func withPostSplitInitialismCheck(s *splitter) *splitter { - s.postSplitInitialismCheck = true return s } -type ( - initialismMatch struct { - start, end int - body []rune - complete bool - } - initialismMatches []*initialismMatch -) +// withPostSplitInitialismCheck allows to catch initialisms after main split process +func withPostSplitInitialismCheck(s *splitter) { + s.postSplitInitialismCheck = true +} -func (s *splitter) toNameLexems(name string) []nameLexem { +func (p matchesPool) BorrowMatches() *initialismMatches { + s := p.Get().(*initialismMatches) + *s = (*s)[:0] // reset slice, keep allocated capacity + + return s +} + +func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer { + s := p.Get().(*bytes.Buffer) + s.Reset() + + if s.Cap() < size { + s.Grow(size) + } + + return s +} + +func (p lexemsPool) BorrowLexems() *[]nameLexem { + s := p.Get().(*[]nameLexem) + *s = (*s)[:0] // reset slice, keep allocated capacity + + return s +} + +func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter { + s := p.Get().(*splitter) + s.postSplitInitialismCheck = false // reset options + for _, apply := range options { + apply(s) + } + + return s +} + +func (p matchesPool) RedeemMatches(s *initialismMatches) { + p.Put(s) +} + +func (p buffersPool) RedeemBuffer(s *bytes.Buffer) { + p.Put(s) +} + +func (p lexemsPool) RedeemLexems(s *[]nameLexem) { + p.Put(s) +} + +func (p splittersPool) RedeemSplitter(s *splitter) { + p.Put(s) +} + +func (m initialismMatch) isZero() bool { + return m.start == 0 && m.end == 0 +} + +func (s splitter) split(name string) *[]nameLexem { nameRunes := []rune(name) matches := s.gatherInitialismMatches(nameRunes) + if matches == nil { + return poolOfLexems.BorrowLexems() + } + return s.mapMatchesToNameLexems(nameRunes, matches) } -func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { - matches := make(initialismMatches, 0) +func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches { + var matches *initialismMatches for currentRunePosition, currentRune := range nameRunes { - newMatches := make(initialismMatches, 0, len(matches)) + // recycle these allocations as we loop over runes + // with such recycling, only 2 slices should be allocated per call + // instead of o(n). 
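Editor's note: the Borrow/Redeem helpers above are thin wrappers around `sync.Pool` that reset recycled slices and buffers before reuse. Reduced to its simplest form, the pattern looks like the generic sketch below (not the library's pool types):

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

// bufPool mirrors the borrow/redeem pattern: Get, Reset, use, Put.
var bufPool = sync.Pool{
	New: func() any { return new(bytes.Buffer) },
}

func render(name string) string {
	b := bufPool.Get().(*bytes.Buffer)
	b.Reset()            // always clear recycled state before use
	defer bufPool.Put(b) // hand the buffer back for the next caller

	b.WriteString("hello, ")
	b.WriteString(name)
	return b.String()
}

func main() {
	fmt.Println(render("swag"))
}
```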
+ newMatches := poolOfMatches.BorrowMatches() // check current initialism matches - for _, match := range matches { - if keepCompleteMatch := match.complete; keepCompleteMatch { - newMatches = append(newMatches, match) - continue - } - - // drop failed match - currentMatchRune := match.body[currentRunePosition-match.start] - if !s.initialismRuneEqual(currentMatchRune, currentRune) { - continue - } - - // try to complete ongoing match - if currentRunePosition-match.start == len(match.body)-1 { - // we are close; the next step is to check the symbol ahead - // if it is a small letter, then it is not the end of match - // but beginning of the next word - - if currentRunePosition < len(nameRunes)-1 { - nextRune := nameRunes[currentRunePosition+1] - if newWord := unicode.IsLower(nextRune); newWord { - // oh ok, it was the start of a new word - continue - } + if matches != nil { // skip first iteration + for _, match := range *matches { + if keepCompleteMatch := match.complete; keepCompleteMatch { + *newMatches = append(*newMatches, match) + continue } - match.complete = true - match.end = currentRunePosition - } + // drop failed match + currentMatchRune := match.body[currentRunePosition-match.start] + if currentMatchRune != currentRune { + continue + } - newMatches = append(newMatches, match) + // try to complete ongoing match + if currentRunePosition-match.start == len(match.body)-1 { + // we are close; the next step is to check the symbol ahead + // if it is a small letter, then it is not the end of match + // but beginning of the next word + + if currentRunePosition < len(nameRunes)-1 { + nextRune := nameRunes[currentRunePosition+1] + if newWord := unicode.IsLower(nextRune); newWord { + // oh ok, it was the start of a new word + continue + } + } + + match.complete = true + match.end = currentRunePosition + } + + *newMatches = append(*newMatches, match) + } } // check for new initialism matches - for _, initialism := range s.initialisms { - initialismRunes := []rune(initialism) - if s.initialismRuneEqual(initialismRunes[0], currentRune) { - newMatches = append(newMatches, &initialismMatch{ + for i := range s.initialisms { + initialismRunes := s.initialismsRunes[i] + if initialismRunes[0] == currentRune { + *newMatches = append(*newMatches, initialismMatch{ start: currentRunePosition, body: initialismRunes, complete: false, @@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { } } + if matches != nil { + poolOfMatches.RedeemMatches(matches) + } matches = newMatches } + // up to the caller to redeem this last slice return matches } -func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem { - nameLexems := make([]nameLexem, 0) +func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem { + nameLexems := poolOfLexems.BorrowLexems() - var lastAcceptedMatch *initialismMatch - for _, match := range matches { + var lastAcceptedMatch initialismMatch + for _, match := range *matches { if !match.complete { continue } - if firstMatch := lastAcceptedMatch == nil; firstMatch { - nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...) 
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + if firstMatch := lastAcceptedMatch.isZero(); firstMatch { + s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start]) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match @@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa } middle := nameRunes[lastAcceptedMatch.end+1 : match.start] - nameLexems = append(nameLexems, s.breakCasualString(middle)...) - nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + s.appendBrokenDownCasualString(nameLexems, middle) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match } // we have not found any accepted matches - if lastAcceptedMatch == nil { - return s.breakCasualString(nameRunes) + if lastAcceptedMatch.isZero() { + *nameLexems = (*nameLexems)[:0] + s.appendBrokenDownCasualString(nameLexems, nameRunes) + } else if lastAcceptedMatch.end+1 != len(nameRunes) { + rest := nameRunes[lastAcceptedMatch.end+1:] + s.appendBrokenDownCasualString(nameLexems, rest) } - if lastAcceptedMatch.end+1 != len(nameRunes) { - rest := nameRunes[lastAcceptedMatch.end+1:] - nameLexems = append(nameLexems, s.breakCasualString(rest)...) - } + poolOfMatches.RedeemMatches(matches) return nameLexems } -func (s *splitter) initialismRuneEqual(a, b rune) bool { - return a == b -} - -func (s *splitter) breakInitialism(original string) nameLexem { +func (s splitter) breakInitialism(original string) nameLexem { return newInitialismNameLexem(original, original) } -func (s *splitter) breakCasualString(str []rune) []nameLexem { - segments := make([]nameLexem, 0) - currentSegment := "" +func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) { + currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can reused + defer func() { + poolOfBuffers.RedeemBuffer(currentSegment) + }() addCasualNameLexem := func(original string) { - segments = append(segments, newCasualNameLexem(original)) + *segments = append(*segments, newCasualNameLexem(original)) } addInitialismNameLexem := func(original, match string) { - segments = append(segments, newInitialismNameLexem(original, match)) + *segments = append(*segments, newInitialismNameLexem(original, match)) } - addNameLexem := func(original string) { - if s.postSplitInitialismCheck { - for _, initialism := range s.initialisms { - if upper(initialism) == upper(original) { - addInitialismNameLexem(original, initialism) + var addNameLexem func(string) + if s.postSplitInitialismCheck { + addNameLexem = func(original string) { + for i := range s.initialisms { + if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) { + addInitialismNameLexem(original, s.initialisms[i]) + return } } - } - addCasualNameLexem(original) + addCasualNameLexem(original) + } + } else { + addNameLexem = addCasualNameLexem } - for _, rn := range string(str) { - if replace, found := nameReplaceTable[rn]; found { - if currentSegment != "" { - addNameLexem(currentSegment) - currentSegment = "" + for _, rn := range str { + if replace, found := nameReplaceTable(rn); found { + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() } if replace != "" { @@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem { } if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) { - if 
currentSegment != "" { - addNameLexem(currentSegment) - currentSegment = "" + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() } continue } if unicode.IsUpper(rn) { - if currentSegment != "" { - addNameLexem(currentSegment) + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) } - currentSegment = "" + currentSegment.Reset() } - currentSegment += string(rn) + currentSegment.WriteRune(rn) } - if currentSegment != "" { - addNameLexem(currentSegment) + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) } - - return segments +} + +// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but +// it ignores leading and trailing blank spaces in the compared +// string. +// +// base is assumed to be composed of upper-cased runes, and be already +// trimmed. +// +// This code is heavily inspired from strings.EqualFold. +func isEqualFoldIgnoreSpace(base []rune, str string) bool { + var i, baseIndex int + // equivalent to b := []byte(str), but without data copy + b := hackStringBytes(str) + + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + break + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + break + } + i += size + } + + if i >= len(b) { + return len(base) == 0 + } + + for _, baseRune := range base { + if i >= len(b) { + break + } + + if c := b[i]; c < utf8.RuneSelf { + // single byte rune case (ASCII) + if baseRune >= utf8.RuneSelf { + return false + } + + baseChar := byte(baseRune) + if c != baseChar && + !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) { + return false + } + + baseIndex++ + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if unicode.ToUpper(r) != baseRune { + return false + } + baseIndex++ + i += size + } + + if baseIndex != len(base) { + return false + } + + // all passed: now we should only have blanks + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + return false + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + return false + } + + i += size + } + + return true } diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go new file mode 100644 index 00000000..c52d6bf7 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/string_bytes.go @@ -0,0 +1,22 @@ +package swag + +import "unsafe" + +type internalString struct { + Data unsafe.Pointer + Len int +} + +// hackStringBytes returns the (unsafe) underlying bytes slice of a string. +func hackStringBytes(str string) []byte { + p := (*internalString)(unsafe.Pointer(&str)).Data + return unsafe.Slice((*byte)(p), len(str)) +} + +/* + * go1.20 version (for when go mod moves to a go1.20 requirement): + +func hackStringBytes(str string) []byte { + return unsafe.Slice(unsafe.StringData(str), len(str)) +} +*/ diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index 0413f744..5051401c 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -18,76 +18,25 @@ import ( "reflect" "strings" "unicode" + "unicode/utf8" ) -// commonInitialisms are common acronyms that are kept as whole uppercased words. 
-var commonInitialisms *indexOfInitialisms - -// initialisms is a slice of sorted initialisms -var initialisms []string - -var isInitialism func(string) bool - // GoNamePrefixFunc sets an optional rule to prefix go names // which do not start with a letter. // +// The prefix function is assumed to return a string that starts with an upper case letter. +// // e.g. to help convert "123" into "{prefix}123" // // The default is to prefix with "X" var GoNamePrefixFunc func(string) string -func init() { - // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 - var configuredInitialisms = map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTPS": true, - "HTTP": true, - "ID": true, - "IP": true, - "IPv4": true, - "IPv6": true, - "JSON": true, - "LHS": true, - "OAI": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - "XSRF": true, - "XSS": true, +func prefixFunc(name, in string) string { + if GoNamePrefixFunc == nil { + return "X" + in } - // a thread-safe index of initialisms - commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) - initialisms = commonInitialisms.sorted() - - // a test function - isInitialism = commonInitialisms.isInitialism + return GoNamePrefixFunc(name) + in } const ( @@ -156,22 +105,6 @@ func SplitByFormat(data, format string) []string { return result } -type byInitialism []string - -func (s byInitialism) Len() int { - return len(s) -} -func (s byInitialism) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s byInitialism) Less(i, j int) bool { - if len(s[i]) != len(s[j]) { - return len(s[i]) < len(s[j]) - } - - return strings.Compare(s[i], s[j]) > 0 -} - // Removes leading whitespaces func trim(str string) string { return strings.TrimSpace(str) @@ -188,15 +121,20 @@ func lower(str string) string { } // Camelize an uppercased word -func Camelize(word string) (camelized string) { +func Camelize(word string) string { + camelized := poolOfBuffers.BorrowBuffer(len(word)) + defer func() { + poolOfBuffers.RedeemBuffer(camelized) + }() + for pos, ru := range []rune(word) { if pos > 0 { - camelized += string(unicode.ToLower(ru)) + camelized.WriteRune(unicode.ToLower(ru)) } else { - camelized += string(unicode.ToUpper(ru)) + camelized.WriteRune(unicode.ToUpper(ru)) } } - return + return camelized.String() } // ToFileName lowercases and underscores a go type name @@ -224,26 +162,31 @@ func ToCommandName(name string) string { // ToHumanNameLower represents a code name as a human series of words func ToHumanNameLower(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) - out := make([]string, 0, len(in)) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) + out := make([]string, 0, len(*in)) - for _, w := range in { + for _, w := range *in { if !w.IsInitialism() { out = append(out, lower(w.GetOriginal())) } else { out = append(out, trim(w.GetOriginal())) } } + poolOfLexems.RedeemLexems(in) return strings.Join(out, " ") } // ToHumanNameTitle represents a code name as a human series of words with the first letters titleized func 
ToHumanNameTitle(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) - out := make([]string, 0, len(in)) - for _, w := range in { + out := make([]string, 0, len(*in)) + for _, w := range *in { original := trim(w.GetOriginal()) if !w.IsInitialism() { out = append(out, Camelize(original)) @@ -251,6 +194,8 @@ func ToHumanNameTitle(name string) string { out = append(out, original) } } + poolOfLexems.RedeemLexems(in) + return strings.Join(out, " ") } @@ -283,35 +228,70 @@ func ToVarName(name string) string { // ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes func ToGoName(name string) string { - lexems := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + lexems := s.split(name) + poolOfSplitters.RedeemSplitter(s) + defer func() { + poolOfLexems.RedeemLexems(lexems) + }() + lexemes := *lexems - result := "" - for _, lexem := range lexems { + if len(lexemes) == 0 { + return "" + } + + result := poolOfBuffers.BorrowBuffer(len(name)) + defer func() { + poolOfBuffers.RedeemBuffer(result) + }() + + // check if not starting with a letter, upper case + firstPart := lexemes[0].GetUnsafeGoName() + if lexemes[0].IsInitialism() { + firstPart = upper(firstPart) + } + + if c := firstPart[0]; c < utf8.RuneSelf { + // ASCII + switch { + case 'A' <= c && c <= 'Z': + result.WriteString(firstPart) + case 'a' <= c && c <= 'z': + result.WriteByte(c - 'a' + 'A') + result.WriteString(firstPart[1:]) + default: + result.WriteString(prefixFunc(name, firstPart)) + // NOTE: no longer check if prefixFunc returns a string that starts with uppercase: + // assume this is always the case + } + } else { + // unicode + firstRune, _ := utf8.DecodeRuneInString(firstPart) + switch { + case !unicode.IsLetter(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + case !unicode.IsUpper(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + /* + result.WriteRune(unicode.ToUpper(firstRune)) + result.WriteString(firstPart[offset:]) + */ + default: + result.WriteString(firstPart) + } + } + + for _, lexem := range lexemes[1:] { goName := lexem.GetUnsafeGoName() // to support old behavior if lexem.IsInitialism() { goName = upper(goName) } - result += goName + result.WriteString(goName) } - if len(result) > 0 { - // Only prefix with X when the first character isn't an ascii letter - first := []rune(result)[0] - if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) { - if GoNamePrefixFunc == nil { - return "X" + result - } - result = GoNamePrefixFunc(name) + result - } - first = []rune(result)[0] - if unicode.IsLetter(first) && !unicode.IsUpper(first) { - result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...)) - } - } - - return result + return result.String() } // ContainsStrings searches a slice of strings for a case-sensitive match @@ -376,16 +356,6 @@ func IsZero(data interface{}) bool { } } -// AddInitialisms add additional initialisms -func AddInitialisms(words ...string) { - for _, word := range words { - // commonInitialisms[upper(word)] = true - commonInitialisms.add(upper(word)) - } - // sort again - initialisms = commonInitialisms.sorted() -} - // CommandLineOptionsGroup represents a group of user-defined command line options type CommandLineOptionsGroup struct { ShortDescription 
string diff --git a/vendor/github.com/go-playground/validator/v10/Makefile b/vendor/github.com/go-playground/validator/v10/Makefile index 09f171ba..8bf4f28a 100644 --- a/vendor/github.com/go-playground/validator/v10/Makefile +++ b/vendor/github.com/go-playground/validator/v10/Makefile @@ -1,4 +1,4 @@ -GOCMD=GO111MODULE=on go +GOCMD=go linters-install: @golangci-lint --version >/dev/null 2>&1 || { \ diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md index dbbafc3e..b3e113a1 100644 --- a/vendor/github.com/go-playground/validator/v10/README.md +++ b/vendor/github.com/go-playground/validator/v10/README.md @@ -1,7 +1,7 @@ Package validator ================= [![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -![Project status](https://img.shields.io/badge/version-10.16.0-green.svg) +![Project status](https://img.shields.io/badge/version-10.18.0-green.svg) [![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator) [![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator) diff --git a/vendor/github.com/go-playground/validator/v10/errors.go b/vendor/github.com/go-playground/validator/v10/errors.go index 5856d57c..be2676e9 100644 --- a/vendor/github.com/go-playground/validator/v10/errors.go +++ b/vendor/github.com/go-playground/validator/v10/errors.go @@ -257,15 +257,19 @@ func (fe *fieldError) Error() string { // NOTE: if no registered translation can be found, it returns the original // untranslated error message. 
func (fe *fieldError) Translate(ut ut.Translator) string { + var fn TranslationFunc m, ok := fe.v.transTagFunc[ut] if !ok { return fe.Error() } - fn, ok := m[fe.tag] + fn, ok = m[fe.tag] if !ok { - return fe.Error() + fn, ok = m[fe.actualTag] + if !ok { + return fe.Error() + } } return fn(ut, fe) diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md index c9fb829d..7ec5ac7e 100644 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + ## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b404f4be..dc60082d 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -17,6 +17,12 @@ var ( NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) Nil UUID // empty UUID, all zeros + + // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. 
+ Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } ) // NewHash returns a new UUID derived from the hash of space concatenated with diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go index ba9dd5eb..3167b643 100644 --- a/vendor/github.com/google/uuid/version7.go +++ b/vendor/github.com/google/uuid/version7.go @@ -44,7 +44,7 @@ func NewV7FromReader(r io.Reader) (UUID, error) { // makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) // uuid[8] already has the right version number (Variant is 10) -// see function NewV7 and NewV7FromReader +// see function NewV7 and NewV7FromReader func makeV7(uuid []byte) { /* 0 1 2 3 @@ -52,7 +52,7 @@ func makeV7(uuid []byte) { +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | unix_ts_ms | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | unix_ts_ms | ver | rand_a | + | unix_ts_ms | ver | rand_a (12 bit seq) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |var| rand_b | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -61,7 +61,7 @@ func makeV7(uuid []byte) { */ _ = uuid[15] // bounds check - t := timeNow().UnixMilli() + t, s := getV7Time() uuid[0] = byte(t >> 40) uuid[1] = byte(t >> 32) @@ -70,6 +70,35 @@ func makeV7(uuid []byte) { uuid[4] = byte(t >> 8) uuid[5] = byte(t) - uuid[6] = 0x70 | (uuid[6] & 0x0F) - // uuid[8] has already has right version + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in milliseconds and nanoseconds / 256. +// The returned (milli << 12 + seq) is guarenteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time. +func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq } diff --git a/vendor/github.com/klauspost/compress/internal/race/norace.go b/vendor/github.com/klauspost/compress/internal/race/norace.go new file mode 100644 index 00000000..affbbbb5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/race/norace.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !race + +package race + +func ReadSlice[T any](s []T) { +} + +func WriteSlice[T any](s []T) { +} diff --git a/vendor/github.com/klauspost/compress/internal/race/race.go b/vendor/github.com/klauspost/compress/internal/race/race.go new file mode 100644 index 00000000..f5e240dc --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/race/race.go @@ -0,0 +1,26 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build race + +package race + +import ( + "runtime" + "unsafe" +) + +func ReadSlice[T any](s []T) { + if len(s) == 0 { + return + } + runtime.RaceReadRange(unsafe.Pointer(&s[0]), len(s)*int(unsafe.Sizeof(s[0]))) +} + +func WriteSlice[T any](s []T) { + if len(s) == 0 { + return + } + runtime.RaceWriteRange(unsafe.Pointer(&s[0]), len(s)*int(unsafe.Sizeof(s[0]))) +} diff --git a/vendor/github.com/klauspost/compress/s2/decode.go b/vendor/github.com/klauspost/compress/s2/decode.go index 6c7feafc..264ffd0a 100644 --- a/vendor/github.com/klauspost/compress/s2/decode.go +++ b/vendor/github.com/klauspost/compress/s2/decode.go @@ -10,6 +10,8 @@ import ( "errors" "fmt" "strconv" + + "github.com/klauspost/compress/internal/race" ) var ( @@ -63,6 +65,10 @@ func Decode(dst, src []byte) ([]byte, error) { } else { dst = make([]byte, dLen) } + + race.WriteSlice(dst) + race.ReadSlice(src[s:]) + if s2Decode(dst, src[s:]) != 0 { return nil, ErrCorrupt } diff --git a/vendor/github.com/klauspost/compress/s2/encode_all.go b/vendor/github.com/klauspost/compress/s2/encode_all.go index 5e57995d..99770456 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_all.go +++ b/vendor/github.com/klauspost/compress/s2/encode_all.go @@ -117,6 +117,12 @@ func encodeBlockGo(dst, src []byte) (d int) { i-- base-- } + + // Bail if we exceed the maximum size. + if d+(base-nextEmit) > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:base]) // Extend forward @@ -152,7 +158,6 @@ func encodeBlockGo(dst, src []byte) (d int) { if s >= sLimit { goto emitRemainder } - cv = load64(src, s) continue } @@ -325,6 +330,11 @@ func encodeBlockSnappyGo(dst, src []byte) (d int) { i-- base-- } + // Bail if we exceed the maximum size. + if d+(base-nextEmit) > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:base]) // Extend forward @@ -532,6 +542,11 @@ searchDict: i-- base-- } + // Bail if we exceed the maximum size. + if d+(base-nextEmit) > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:base]) if debug && nextEmit != base { fmt.Println("emitted ", base-nextEmit, "literals") @@ -880,6 +895,11 @@ searchDict: i-- base-- } + // Bail if we exceed the maximum size. + if d+(base-nextEmit) > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:base]) if debug && nextEmit != base { fmt.Println("emitted ", base-nextEmit, "literals") diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go index ebc332ad..4f45206a 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_amd64.go +++ b/vendor/github.com/klauspost/compress/s2/encode_amd64.go @@ -3,6 +3,8 @@ package s2 +import "github.com/klauspost/compress/internal/race" + const hasAmd64Asm = true // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It @@ -14,6 +16,9 @@ const hasAmd64Asm = true // len(dst) >= MaxEncodedLen(len(src)) && // minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlock(dst, src []byte) (d int) { + race.ReadSlice(src) + race.WriteSlice(dst) + const ( // Use 12 bit table when less than... limit12B = 16 << 10 @@ -50,6 +55,9 @@ func encodeBlock(dst, src []byte) (d int) { // len(dst) >= MaxEncodedLen(len(src)) && // minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBetter(dst, src []byte) (d int) { + race.ReadSlice(src) + race.WriteSlice(dst) + const ( // Use 12 bit table when less than... 
limit12B = 16 << 10 @@ -86,6 +94,9 @@ func encodeBlockBetter(dst, src []byte) (d int) { // len(dst) >= MaxEncodedLen(len(src)) && // minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockSnappy(dst, src []byte) (d int) { + race.ReadSlice(src) + race.WriteSlice(dst) + const ( // Use 12 bit table when less than... limit12B = 16 << 10 @@ -121,6 +132,9 @@ func encodeBlockSnappy(dst, src []byte) (d int) { // len(dst) >= MaxEncodedLen(len(src)) && // minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBetterSnappy(dst, src []byte) (d int) { + race.ReadSlice(src) + race.WriteSlice(dst) + const ( // Use 12 bit table when less than... limit12B = 16 << 10 diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s index 5f110d19..2ff5b334 100644 --- a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s +++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s @@ -100,6 +100,15 @@ repeat_extend_back_loop_encodeBlockAsm: JNZ repeat_extend_back_loop_encodeBlockAsm repeat_extend_back_end_encodeBlockAsm: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 5(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_encodeBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +repeat_dst_size_check_encodeBlockAsm: MOVL 12(SP), BX CMPL BX, SI JEQ emit_literal_done_repeat_emit_encodeBlockAsm @@ -1513,6 +1522,15 @@ repeat_extend_back_loop_encodeBlockAsm4MB: JNZ repeat_extend_back_loop_encodeBlockAsm4MB repeat_extend_back_end_encodeBlockAsm4MB: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 4(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_encodeBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +repeat_dst_size_check_encodeBlockAsm4MB: MOVL 12(SP), BX CMPL BX, SI JEQ emit_literal_done_repeat_emit_encodeBlockAsm4MB @@ -2828,6 +2846,15 @@ repeat_extend_back_loop_encodeBlockAsm12B: JNZ repeat_extend_back_loop_encodeBlockAsm12B repeat_extend_back_end_encodeBlockAsm12B: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 3(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_encodeBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +repeat_dst_size_check_encodeBlockAsm12B: MOVL 12(SP), BX CMPL BX, SI JEQ emit_literal_done_repeat_emit_encodeBlockAsm12B @@ -3903,6 +3930,15 @@ repeat_extend_back_loop_encodeBlockAsm10B: JNZ repeat_extend_back_loop_encodeBlockAsm10B repeat_extend_back_end_encodeBlockAsm10B: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 3(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_encodeBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +repeat_dst_size_check_encodeBlockAsm10B: MOVL 12(SP), BX CMPL BX, SI JEQ emit_literal_done_repeat_emit_encodeBlockAsm10B @@ -4978,6 +5014,15 @@ repeat_extend_back_loop_encodeBlockAsm8B: JNZ repeat_extend_back_loop_encodeBlockAsm8B repeat_extend_back_end_encodeBlockAsm8B: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 3(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_encodeBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +repeat_dst_size_check_encodeBlockAsm8B: MOVL 12(SP), BX CMPL BX, SI JEQ emit_literal_done_repeat_emit_encodeBlockAsm8B @@ -10756,6 +10801,15 @@ repeat_extend_back_loop_encodeSnappyBlockAsm: JNZ repeat_extend_back_loop_encodeSnappyBlockAsm repeat_extend_back_end_encodeSnappyBlockAsm: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 5(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +repeat_dst_size_check_encodeSnappyBlockAsm: MOVL 12(SP), BX CMPL BX, SI JEQ 
emit_literal_done_repeat_emit_encodeSnappyBlockAsm @@ -11678,6 +11732,15 @@ repeat_extend_back_loop_encodeSnappyBlockAsm64K: JNZ repeat_extend_back_loop_encodeSnappyBlockAsm64K repeat_extend_back_end_encodeSnappyBlockAsm64K: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 3(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +repeat_dst_size_check_encodeSnappyBlockAsm64K: MOVL 12(SP), BX CMPL BX, SI JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K @@ -12504,6 +12567,15 @@ repeat_extend_back_loop_encodeSnappyBlockAsm12B: JNZ repeat_extend_back_loop_encodeSnappyBlockAsm12B repeat_extend_back_end_encodeSnappyBlockAsm12B: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 3(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_encodeSnappyBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +repeat_dst_size_check_encodeSnappyBlockAsm12B: MOVL 12(SP), BX CMPL BX, SI JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B @@ -13330,6 +13402,15 @@ repeat_extend_back_loop_encodeSnappyBlockAsm10B: JNZ repeat_extend_back_loop_encodeSnappyBlockAsm10B repeat_extend_back_end_encodeSnappyBlockAsm10B: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 3(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_encodeSnappyBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +repeat_dst_size_check_encodeSnappyBlockAsm10B: MOVL 12(SP), BX CMPL BX, SI JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B @@ -14156,6 +14237,15 @@ repeat_extend_back_loop_encodeSnappyBlockAsm8B: JNZ repeat_extend_back_loop_encodeSnappyBlockAsm8B repeat_extend_back_end_encodeSnappyBlockAsm8B: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 3(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_encodeSnappyBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +repeat_dst_size_check_encodeSnappyBlockAsm8B: MOVL 12(SP), BX CMPL BX, SI JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B @@ -17949,6 +18039,15 @@ repeat_extend_back_loop_calcBlockSize: JNZ repeat_extend_back_loop_calcBlockSize repeat_extend_back_end_calcBlockSize: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 5(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_calcBlockSize + MOVQ $0x00000000, ret+24(FP) + RET + +repeat_dst_size_check_calcBlockSize: MOVL 12(SP), BX CMPL BX, SI JEQ emit_literal_done_repeat_emit_calcBlockSize @@ -18531,6 +18630,15 @@ repeat_extend_back_loop_calcBlockSizeSmall: JNZ repeat_extend_back_loop_calcBlockSizeSmall repeat_extend_back_end_calcBlockSizeSmall: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 3(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_calcBlockSizeSmall + MOVQ $0x00000000, ret+24(FP) + RET + +repeat_dst_size_check_calcBlockSizeSmall: MOVL 12(SP), BX CMPL BX, SI JEQ emit_literal_done_repeat_emit_calcBlockSizeSmall diff --git a/vendor/github.com/klauspost/compress/s2/reader.go b/vendor/github.com/klauspost/compress/s2/reader.go index 2f01a398..8372d752 100644 --- a/vendor/github.com/klauspost/compress/s2/reader.go +++ b/vendor/github.com/klauspost/compress/s2/reader.go @@ -104,12 +104,14 @@ func ReaderIgnoreStreamIdentifier() ReaderOption { // For each chunk with the ID, the callback is called with the content. // Any returned non-nil error will abort decompression. // Only one callback per ID is supported, latest sent will be used. +// You can peek the stream, triggering the callback, by doing a Read with a 0 +// byte buffer. 
func ReaderSkippableCB(id uint8, fn func(r io.Reader) error) ReaderOption { return func(r *Reader) error { if id < 0x80 || id > 0xfd { return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfd (inclusive)") } - r.skippableCB[id] = fn + r.skippableCB[id-0x80] = fn return nil } } @@ -128,7 +130,7 @@ type Reader struct { err error decoded []byte buf []byte - skippableCB [0x80]func(r io.Reader) error + skippableCB [0xff - 0x80]func(r io.Reader) error blockStart int64 // Uncompressed offset at start of current. index *Index @@ -201,7 +203,7 @@ func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { // The supplied slice does not need to be the size of the read. func (r *Reader) skippable(tmp []byte, n int, allowEOF bool, id uint8) (ok bool) { if id < 0x80 { - r.err = fmt.Errorf("interbal error: skippable id < 0x80") + r.err = fmt.Errorf("internal error: skippable id < 0x80") return false } if fn := r.skippableCB[id-0x80]; fn != nil { @@ -450,6 +452,12 @@ func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, e for toWrite := range queue { entry := <-toWrite reUse <- toWrite + if hasErr() || entry == nil { + if entry != nil { + writtenBlocks <- entry + } + continue + } if hasErr() { writtenBlocks <- entry continue @@ -469,13 +477,13 @@ func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, e } }() - // Reader defer func() { - close(queue) if r.err != nil { - err = r.err setErr(r.err) + } else if err != nil { + setErr(err) } + close(queue) wg.Wait() if err == nil { err = aErr @@ -483,6 +491,7 @@ func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, e written = aWritten }() + // Reader for !hasErr() { if !r.readFull(r.buf[:4], true) { if r.err == io.EOF { @@ -551,11 +560,13 @@ func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, e if err != nil { writtenBlocks <- decoded setErr(err) + entry <- nil return } if !r.ignoreCRC && crc(decoded) != checksum { writtenBlocks <- decoded setErr(ErrCRC) + entry <- nil return } entry <- decoded @@ -1048,15 +1059,17 @@ func (r *Reader) ReadByte() (byte, error) { } // SkippableCB will register a callback for chunks with the specified ID. -// ID must be a Reserved skippable chunks ID, 0x80-0xfe (inclusive). +// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive). // For each chunk with the ID, the callback is called with the content. // Any returned non-nil error will abort decompression. // Only one callback per ID is supported, latest sent will be used. // Sending a nil function will disable previous callbacks. +// You can peek the stream, triggering the callback, by doing a Read with a 0 +// byte buffer. 
func (r *Reader) SkippableCB(id uint8, fn func(r io.Reader) error) error { - if id < 0x80 || id > chunkTypePadding { + if id < 0x80 || id >= chunkTypePadding { return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfe (inclusive)") } - r.skippableCB[id] = fn + r.skippableCB[id-0x80] = fn return nil } diff --git a/vendor/github.com/klauspost/compress/s2/s2.go b/vendor/github.com/klauspost/compress/s2/s2.go index dae3f731..72bcb494 100644 --- a/vendor/github.com/klauspost/compress/s2/s2.go +++ b/vendor/github.com/klauspost/compress/s2/s2.go @@ -37,6 +37,8 @@ package s2 import ( "bytes" "hash/crc32" + + "github.com/klauspost/compress/internal/race" ) /* @@ -112,6 +114,8 @@ var crcTable = crc32.MakeTable(crc32.Castagnoli) // crc implements the checksum specified in section 3 of // https://github.com/google/snappy/blob/master/framing_format.txt func crc(b []byte) uint32 { + race.ReadSlice(b) + c := crc32.Update(0, crcTable, b) return c>>15 | c<<17 + 0xa282ead8 } diff --git a/vendor/github.com/klauspost/compress/s2/writer.go b/vendor/github.com/klauspost/compress/s2/writer.go index 089cd36d..1253ea67 100644 --- a/vendor/github.com/klauspost/compress/s2/writer.go +++ b/vendor/github.com/klauspost/compress/s2/writer.go @@ -13,6 +13,8 @@ import ( "io" "runtime" "sync" + + "github.com/klauspost/compress/internal/race" ) const ( @@ -213,7 +215,7 @@ func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) { return 0, err } if len(w.ibuf) > 0 { - err := w.Flush() + err := w.AsyncFlush() if err != nil { return 0, err } @@ -223,7 +225,7 @@ func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) { if err := w.EncodeBuffer(buf); err != nil { return 0, err } - return int64(len(buf)), w.Flush() + return int64(len(buf)), w.AsyncFlush() } for { inbuf := w.buffers.Get().([]byte)[:w.blockSize+obufHeaderLen] @@ -271,7 +273,7 @@ func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) { return fmt.Errorf("skippable block excessed maximum size") } var header [4]byte - chunkLen := 4 + len(data) + chunkLen := len(data) header[0] = id header[1] = uint8(chunkLen >> 0) header[2] = uint8(chunkLen >> 8) @@ -282,7 +284,7 @@ func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) { if err = w.err(err); err != nil { return err } - if n != len(data) { + if n != len(b) { return w.err(io.ErrShortWrite) } w.written += int64(n) @@ -303,9 +305,7 @@ func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) { if err := write(header[:]); err != nil { return err } - if err := write(data); err != nil { - return err - } + return write(data) } // Create output... @@ -354,7 +354,7 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) { } // Flush queued data first. if len(w.ibuf) > 0 { - err := w.Flush() + err := w.AsyncFlush() if err != nil { return err } @@ -385,6 +385,8 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) { buf = buf[len(uncompressed):] // Get an output buffer. obuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen] + race.WriteSlice(obuf) + output := make(chan result) // Queue output now, so we keep order. w.output <- output @@ -393,6 +395,8 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) { } w.uncompWritten += int64(len(uncompressed)) go func() { + race.ReadSlice(uncompressed) + checksum := crc(uncompressed) // Set to uncompressed. @@ -712,9 +716,9 @@ func (w *Writer) writeSync(p []byte) (nRet int, errRet error) { return nRet, nil } -// Flush flushes the Writer to its underlying io.Writer. -// This does not apply padding. 
-func (w *Writer) Flush() error { +// AsyncFlush writes any buffered bytes to a block and starts compressing it. +// It does not wait for the output has been written as Flush() does. +func (w *Writer) AsyncFlush() error { if err := w.err(nil); err != nil { return err } @@ -734,6 +738,15 @@ func (w *Writer) Flush() error { } } } + return w.err(nil) +} + +// Flush flushes the Writer to its underlying io.Writer. +// This does not apply padding. +func (w *Writer) Flush() error { + if err := w.AsyncFlush(); err != nil { + return err + } if w.output == nil { return w.err(nil) } diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go index 15b76033..805f5e7b 100644 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go @@ -67,195 +67,200 @@ const ( // Keep index -1 as unknown UNKNOWN = -1 - // Add features - ADX FeatureID = iota // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) - AESNI // Advanced Encryption Standard New Instructions - AMD3DNOW // AMD 3DNOW - AMD3DNOWEXT // AMD 3DNowExt - AMXBF16 // Tile computational operations on BFLOAT16 numbers - AMXFP16 // Tile computational operations on FP16 numbers - AMXINT8 // Tile computational operations on 8-bit integers - AMXTILE // Tile architecture - APX_F // Intel APX - AVX // AVX functions - AVX10 // If set the Intel AVX10 Converged Vector ISA is supported - AVX10_128 // If set indicates that AVX10 128-bit vector support is present - AVX10_256 // If set indicates that AVX10 256-bit vector support is present - AVX10_512 // If set indicates that AVX10 512-bit vector support is present - AVX2 // AVX2 functions - AVX512BF16 // AVX-512 BFLOAT16 Instructions - AVX512BITALG // AVX-512 Bit Algorithms - AVX512BW // AVX-512 Byte and Word Instructions - AVX512CD // AVX-512 Conflict Detection Instructions - AVX512DQ // AVX-512 Doubleword and Quadword Instructions - AVX512ER // AVX-512 Exponential and Reciprocal Instructions - AVX512F // AVX-512 Foundation - AVX512FP16 // AVX-512 FP16 Instructions - AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions - AVX512PF // AVX-512 Prefetch Instructions - AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions - AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2 - AVX512VL // AVX-512 Vector Length Extensions - AVX512VNNI // AVX-512 Vector Neural Network Instructions - AVX512VP2INTERSECT // AVX-512 Intersect for D/Q - AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword - AVXIFMA // AVX-IFMA instructions - AVXNECONVERT // AVX-NE-CONVERT instructions - AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one - AVXVNNI // AVX (VEX encoded) VNNI neural network instructions - AVXVNNIINT8 // AVX-VNNI-INT8 instructions - BHI_CTRL // Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 - BMI1 // Bit Manipulation Instruction Set 1 - BMI2 // Bit Manipulation Instruction Set 2 - CETIBT // Intel CET Indirect Branch Tracking - CETSS // Intel CET Shadow Stack - CLDEMOTE // Cache Line Demote - CLMUL // Carry-less Multiplication - CLZERO // CLZERO instruction supported - CMOV // i686 CMOV - CMPCCXADD // CMPCCXADD instructions - CMPSB_SCADBS_SHORT // Fast short CMPSB and SCASB - CMPXCHG8 // CMPXCHG8 instruction - CPBOOST // Core Performance Boost - CPPC // AMD: Collaborative Processor Performance Control - CX16 // CMPXCHG16B Instruction - EFER_LMSLE_UNS // AMD: =Core::X86::Msr::EFER[LMSLE] is 
not supported, and MBZ - ENQCMD // Enqueue Command - ERMS // Enhanced REP MOVSB/STOSB - F16C // Half-precision floating-point conversion - FLUSH_L1D // Flush L1D cache - FMA3 // Intel FMA 3. Does not imply AVX. - FMA4 // Bulldozer FMA4 functions - FP128 // AMD: When set, the internal FP/SIMD execution datapath is no more than 128-bits wide - FP256 // AMD: When set, the internal FP/SIMD execution datapath is no more than 256-bits wide - FSRM // Fast Short Rep Mov - FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9 - FXSROPT // FXSAVE/FXRSTOR optimizations - GFNI // Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. - HLE // Hardware Lock Elision - HRESET // If set CPU supports history reset and the IA32_HRESET_ENABLE MSR - HTT // Hyperthreading (enabled) - HWA // Hardware assert supported. Indicates support for MSRC001_10 - HYBRID_CPU // This part has CPUs of more than one type. - HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors - IA32_ARCH_CAP // IA32_ARCH_CAPABILITIES MSR (Intel) - IA32_CORE_CAP // IA32_CORE_CAPABILITIES MSR - IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) - IBRS // AMD: Indirect Branch Restricted Speculation - IBRS_PREFERRED // AMD: IBRS is preferred over software solution - IBRS_PROVIDES_SMP // AMD: IBRS provides Same Mode Protection - IBS // Instruction Based Sampling (AMD) - IBSBRNTRGT // Instruction Based Sampling Feature (AMD) - IBSFETCHSAM // Instruction Based Sampling Feature (AMD) - IBSFFV // Instruction Based Sampling Feature (AMD) - IBSOPCNT // Instruction Based Sampling Feature (AMD) - IBSOPCNTEXT // Instruction Based Sampling Feature (AMD) - IBSOPSAM // Instruction Based Sampling Feature (AMD) - IBSRDWROPCNT // Instruction Based Sampling Feature (AMD) - IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD) - IBS_FETCH_CTLX // AMD: IBS fetch control extended MSR supported - IBS_OPDATA4 // AMD: IBS op data 4 MSR supported - IBS_OPFUSE // AMD: Indicates support for IbsOpFuse - IBS_PREVENTHOST // Disallowing IBS use by the host supported - IBS_ZEN4 // AMD: Fetch and Op IBS support IBS extensions added with Zen4 - IDPRED_CTRL // IPRED_DIS - INT_WBINVD // WBINVD/WBNOINVD are interruptible. - INVLPGB // NVLPGB and TLBSYNC instruction supported - KEYLOCKER // Key locker - KEYLOCKERW // Key locker wide - LAHF // LAHF/SAHF in long mode - LAM // If set, CPU supports Linear Address Masking - LBRVIRT // LBR virtualization - LZCNT // LZCNT instruction - MCAOVERFLOW // MCA overflow recovery support. - MCDT_NO // Processor do not exhibit MXCSR Configuration Dependent Timing behavior and do not need to mitigate it. - MCOMMIT // MCOMMIT instruction supported - MD_CLEAR // VERW clears CPU buffers - MMX // standard MMX - MMXEXT // SSE integer functions or AMD MMX ext - MOVBE // MOVBE instruction (big-endian) - MOVDIR64B // Move 64 Bytes as Direct Store - MOVDIRI // Move Doubleword as Direct Store - MOVSB_ZL // Fast Zero-Length MOVSB - MOVU // AMD: MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. 
MOVUPD is more efficient than MOVLPD/MOVHPD - MPX // Intel MPX (Memory Protection Extensions) - MSRIRC // Instruction Retired Counter MSR available - MSRLIST // Read/Write List of Model Specific Registers - MSR_PAGEFLUSH // Page Flush MSR available - NRIPS // Indicates support for NRIP save on VMEXIT - NX // NX (No-Execute) bit - OSXSAVE // XSAVE enabled by OS - PCONFIG // PCONFIG for Intel Multi-Key Total Memory Encryption - POPCNT // POPCNT instruction - PPIN // AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled - PREFETCHI // PREFETCHIT0/1 instructions - PSFD // Predictive Store Forward Disable - RDPRU // RDPRU instruction supported - RDRAND // RDRAND instruction is available - RDSEED // RDSEED instruction is available - RDTSCP // RDTSCP Instruction - RRSBA_CTRL // Restricted RSB Alternate - RTM // Restricted Transactional Memory - RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort. - SERIALIZE // Serialize Instruction Execution - SEV // AMD Secure Encrypted Virtualization supported - SEV_64BIT // AMD SEV guest execution only allowed from a 64-bit host - SEV_ALTERNATIVE // AMD SEV Alternate Injection supported - SEV_DEBUGSWAP // Full debug state swap supported for SEV-ES guests - SEV_ES // AMD SEV Encrypted State supported - SEV_RESTRICTED // AMD SEV Restricted Injection supported - SEV_SNP // AMD SEV Secure Nested Paging supported - SGX // Software Guard Extensions - SGXLC // Software Guard Extensions Launch Control - SHA // Intel SHA Extensions - SME // AMD Secure Memory Encryption supported - SME_COHERENT // AMD Hardware cache coherency across encryption domains enforced - SPEC_CTRL_SSBD // Speculative Store Bypass Disable - SRBDS_CTRL // SRBDS mitigation MSR available - SSE // SSE functions - SSE2 // P4 SSE functions - SSE3 // Prescott SSE3 functions - SSE4 // Penryn SSE4.1 functions - SSE42 // Nehalem SSE4.2 functions - SSE4A // AMD Barcelona microarchitecture SSE4a instructions - SSSE3 // Conroe SSSE3 functions - STIBP // Single Thread Indirect Branch Predictors - STIBP_ALWAYSON // AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On - STOSB_SHORT // Fast short STOSB - SUCCOR // Software uncorrectable error containment and recovery capability. - SVM // AMD Secure Virtual Machine - SVMDA // Indicates support for the SVM decode assists. - SVMFBASID // SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control - SVML // AMD SVM lock. Indicates support for SVM-Lock. - SVMNP // AMD SVM nested paging - SVMPF // SVM pause intercept filter. Indicates support for the pause intercept filter - SVMPFT // SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold - SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions. - SYSEE // SYSENTER and SYSEXIT instructions - TBM // AMD Trailing Bit Manipulation - TDX_GUEST // Intel Trust Domain Extensions Guest - TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations - TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. - TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. 
- TSCRATEMSR // MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 - TSXLDTRK // Intel TSX Suspend Load Address Tracking - VAES // Vector AES. AVX(512) versions requires additional checks. - VMCBCLEAN // VMCB clean bits. Indicates support for VMCB clean bits. - VMPL // AMD VM Permission Levels supported - VMSA_REGPROT // AMD VMSA Register Protection supported - VMX // Virtual Machine Extensions - VPCLMULQDQ // Carry-Less Multiplication Quadword. Requires AVX for 3 register versions. - VTE // AMD Virtual Transparent Encryption supported - WAITPKG // TPAUSE, UMONITOR, UMWAIT - WBNOINVD // Write Back and Do Not Invalidate Cache - WRMSRNS // Non-Serializing Write to Model Specific Register - X87 // FPU - XGETBV1 // Supports XGETBV with ECX = 1 - XOP // Bulldozer XOP functions - XSAVE // XSAVE, XRESTOR, XSETBV, XGETBV - XSAVEC // Supports XSAVEC and the compacted form of XRSTOR. - XSAVEOPT // XSAVEOPT available - XSAVES // Supports XSAVES/XRSTORS and IA32_XSS + // x86 features + ADX FeatureID = iota // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + AESNI // Advanced Encryption Standard New Instructions + AMD3DNOW // AMD 3DNOW + AMD3DNOWEXT // AMD 3DNowExt + AMXBF16 // Tile computational operations on BFLOAT16 numbers + AMXFP16 // Tile computational operations on FP16 numbers + AMXINT8 // Tile computational operations on 8-bit integers + AMXTILE // Tile architecture + APX_F // Intel APX + AVX // AVX functions + AVX10 // If set the Intel AVX10 Converged Vector ISA is supported + AVX10_128 // If set indicates that AVX10 128-bit vector support is present + AVX10_256 // If set indicates that AVX10 256-bit vector support is present + AVX10_512 // If set indicates that AVX10 512-bit vector support is present + AVX2 // AVX2 functions + AVX512BF16 // AVX-512 BFLOAT16 Instructions + AVX512BITALG // AVX-512 Bit Algorithms + AVX512BW // AVX-512 Byte and Word Instructions + AVX512CD // AVX-512 Conflict Detection Instructions + AVX512DQ // AVX-512 Doubleword and Quadword Instructions + AVX512ER // AVX-512 Exponential and Reciprocal Instructions + AVX512F // AVX-512 Foundation + AVX512FP16 // AVX-512 FP16 Instructions + AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions + AVX512PF // AVX-512 Prefetch Instructions + AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions + AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2 + AVX512VL // AVX-512 Vector Length Extensions + AVX512VNNI // AVX-512 Vector Neural Network Instructions + AVX512VP2INTERSECT // AVX-512 Intersect for D/Q + AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword + AVXIFMA // AVX-IFMA instructions + AVXNECONVERT // AVX-NE-CONVERT instructions + AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one + AVXVNNI // AVX (VEX encoded) VNNI neural network instructions + AVXVNNIINT8 // AVX-VNNI-INT8 instructions + BHI_CTRL // Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 + BMI1 // Bit Manipulation Instruction Set 1 + BMI2 // Bit Manipulation Instruction Set 2 + CETIBT // Intel CET Indirect Branch Tracking + CETSS // Intel CET Shadow Stack + CLDEMOTE // Cache Line Demote + CLMUL // Carry-less Multiplication + CLZERO // CLZERO instruction supported + CMOV // i686 CMOV + CMPCCXADD // CMPCCXADD instructions + CMPSB_SCADBS_SHORT // Fast short CMPSB and SCASB + CMPXCHG8 // CMPXCHG8 instruction + CPBOOST // Core Performance Boost + CPPC // AMD: Collaborative Processor 
Performance Control + CX16 // CMPXCHG16B Instruction + EFER_LMSLE_UNS // AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ + ENQCMD // Enqueue Command + ERMS // Enhanced REP MOVSB/STOSB + F16C // Half-precision floating-point conversion + FLUSH_L1D // Flush L1D cache + FMA3 // Intel FMA 3. Does not imply AVX. + FMA4 // Bulldozer FMA4 functions + FP128 // AMD: When set, the internal FP/SIMD execution datapath is no more than 128-bits wide + FP256 // AMD: When set, the internal FP/SIMD execution datapath is no more than 256-bits wide + FSRM // Fast Short Rep Mov + FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9 + FXSROPT // FXSAVE/FXRSTOR optimizations + GFNI // Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. + HLE // Hardware Lock Elision + HRESET // If set CPU supports history reset and the IA32_HRESET_ENABLE MSR + HTT // Hyperthreading (enabled) + HWA // Hardware assert supported. Indicates support for MSRC001_10 + HYBRID_CPU // This part has CPUs of more than one type. + HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors + IA32_ARCH_CAP // IA32_ARCH_CAPABILITIES MSR (Intel) + IA32_CORE_CAP // IA32_CORE_CAPABILITIES MSR + IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) + IBPB_BRTYPE // Indicates that MSR 49h (PRED_CMD) bit 0 (IBPB) flushes all branch type predictions from the CPU branch predictor + IBRS // AMD: Indirect Branch Restricted Speculation + IBRS_PREFERRED // AMD: IBRS is preferred over software solution + IBRS_PROVIDES_SMP // AMD: IBRS provides Same Mode Protection + IBS // Instruction Based Sampling (AMD) + IBSBRNTRGT // Instruction Based Sampling Feature (AMD) + IBSFETCHSAM // Instruction Based Sampling Feature (AMD) + IBSFFV // Instruction Based Sampling Feature (AMD) + IBSOPCNT // Instruction Based Sampling Feature (AMD) + IBSOPCNTEXT // Instruction Based Sampling Feature (AMD) + IBSOPSAM // Instruction Based Sampling Feature (AMD) + IBSRDWROPCNT // Instruction Based Sampling Feature (AMD) + IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD) + IBS_FETCH_CTLX // AMD: IBS fetch control extended MSR supported + IBS_OPDATA4 // AMD: IBS op data 4 MSR supported + IBS_OPFUSE // AMD: Indicates support for IbsOpFuse + IBS_PREVENTHOST // Disallowing IBS use by the host supported + IBS_ZEN4 // AMD: Fetch and Op IBS support IBS extensions added with Zen4 + IDPRED_CTRL // IPRED_DIS + INT_WBINVD // WBINVD/WBNOINVD are interruptible. + INVLPGB // NVLPGB and TLBSYNC instruction supported + KEYLOCKER // Key locker + KEYLOCKERW // Key locker wide + LAHF // LAHF/SAHF in long mode + LAM // If set, CPU supports Linear Address Masking + LBRVIRT // LBR virtualization + LZCNT // LZCNT instruction + MCAOVERFLOW // MCA overflow recovery support. + MCDT_NO // Processor do not exhibit MXCSR Configuration Dependent Timing behavior and do not need to mitigate it. + MCOMMIT // MCOMMIT instruction supported + MD_CLEAR // VERW clears CPU buffers + MMX // standard MMX + MMXEXT // SSE integer functions or AMD MMX ext + MOVBE // MOVBE instruction (big-endian) + MOVDIR64B // Move 64 Bytes as Direct Store + MOVDIRI // Move Doubleword as Direct Store + MOVSB_ZL // Fast Zero-Length MOVSB + MOVU // AMD: MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. 
MOVUPD is more efficient than MOVLPD/MOVHPD + MPX // Intel MPX (Memory Protection Extensions) + MSRIRC // Instruction Retired Counter MSR available + MSRLIST // Read/Write List of Model Specific Registers + MSR_PAGEFLUSH // Page Flush MSR available + NRIPS // Indicates support for NRIP save on VMEXIT + NX // NX (No-Execute) bit + OSXSAVE // XSAVE enabled by OS + PCONFIG // PCONFIG for Intel Multi-Key Total Memory Encryption + POPCNT // POPCNT instruction + PPIN // AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled + PREFETCHI // PREFETCHIT0/1 instructions + PSFD // Predictive Store Forward Disable + RDPRU // RDPRU instruction supported + RDRAND // RDRAND instruction is available + RDSEED // RDSEED instruction is available + RDTSCP // RDTSCP Instruction + RRSBA_CTRL // Restricted RSB Alternate + RTM // Restricted Transactional Memory + RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort. + SBPB // Indicates support for the Selective Branch Predictor Barrier + SERIALIZE // Serialize Instruction Execution + SEV // AMD Secure Encrypted Virtualization supported + SEV_64BIT // AMD SEV guest execution only allowed from a 64-bit host + SEV_ALTERNATIVE // AMD SEV Alternate Injection supported + SEV_DEBUGSWAP // Full debug state swap supported for SEV-ES guests + SEV_ES // AMD SEV Encrypted State supported + SEV_RESTRICTED // AMD SEV Restricted Injection supported + SEV_SNP // AMD SEV Secure Nested Paging supported + SGX // Software Guard Extensions + SGXLC // Software Guard Extensions Launch Control + SHA // Intel SHA Extensions + SME // AMD Secure Memory Encryption supported + SME_COHERENT // AMD Hardware cache coherency across encryption domains enforced + SPEC_CTRL_SSBD // Speculative Store Bypass Disable + SRBDS_CTRL // SRBDS mitigation MSR available + SRSO_MSR_FIX // Indicates that software may use MSR BP_CFG[BpSpecReduce] to mitigate SRSO. + SRSO_NO // Indicates the CPU is not subject to the SRSO vulnerability + SRSO_USER_KERNEL_NO // Indicates the CPU is not subject to the SRSO vulnerability across user/kernel boundaries + SSE // SSE functions + SSE2 // P4 SSE functions + SSE3 // Prescott SSE3 functions + SSE4 // Penryn SSE4.1 functions + SSE42 // Nehalem SSE4.2 functions + SSE4A // AMD Barcelona microarchitecture SSE4a instructions + SSSE3 // Conroe SSSE3 functions + STIBP // Single Thread Indirect Branch Predictors + STIBP_ALWAYSON // AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On + STOSB_SHORT // Fast short STOSB + SUCCOR // Software uncorrectable error containment and recovery capability. + SVM // AMD Secure Virtual Machine + SVMDA // Indicates support for the SVM decode assists. + SVMFBASID // SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control + SVML // AMD SVM lock. Indicates support for SVM-Lock. + SVMNP // AMD SVM nested paging + SVMPF // SVM pause intercept filter. Indicates support for the pause intercept filter + SVMPFT // SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold + SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions. 
+ SYSEE // SYSENTER and SYSEXIT instructions + TBM // AMD Trailing Bit Manipulation + TDX_GUEST // Intel Trust Domain Extensions Guest + TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations + TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. + TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. + TSCRATEMSR // MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 + TSXLDTRK // Intel TSX Suspend Load Address Tracking + VAES // Vector AES. AVX(512) versions requires additional checks. + VMCBCLEAN // VMCB clean bits. Indicates support for VMCB clean bits. + VMPL // AMD VM Permission Levels supported + VMSA_REGPROT // AMD VMSA Register Protection supported + VMX // Virtual Machine Extensions + VPCLMULQDQ // Carry-Less Multiplication Quadword. Requires AVX for 3 register versions. + VTE // AMD Virtual Transparent Encryption supported + WAITPKG // TPAUSE, UMONITOR, UMWAIT + WBNOINVD // Write Back and Do Not Invalidate Cache + WRMSRNS // Non-Serializing Write to Model Specific Register + X87 // FPU + XGETBV1 // Supports XGETBV with ECX = 1 + XOP // Bulldozer XOP functions + XSAVE // XSAVE, XRESTOR, XSETBV, XGETBV + XSAVEC // Supports XSAVEC and the compacted form of XRSTOR. + XSAVEOPT // XSAVEOPT available + XSAVES // Supports XSAVES/XRSTORS and IA32_XSS // ARM features: AESARM // AES instructions @@ -309,10 +314,11 @@ type CPUInfo struct { L2 int // L2 Cache (per core or shared). Will be -1 if undetected L3 int // L3 Cache (per core, per ccx or shared). Will be -1 if undetected } - SGX SGXSupport - AVX10Level uint8 - maxFunc uint32 - maxExFunc uint32 + SGX SGXSupport + AMDMemEncryption AMDMemEncryptionSupport + AVX10Level uint8 + maxFunc uint32 + maxExFunc uint32 } var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) @@ -1079,6 +1085,32 @@ func hasSGX(available, lc bool) (rval SGXSupport) { return } +type AMDMemEncryptionSupport struct { + Available bool + CBitPossition uint32 + NumVMPL uint32 + PhysAddrReduction uint32 + NumEntryptedGuests uint32 + MinSevNoEsAsid uint32 +} + +func hasAMDMemEncryption(available bool) (rval AMDMemEncryptionSupport) { + rval.Available = available + if !available { + return + } + + _, b, c, d := cpuidex(0x8000001f, 0) + + rval.CBitPossition = b & 0x3f + rval.PhysAddrReduction = (b >> 6) & 0x3F + rval.NumVMPL = (b >> 12) & 0xf + rval.NumEntryptedGuests = c + rval.MinSevNoEsAsid = d + + return +} + func support() flagSet { var fs flagSet mfi := maxFunctionID() @@ -1418,6 +1450,15 @@ func support() flagSet { fs.setIf((a>>24)&1 == 1, VMSA_REGPROT) } + if maxExtendedFunction() >= 0x80000021 && vend == AMD { + a, _, _, _ := cpuid(0x80000021) + fs.setIf((a>>31)&1 == 1, SRSO_MSR_FIX) + fs.setIf((a>>30)&1 == 1, SRSO_USER_KERNEL_NO) + fs.setIf((a>>29)&1 == 1, SRSO_NO) + fs.setIf((a>>28)&1 == 1, IBPB_BRTYPE) + fs.setIf((a>>27)&1 == 1, SBPB) + } + if mfi >= 0x20 { // Microsoft has decided to purposefully hide the information // of the guest TEE when VMs are being created using Hyper-V. 
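For context on the cpuid.go changes above (the new SBPB/IBPB_BRTYPE/SRSO_* feature flags and the CPUInfo.AMDMemEncryption block read from CPUID leaf 0x8000001f): a minimal sketch, not part of the vendored diff, of how downstream code could query them. It assumes the package-level cpuid.CPU variable and its Supports helper from github.com/klauspost/cpuid/v2, which belong to that package's public API but are not shown in this excerpt.

package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	// Speculation-control flags added in this cpuid bump.
	fmt.Println("SBPB:        ", cpuid.CPU.Supports(cpuid.SBPB))
	fmt.Println("IBPB_BRTYPE: ", cpuid.CPU.Supports(cpuid.IBPB_BRTYPE))
	fmt.Println("SRSO_NO:     ", cpuid.CPU.Supports(cpuid.SRSO_NO))

	// AMDMemEncryption is only populated when SME or SEV is detected,
	// mirroring the gate added in detect_x86.go below.
	if cpuid.CPU.Supports(cpuid.SME) || cpuid.CPU.Supports(cpuid.SEV) {
		enc := cpuid.CPU.AMDMemEncryption
		fmt.Printf("AMD memory encryption: available=%v cbit=%d vmpl=%d encrypted-guests=%d\n",
			enc.Available, enc.CBitPossition, enc.NumVMPL, enc.NumEntryptedGuests)
	}
}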
diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go index c7dfa125..799b400c 100644 --- a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go +++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go @@ -27,6 +27,7 @@ func addInfo(c *CPUInfo, safe bool) { c.Family, c.Model, c.Stepping = familyModel() c.featureSet = support() c.SGX = hasSGX(c.featureSet.inSet(SGX), c.featureSet.inSet(SGXLC)) + c.AMDMemEncryption = hasAMDMemEncryption(c.featureSet.inSet(SME) || c.featureSet.inSet(SEV)) c.ThreadsPerCore = threadsPerCore() c.LogicalCores = logicalCores() c.PhysicalCores = physicalCores() diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go index 43bd05f5..57a085a5 100644 --- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go +++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -81,152 +81,157 @@ func _() { _ = x[IA32_ARCH_CAP-71] _ = x[IA32_CORE_CAP-72] _ = x[IBPB-73] - _ = x[IBRS-74] - _ = x[IBRS_PREFERRED-75] - _ = x[IBRS_PROVIDES_SMP-76] - _ = x[IBS-77] - _ = x[IBSBRNTRGT-78] - _ = x[IBSFETCHSAM-79] - _ = x[IBSFFV-80] - _ = x[IBSOPCNT-81] - _ = x[IBSOPCNTEXT-82] - _ = x[IBSOPSAM-83] - _ = x[IBSRDWROPCNT-84] - _ = x[IBSRIPINVALIDCHK-85] - _ = x[IBS_FETCH_CTLX-86] - _ = x[IBS_OPDATA4-87] - _ = x[IBS_OPFUSE-88] - _ = x[IBS_PREVENTHOST-89] - _ = x[IBS_ZEN4-90] - _ = x[IDPRED_CTRL-91] - _ = x[INT_WBINVD-92] - _ = x[INVLPGB-93] - _ = x[KEYLOCKER-94] - _ = x[KEYLOCKERW-95] - _ = x[LAHF-96] - _ = x[LAM-97] - _ = x[LBRVIRT-98] - _ = x[LZCNT-99] - _ = x[MCAOVERFLOW-100] - _ = x[MCDT_NO-101] - _ = x[MCOMMIT-102] - _ = x[MD_CLEAR-103] - _ = x[MMX-104] - _ = x[MMXEXT-105] - _ = x[MOVBE-106] - _ = x[MOVDIR64B-107] - _ = x[MOVDIRI-108] - _ = x[MOVSB_ZL-109] - _ = x[MOVU-110] - _ = x[MPX-111] - _ = x[MSRIRC-112] - _ = x[MSRLIST-113] - _ = x[MSR_PAGEFLUSH-114] - _ = x[NRIPS-115] - _ = x[NX-116] - _ = x[OSXSAVE-117] - _ = x[PCONFIG-118] - _ = x[POPCNT-119] - _ = x[PPIN-120] - _ = x[PREFETCHI-121] - _ = x[PSFD-122] - _ = x[RDPRU-123] - _ = x[RDRAND-124] - _ = x[RDSEED-125] - _ = x[RDTSCP-126] - _ = x[RRSBA_CTRL-127] - _ = x[RTM-128] - _ = x[RTM_ALWAYS_ABORT-129] - _ = x[SERIALIZE-130] - _ = x[SEV-131] - _ = x[SEV_64BIT-132] - _ = x[SEV_ALTERNATIVE-133] - _ = x[SEV_DEBUGSWAP-134] - _ = x[SEV_ES-135] - _ = x[SEV_RESTRICTED-136] - _ = x[SEV_SNP-137] - _ = x[SGX-138] - _ = x[SGXLC-139] - _ = x[SHA-140] - _ = x[SME-141] - _ = x[SME_COHERENT-142] - _ = x[SPEC_CTRL_SSBD-143] - _ = x[SRBDS_CTRL-144] - _ = x[SSE-145] - _ = x[SSE2-146] - _ = x[SSE3-147] - _ = x[SSE4-148] - _ = x[SSE42-149] - _ = x[SSE4A-150] - _ = x[SSSE3-151] - _ = x[STIBP-152] - _ = x[STIBP_ALWAYSON-153] - _ = x[STOSB_SHORT-154] - _ = x[SUCCOR-155] - _ = x[SVM-156] - _ = x[SVMDA-157] - _ = x[SVMFBASID-158] - _ = x[SVML-159] - _ = x[SVMNP-160] - _ = x[SVMPF-161] - _ = x[SVMPFT-162] - _ = x[SYSCALL-163] - _ = x[SYSEE-164] - _ = x[TBM-165] - _ = x[TDX_GUEST-166] - _ = x[TLB_FLUSH_NESTED-167] - _ = x[TME-168] - _ = x[TOPEXT-169] - _ = x[TSCRATEMSR-170] - _ = x[TSXLDTRK-171] - _ = x[VAES-172] - _ = x[VMCBCLEAN-173] - _ = x[VMPL-174] - _ = x[VMSA_REGPROT-175] - _ = x[VMX-176] - _ = x[VPCLMULQDQ-177] - _ = x[VTE-178] - _ = x[WAITPKG-179] - _ = x[WBNOINVD-180] - _ = x[WRMSRNS-181] - _ = x[X87-182] - _ = x[XGETBV1-183] - _ = x[XOP-184] - _ = x[XSAVE-185] - _ = x[XSAVEC-186] - _ = x[XSAVEOPT-187] - _ = x[XSAVES-188] - _ = x[AESARM-189] - _ = x[ARMCPUID-190] - _ = x[ASIMD-191] - _ = x[ASIMDDP-192] - _ = 
x[ASIMDHP-193] - _ = x[ASIMDRDM-194] - _ = x[ATOMICS-195] - _ = x[CRC32-196] - _ = x[DCPOP-197] - _ = x[EVTSTRM-198] - _ = x[FCMA-199] - _ = x[FP-200] - _ = x[FPHP-201] - _ = x[GPA-202] - _ = x[JSCVT-203] - _ = x[LRCPC-204] - _ = x[PMULL-205] - _ = x[SHA1-206] - _ = x[SHA2-207] - _ = x[SHA3-208] - _ = x[SHA512-209] - _ = x[SM3-210] - _ = x[SM4-211] - _ = x[SVE-212] - _ = x[lastID-213] + _ = x[IBPB_BRTYPE-74] + _ = x[IBRS-75] + _ = x[IBRS_PREFERRED-76] + _ = x[IBRS_PROVIDES_SMP-77] + _ = x[IBS-78] + _ = x[IBSBRNTRGT-79] + _ = x[IBSFETCHSAM-80] + _ = x[IBSFFV-81] + _ = x[IBSOPCNT-82] + _ = x[IBSOPCNTEXT-83] + _ = x[IBSOPSAM-84] + _ = x[IBSRDWROPCNT-85] + _ = x[IBSRIPINVALIDCHK-86] + _ = x[IBS_FETCH_CTLX-87] + _ = x[IBS_OPDATA4-88] + _ = x[IBS_OPFUSE-89] + _ = x[IBS_PREVENTHOST-90] + _ = x[IBS_ZEN4-91] + _ = x[IDPRED_CTRL-92] + _ = x[INT_WBINVD-93] + _ = x[INVLPGB-94] + _ = x[KEYLOCKER-95] + _ = x[KEYLOCKERW-96] + _ = x[LAHF-97] + _ = x[LAM-98] + _ = x[LBRVIRT-99] + _ = x[LZCNT-100] + _ = x[MCAOVERFLOW-101] + _ = x[MCDT_NO-102] + _ = x[MCOMMIT-103] + _ = x[MD_CLEAR-104] + _ = x[MMX-105] + _ = x[MMXEXT-106] + _ = x[MOVBE-107] + _ = x[MOVDIR64B-108] + _ = x[MOVDIRI-109] + _ = x[MOVSB_ZL-110] + _ = x[MOVU-111] + _ = x[MPX-112] + _ = x[MSRIRC-113] + _ = x[MSRLIST-114] + _ = x[MSR_PAGEFLUSH-115] + _ = x[NRIPS-116] + _ = x[NX-117] + _ = x[OSXSAVE-118] + _ = x[PCONFIG-119] + _ = x[POPCNT-120] + _ = x[PPIN-121] + _ = x[PREFETCHI-122] + _ = x[PSFD-123] + _ = x[RDPRU-124] + _ = x[RDRAND-125] + _ = x[RDSEED-126] + _ = x[RDTSCP-127] + _ = x[RRSBA_CTRL-128] + _ = x[RTM-129] + _ = x[RTM_ALWAYS_ABORT-130] + _ = x[SBPB-131] + _ = x[SERIALIZE-132] + _ = x[SEV-133] + _ = x[SEV_64BIT-134] + _ = x[SEV_ALTERNATIVE-135] + _ = x[SEV_DEBUGSWAP-136] + _ = x[SEV_ES-137] + _ = x[SEV_RESTRICTED-138] + _ = x[SEV_SNP-139] + _ = x[SGX-140] + _ = x[SGXLC-141] + _ = x[SHA-142] + _ = x[SME-143] + _ = x[SME_COHERENT-144] + _ = x[SPEC_CTRL_SSBD-145] + _ = x[SRBDS_CTRL-146] + _ = x[SRSO_MSR_FIX-147] + _ = x[SRSO_NO-148] + _ = x[SRSO_USER_KERNEL_NO-149] + _ = x[SSE-150] + _ = x[SSE2-151] + _ = x[SSE3-152] + _ = x[SSE4-153] + _ = x[SSE42-154] + _ = x[SSE4A-155] + _ = x[SSSE3-156] + _ = x[STIBP-157] + _ = x[STIBP_ALWAYSON-158] + _ = x[STOSB_SHORT-159] + _ = x[SUCCOR-160] + _ = x[SVM-161] + _ = x[SVMDA-162] + _ = x[SVMFBASID-163] + _ = x[SVML-164] + _ = x[SVMNP-165] + _ = x[SVMPF-166] + _ = x[SVMPFT-167] + _ = x[SYSCALL-168] + _ = x[SYSEE-169] + _ = x[TBM-170] + _ = x[TDX_GUEST-171] + _ = x[TLB_FLUSH_NESTED-172] + _ = x[TME-173] + _ = x[TOPEXT-174] + _ = x[TSCRATEMSR-175] + _ = x[TSXLDTRK-176] + _ = x[VAES-177] + _ = x[VMCBCLEAN-178] + _ = x[VMPL-179] + _ = x[VMSA_REGPROT-180] + _ = x[VMX-181] + _ = x[VPCLMULQDQ-182] + _ = x[VTE-183] + _ = x[WAITPKG-184] + _ = x[WBNOINVD-185] + _ = x[WRMSRNS-186] + _ = x[X87-187] + _ = x[XGETBV1-188] + _ = x[XOP-189] + _ = x[XSAVE-190] + _ = x[XSAVEC-191] + _ = x[XSAVEOPT-192] + _ = x[XSAVES-193] + _ = x[AESARM-194] + _ = x[ARMCPUID-195] + _ = x[ASIMD-196] + _ = x[ASIMDDP-197] + _ = x[ASIMDHP-198] + _ = x[ASIMDRDM-199] + _ = x[ATOMICS-200] + _ = x[CRC32-201] + _ = x[DCPOP-202] + _ = x[EVTSTRM-203] + _ = x[FCMA-204] + _ = x[FP-205] + _ = x[FPHP-206] + _ = x[GPA-207] + _ = x[JSCVT-208] + _ = x[LRCPC-209] + _ = x[PMULL-210] + _ = x[SHA1-211] + _ = x[SHA2-212] + _ = x[SHA3-213] + _ = x[SHA512-214] + _ = x[SM3-215] + _ = x[SM4-216] + _ = x[SVE-217] + _ = x[lastID-218] _ = x[firstID-0] } -const _FeatureID_name = 
"firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" +const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" -var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 67, 70, 75, 84, 93, 102, 106, 116, 128, 136, 144, 152, 160, 167, 177, 187, 195, 205, 216, 224, 234, 252, 267, 274, 286, 293, 300, 311, 319, 323, 327, 333, 338, 346, 351, 357, 361, 370, 388, 396, 403, 407, 411, 425, 431, 435, 439, 448, 452, 456, 461, 466, 470, 474, 481, 485, 488, 494, 497, 500, 510, 520, 533, 546, 550, 554, 568, 585, 588, 598, 609, 615, 
623, 634, 642, 654, 670, 684, 695, 705, 720, 728, 739, 749, 756, 765, 775, 779, 782, 789, 794, 805, 812, 819, 827, 830, 836, 841, 850, 857, 865, 869, 872, 878, 885, 898, 903, 905, 912, 919, 925, 929, 938, 942, 947, 953, 959, 965, 975, 978, 994, 1003, 1006, 1015, 1030, 1043, 1049, 1063, 1070, 1073, 1078, 1081, 1084, 1096, 1110, 1120, 1123, 1127, 1131, 1135, 1140, 1145, 1150, 1155, 1169, 1180, 1186, 1189, 1194, 1203, 1207, 1212, 1217, 1223, 1230, 1235, 1238, 1247, 1263, 1266, 1272, 1282, 1290, 1294, 1303, 1307, 1319, 1322, 1332, 1335, 1342, 1350, 1357, 1360, 1367, 1370, 1375, 1381, 1389, 1395, 1401, 1409, 1414, 1421, 1428, 1436, 1443, 1448, 1453, 1460, 1464, 1466, 1470, 1473, 1478, 1483, 1488, 1492, 1496, 1500, 1506, 1509, 1512, 1515, 1521} +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 67, 70, 75, 84, 93, 102, 106, 116, 128, 136, 144, 152, 160, 167, 177, 187, 195, 205, 216, 224, 234, 252, 267, 274, 286, 293, 300, 311, 319, 323, 327, 333, 338, 346, 351, 357, 361, 370, 388, 396, 403, 407, 411, 425, 431, 435, 439, 448, 452, 456, 461, 466, 470, 474, 481, 485, 488, 494, 497, 500, 510, 520, 533, 546, 550, 561, 565, 579, 596, 599, 609, 620, 626, 634, 645, 653, 665, 681, 695, 706, 716, 731, 739, 750, 760, 767, 776, 786, 790, 793, 800, 805, 816, 823, 830, 838, 841, 847, 852, 861, 868, 876, 880, 883, 889, 896, 909, 914, 916, 923, 930, 936, 940, 949, 953, 958, 964, 970, 976, 986, 989, 1005, 1009, 1018, 1021, 1030, 1045, 1058, 1064, 1078, 1085, 1088, 1093, 1096, 1099, 1111, 1125, 1135, 1147, 1154, 1173, 1176, 1180, 1184, 1188, 1193, 1198, 1203, 1208, 1222, 1233, 1239, 1242, 1247, 1256, 1260, 1265, 1270, 1276, 1283, 1288, 1291, 1300, 1316, 1319, 1325, 1335, 1343, 1347, 1356, 1360, 1372, 1375, 1385, 1388, 1395, 1403, 1410, 1413, 1420, 1423, 1428, 1434, 1442, 1448, 1454, 1462, 1467, 1474, 1481, 1489, 1496, 1501, 1506, 1513, 1517, 1519, 1523, 1526, 1531, 1536, 1541, 1545, 1549, 1553, 1559, 1562, 1565, 1568, 1574} func (i FeatureID) String() string { if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { diff --git a/vendor/github.com/leodido/go-urn/.gitignore b/vendor/github.com/leodido/go-urn/.gitignore index 89d4bc55..427454f8 100644 --- a/vendor/github.com/leodido/go-urn/.gitignore +++ b/vendor/github.com/leodido/go-urn/.gitignore @@ -9,4 +9,5 @@ *.txt vendor/ -/removecomments \ No newline at end of file +/removecomments +/snake2camel \ No newline at end of file diff --git a/vendor/github.com/leodido/go-urn/README.md b/vendor/github.com/leodido/go-urn/README.md index 731eecbb..619475bf 100644 --- a/vendor/github.com/leodido/go-urn/README.md +++ b/vendor/github.com/leodido/go-urn/README.md @@ -2,21 +2,34 @@ **A parser for URNs**. -> As seen on [RFC 2141](https://tools.ietf.org/html/rfc2141#ref-1). +> As seen on [RFC 2141](https://datatracker.ietf.org/doc/html/rfc2141), [RFC 7643](https://datatracker.ietf.org/doc/html/rfc7643#section-10), and on [RFC 8141](https://datatracker.ietf.org/doc/html/rfc8141). [API documentation](https://godoc.org/github.com/leodido/go-urn). +Starting with version 1.3 this library also supports [RFC 7643 SCIM URNs](https://datatracker.ietf.org/doc/html/rfc7643#section-10). + +Starting with version 1.4 this library also supports [RFC 8141 URNs (2017)](https://datatracker.ietf.org/doc/html/rfc8141). + ## Installation ``` go get github.com/leodido/go-urn ``` +## Features + +1. RFC 2141 URNs parsing (default) +2. RFC 8141 URNs parsing (supersedes RFC 2141) +3. RFC 7643 SCIM URNs parsing +4. Normalization as per RFCs +5. Lexical equivalence as per RFCs +6. 
Precise, fine-grained errors + ## Performances This implementation results to be really fast. -Usually below ½ microsecond on my machine[1](#mymachine). +Usually below 400 ns on my machine[1](#mymachine). Notice it also performs, while parsing: @@ -24,35 +37,36 @@ Notice it also performs, while parsing: 2. specific-string normalization ``` -ok/00/urn:a:b______________________________________/-4 20000000 265 ns/op 182 B/op 6 allocs/op -ok/01/URN:foo:a123,456_____________________________/-4 30000000 296 ns/op 200 B/op 6 allocs/op -ok/02/urn:foo:a123%2c456___________________________/-4 20000000 331 ns/op 208 B/op 6 allocs/op -ok/03/urn:ietf:params:scim:schemas:core:2.0:User___/-4 20000000 430 ns/op 280 B/op 6 allocs/op -ok/04/urn:ietf:params:scim:schemas:extension:enterp/-4 20000000 411 ns/op 312 B/op 6 allocs/op -ok/05/urn:ietf:params:scim:schemas:extension:enterp/-4 20000000 472 ns/op 344 B/op 6 allocs/op -ok/06/urn:burnout:nss______________________________/-4 30000000 257 ns/op 192 B/op 6 allocs/op -ok/07/urn:abcdefghilmnopqrstuvzabcdefghilm:x_______/-4 20000000 375 ns/op 213 B/op 6 allocs/op -ok/08/urn:urnurnurn:urn____________________________/-4 30000000 265 ns/op 197 B/op 6 allocs/op -ok/09/urn:ciao:@!=%2c(xyz)+a,b.*@g=$_'_____________/-4 20000000 307 ns/op 248 B/op 6 allocs/op -ok/10/URN:x:abc%1dz%2f%3az_________________________/-4 30000000 259 ns/op 212 B/op 6 allocs/op -no/11/URN:-xxx:x___________________________________/-4 20000000 445 ns/op 320 B/op 6 allocs/op -no/12/urn::colon:nss_______________________________/-4 20000000 461 ns/op 320 B/op 6 allocs/op -no/13/urn:abcdefghilmnopqrstuvzabcdefghilmn:specifi/-4 10000000 660 ns/op 320 B/op 6 allocs/op -no/14/URN:a!?:x____________________________________/-4 20000000 507 ns/op 320 B/op 6 allocs/op -no/15/urn:urn:NSS__________________________________/-4 20000000 429 ns/op 288 B/op 6 allocs/op -no/16/urn:white_space:NSS__________________________/-4 20000000 482 ns/op 320 B/op 6 allocs/op -no/17/urn:concat:no_spaces_________________________/-4 20000000 539 ns/op 328 B/op 7 allocs/op -no/18/urn:a:/______________________________________/-4 20000000 470 ns/op 320 B/op 7 allocs/op -no/19/urn:UrN:NSS__________________________________/-4 20000000 399 ns/op 288 B/op 6 allocs/op +ok/00/urn:a:b______________________________________/-10 51372006 109.0 ns/op 275 B/op 3 allocs/op +ok/01/URN:foo:a123,456_____________________________/-10 36024072 160.8 ns/op 296 B/op 6 allocs/op +ok/02/urn:foo:a123%2C456___________________________/-10 31901007 188.4 ns/op 320 B/op 7 allocs/op +ok/03/urn:ietf:params:scim:schemas:core:2.0:User___/-10 22736756 266.6 ns/op 376 B/op 6 allocs/op +ok/04/urn:ietf:params:scim:schemas:extension:enterp/-10 18291859 335.2 ns/op 408 B/op 6 allocs/op +ok/05/urn:ietf:params:scim:schemas:extension:enterp/-10 15283087 379.4 ns/op 440 B/op 6 allocs/op +ok/06/urn:burnout:nss______________________________/-10 39407593 155.1 ns/op 288 B/op 6 allocs/op +ok/07/urn:abcdefghilmnopqrstuvzabcdefghilm:x_______/-10 27832718 211.4 ns/op 307 B/op 4 allocs/op +ok/08/urn:urnurnurn:urn____________________________/-10 33269596 168.1 ns/op 293 B/op 6 allocs/op +ok/09/urn:ciao:!!*_________________________________/-10 41100675 148.8 ns/op 288 B/op 6 allocs/op +ok/10/urn:ciao:=@__________________________________/-10 37214253 149.7 ns/op 284 B/op 6 allocs/op +ok/11/urn:ciao:@!=%2C(xyz)+a,b.*@g=$_'_____________/-10 26534240 229.8 ns/op 336 B/op 7 allocs/op +ok/12/URN:x:abc%1Dz%2F%3az_________________________/-10 28166396 211.8 ns/op 336 B/op 7 allocs/op 
+no/13/URN:---xxx:x_________________________________/-10 23635159 255.6 ns/op 419 B/op 5 allocs/op +no/14/urn::colon:nss_______________________________/-10 23594779 258.4 ns/op 419 B/op 5 allocs/op +no/15/URN:@,:x_____________________________________/-10 23742535 261.5 ns/op 419 B/op 5 allocs/op +no/16/URN:URN:NSS__________________________________/-10 27432714 223.3 ns/op 371 B/op 5 allocs/op +no/17/urn:UrN:NSS__________________________________/-10 26922117 224.9 ns/op 371 B/op 5 allocs/op +no/18/urn:a:%______________________________________/-10 24926733 224.6 ns/op 371 B/op 5 allocs/op +no/19/urn:urn:NSS__________________________________/-10 27652641 220.7 ns/op 371 B/op 5 allocs/op ``` ---- +* [1]: Apple M1 Pro -* [1]: Intel Core i7-7600U CPU @ 2.80GHz - ---- ## Example + +For more examples take a look at the [examples file](examples_test.go). + + ```go package main @@ -64,6 +78,35 @@ import ( func main() { var uid = "URN:foo:a123,456" + // Parse the input string as a RFC 2141 URN only + u, e := urn.NewMachine().Parse(uid) + if e != nil { + fmt.Errorf(err) + + return + } + + fmt.Println(u.ID) + fmt.Println(u.SS) + + // Output: + // foo + // a123,456 +} +``` + +```go +package main + +import ( + "fmt" + "github.com/leodido/go-urn" +) + +func main() { + var uid = "URN:foo:a123,456" + + // Parse the input string as a RFC 2141 URN only u, ok := urn.Parse([]byte(uid)) if !ok { panic("error parsing urn") @@ -78,4 +121,33 @@ func main() { } ``` -[![Analytics](https://ga-beacon.appspot.com/UA-49657176-1/go-urn?flat)](https://github.com/igrigorik/ga-beacon) \ No newline at end of file +```go +package main + +import ( + "fmt" + "github.com/leodido/go-urn" +) + +func main() { + input := "urn:ietf:params:scim:api:messages:2.0:ListResponse" + + // Parsing the input string as a RFC 7643 SCIM URN + u, ok := urn.Parse([]byte(input), urn.WithParsingMode(urn.RFC7643Only)) + if !ok { + panic("error parsing urn") + } + + fmt.Println(u.IsSCIM()) + scim := u.SCIM() + fmt.Println(scim.Type.String()) + fmt.Println(scim.Name) + fmt.Println(scim.Other) + + // Output: + // true + // api + // messages + // 2.0:ListResponse +} +``` \ No newline at end of file diff --git a/vendor/github.com/leodido/go-urn/kind.go b/vendor/github.com/leodido/go-urn/kind.go new file mode 100644 index 00000000..f5e140f0 --- /dev/null +++ b/vendor/github.com/leodido/go-urn/kind.go @@ -0,0 +1,10 @@ +package urn + +type Kind int + +const ( + NONE Kind = iota + RFC2141 + RFC7643 + RFC8141 +) diff --git a/vendor/github.com/leodido/go-urn/machine.go b/vendor/github.com/leodido/go-urn/machine.go index f8d57b41..aec1ba69 100644 --- a/vendor/github.com/leodido/go-urn/machine.go +++ b/vendor/github.com/leodido/go-urn/machine.go @@ -2,27 +2,98 @@ package urn import ( "fmt" + + scimschema "github.com/leodido/go-urn/scim/schema" ) var ( - errPrefix = "expecting the prefix to be the \"urn\" string (whatever case) [col %d]" - errIdentifier = "expecting the identifier to be string (1..31 alnum chars, also containing dashes but not at its start) [col %d]" - errSpecificString = "expecting the specific string to be a string containing alnum, hex, or others ([()+,-.:=@;$_!*']) chars [col %d]" - errNoUrnWithinID = "expecting the identifier to not contain the \"urn\" reserved string [col %d]" - errHex = "expecting the specific string hex chars to be well-formed (%%alnum{2}) [col %d]" - errParse = "parsing error [col %d]" + errPrefix = "expecting the prefix to be the \"urn\" string (whatever case) [col %d]" + errIdentifier = "expecting the identifier to be 
string (1..31 alnum chars, also containing dashes but not at its beginning) [col %d]" + errSpecificString = "expecting the specific string to be a string containing alnum, hex, or others ([()+,-.:=@;$_!*']) chars [col %d]" + errNoUrnWithinID = "expecting the identifier to not contain the \"urn\" reserved string [col %d]" + errHex = "expecting the percent encoded chars to be well-formed (%%alnum{2}) [col %d]" + errSCIMNamespace = "expecing the SCIM namespace identifier (ietf:params:scim) [col %d]" + errSCIMType = "expecting a correct SCIM type (schemas, api, param) [col %d]" + errSCIMName = "expecting one or more alnum char in the SCIM name part [col %d]" + errSCIMOther = "expecting a well-formed other SCIM part [col %d]" + errSCIMOtherIncomplete = "expecting a not empty SCIM other part after colon [col %d]" + err8141InformalID = "informal URN namespace must be in the form urn-[1-9][0-9] [col %d]" + err8141SpecificString = "expecting the specific string to contain alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] not in first position) chars [col %d]" + err8141Identifier = "expecting the indentifier to be a string with (length 2 to 32 chars) containing alnum (or dashes) not starting or ending with a dash [col %d]" + err8141RComponentStart = "expecting only one r-component (starting with the ?+ sequence) [col %d]" + err8141QComponentStart = "expecting only one q-component (starting with the ?= sequence) [col %d]" + err8141MalformedRComp = "expecting a non-empty r-component containing alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] but not at its beginning) [col %d]" + err8141MalformedQComp = "expecting a non-empty q-component containing alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] but not at its beginning) [col %d]" ) +var _toStateActions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 33, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, +} + +var _eofActions []byte = []byte{ + 0, 1, 1, 1, 1, 4, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 8, 9, + 9, 4, 4, 11, 1, 1, 1, 1, + 12, 12, 12, 12, 12, 12, 12, 12, + 12, 12, 12, 12, 12, 12, 12, 12, + 12, 14, 14, 14, 14, 16, 18, 20, + 20, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 1, 1, 1, 1, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 24, 24, 25, 25, 0, 26, 28, + 28, 29, 29, 30, 30, 26, 26, 31, + 31, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 22, 21, + 21, 22, 22, 22, 34, 34, 35, 37, + 37, 38, 40, 41, 41, 38, 42, 42, + 42, 44, 42, 48, 48, 48, 50, 44, + 50, 0, +} const start int = 1 -const firstFinal int = 44 +const firstFinal int = 172 -const enFail int = 46 +const enScimOnly int = 44 +const enRfc8141Only int = 83 +const enFail int = 193 const enMain int = 1 // Machine is the interface representing the FSM type Machine interface { Error() error Parse(input []byte) 
(*URN, error) + WithParsingMode(ParsingMode) } type machine struct { @@ -30,12 +101,24 @@ type machine struct { cs int p, pe, eof, pb int err error - tolower []int + startParsingAt int + parsingMode ParsingMode + parsingModeSet bool } // NewMachine creates a new FSM able to parse RFC 2141 strings. -func NewMachine() Machine { - m := &machine{} +func NewMachine(options ...Option) Machine { + m := &machine{ + parsingModeSet: false, + } + + for _, o := range options { + o(m) + } + // Set default parsing mode + if !m.parsingModeSet { + m.WithParsingMode(DefaultParsingMode) + } return m } @@ -51,7 +134,7 @@ func (m *machine) text() []byte { return m.data[m.pb:m.p] } -// Parse parses the input byte array as a RFC 2141 string. +// Parse parses the input byte array as a RFC 2141 or RFC7643 string. func (m *machine) Parse(input []byte) (*URN, error) { m.data = input m.p = 0 @@ -59,1619 +142,4881 @@ func (m *machine) Parse(input []byte) (*URN, error) { m.pe = len(input) m.eof = len(input) m.err = nil - m.tolower = []int{} - output := &URN{} - { - m.cs = start + m.cs = m.startParsingAt + output := &URN{ + tolower: []int{}, } { if (m.p) == (m.pe) { goto _testEof } + if m.cs == 0 { + goto _out + } + _resume: switch m.cs { case 1: - goto stCase1 + switch (m.data)[(m.p)] { + case 85: + goto tr1 + case 117: + goto tr1 + } + goto tr0 case 0: - goto stCase0 + goto _out case 2: - goto stCase2 + switch (m.data)[(m.p)] { + case 82: + goto tr2 + case 114: + goto tr2 + } + goto tr0 case 3: - goto stCase3 + switch (m.data)[(m.p)] { + case 78: + goto tr3 + case 110: + goto tr3 + } + goto tr0 case 4: - goto stCase4 + if (m.data)[(m.p)] == 58 { + goto tr4 + } + goto tr0 case 5: - goto stCase5 + switch (m.data)[(m.p)] { + case 85: + goto tr7 + case 117: + goto tr7 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr6 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr6 + } + default: + goto tr6 + } + goto tr5 case 6: - goto stCase6 + switch (m.data)[(m.p)] { + case 45: + goto tr9 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr9 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr9 + } + default: + goto tr9 + } + goto tr8 case 7: - goto stCase7 + switch (m.data)[(m.p)] { + case 45: + goto tr11 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr11 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr11 + } + default: + goto tr11 + } + goto tr8 case 8: - goto stCase8 + switch (m.data)[(m.p)] { + case 45: + goto tr12 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr12 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr12 + } + default: + goto tr12 + } + goto tr8 case 9: - goto stCase9 + switch (m.data)[(m.p)] { + case 45: + goto tr13 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr13 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr13 + } + default: + goto tr13 + } + goto tr8 case 10: - goto stCase10 + switch (m.data)[(m.p)] { + case 45: + goto tr14 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 
<= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr14 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr14 + } + default: + goto tr14 + } + goto tr8 case 11: - goto stCase11 + switch (m.data)[(m.p)] { + case 45: + goto tr15 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr15 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr15 + } + default: + goto tr15 + } + goto tr8 case 12: - goto stCase12 + switch (m.data)[(m.p)] { + case 45: + goto tr16 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr16 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr16 + } + default: + goto tr16 + } + goto tr8 case 13: - goto stCase13 + switch (m.data)[(m.p)] { + case 45: + goto tr17 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr17 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr17 + } + default: + goto tr17 + } + goto tr8 case 14: - goto stCase14 + switch (m.data)[(m.p)] { + case 45: + goto tr18 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr18 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr18 + } + default: + goto tr18 + } + goto tr8 case 15: - goto stCase15 + switch (m.data)[(m.p)] { + case 45: + goto tr19 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr19 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr19 + } + default: + goto tr19 + } + goto tr8 case 16: - goto stCase16 + switch (m.data)[(m.p)] { + case 45: + goto tr20 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr20 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr20 + } + default: + goto tr20 + } + goto tr8 case 17: - goto stCase17 + switch (m.data)[(m.p)] { + case 45: + goto tr21 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr21 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr21 + } + default: + goto tr21 + } + goto tr8 case 18: - goto stCase18 + switch (m.data)[(m.p)] { + case 45: + goto tr22 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr22 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr22 + } + default: + goto tr22 + } + goto tr8 case 19: - goto stCase19 + switch (m.data)[(m.p)] { + case 45: + goto tr23 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr23 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr23 + } + default: + goto tr23 + } + goto tr8 case 20: - goto stCase20 + switch (m.data)[(m.p)] { + case 45: + goto tr24 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto 
tr24 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr24 + } + default: + goto tr24 + } + goto tr8 case 21: - goto stCase21 + switch (m.data)[(m.p)] { + case 45: + goto tr25 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr25 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr25 + } + default: + goto tr25 + } + goto tr8 case 22: - goto stCase22 + switch (m.data)[(m.p)] { + case 45: + goto tr26 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr26 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr26 + } + default: + goto tr26 + } + goto tr8 case 23: - goto stCase23 + switch (m.data)[(m.p)] { + case 45: + goto tr27 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr27 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr27 + } + default: + goto tr27 + } + goto tr8 case 24: - goto stCase24 + switch (m.data)[(m.p)] { + case 45: + goto tr28 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr28 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr28 + } + default: + goto tr28 + } + goto tr8 case 25: - goto stCase25 + switch (m.data)[(m.p)] { + case 45: + goto tr29 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr29 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr29 + } + default: + goto tr29 + } + goto tr8 case 26: - goto stCase26 + switch (m.data)[(m.p)] { + case 45: + goto tr30 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr30 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr30 + } + default: + goto tr30 + } + goto tr8 case 27: - goto stCase27 + switch (m.data)[(m.p)] { + case 45: + goto tr31 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr31 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr31 + } + default: + goto tr31 + } + goto tr8 case 28: - goto stCase28 + switch (m.data)[(m.p)] { + case 45: + goto tr32 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr32 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr32 + } + default: + goto tr32 + } + goto tr8 case 29: - goto stCase29 + switch (m.data)[(m.p)] { + case 45: + goto tr33 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr33 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr33 + } + default: + goto tr33 + } + goto tr8 case 30: - goto stCase30 + switch (m.data)[(m.p)] { + case 45: + goto tr34 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr34 + } + case (m.data)[(m.p)] > 90: + if 97 <= 
(m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr34 + } + default: + goto tr34 + } + goto tr8 case 31: - goto stCase31 + switch (m.data)[(m.p)] { + case 45: + goto tr35 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr35 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr35 + } + default: + goto tr35 + } + goto tr8 case 32: - goto stCase32 + switch (m.data)[(m.p)] { + case 45: + goto tr36 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr36 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr36 + } + default: + goto tr36 + } + goto tr8 case 33: - goto stCase33 + switch (m.data)[(m.p)] { + case 45: + goto tr37 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr37 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr37 + } + default: + goto tr37 + } + goto tr8 case 34: - goto stCase34 + switch (m.data)[(m.p)] { + case 45: + goto tr38 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr38 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr38 + } + default: + goto tr38 + } + goto tr8 case 35: - goto stCase35 + switch (m.data)[(m.p)] { + case 45: + goto tr39 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr39 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr39 + } + default: + goto tr39 + } + goto tr8 case 36: - goto stCase36 + switch (m.data)[(m.p)] { + case 45: + goto tr40 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr40 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr40 + } + default: + goto tr40 + } + goto tr8 case 37: - goto stCase37 + if (m.data)[(m.p)] == 58 { + goto tr10 + } + goto tr8 case 38: - goto stCase38 - case 44: - goto stCase44 + switch (m.data)[(m.p)] { + case 33: + goto tr42 + case 36: + goto tr42 + case 37: + goto tr43 + case 61: + goto tr42 + case 95: + goto tr42 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr42 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr42 + } + case (m.data)[(m.p)] >= 64: + goto tr42 + } + default: + goto tr42 + } + goto tr41 + case 172: + switch (m.data)[(m.p)] { + case 33: + goto tr212 + case 36: + goto tr212 + case 37: + goto tr213 + case 61: + goto tr212 + case 95: + goto tr212 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr212 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr212 + } + case (m.data)[(m.p)] >= 64: + goto tr212 + } + default: + goto tr212 + } + goto tr41 case 39: - goto stCase39 + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr45 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr45 + } + 
default: + goto tr46 + } + goto tr44 case 40: - goto stCase40 - case 45: - goto stCase45 + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr47 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr47 + } + default: + goto tr48 + } + goto tr44 + case 173: + switch (m.data)[(m.p)] { + case 33: + goto tr212 + case 36: + goto tr212 + case 37: + goto tr213 + case 61: + goto tr212 + case 95: + goto tr212 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr212 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr212 + } + case (m.data)[(m.p)] >= 64: + goto tr212 + } + default: + goto tr212 + } + goto tr44 case 41: - goto stCase41 + switch (m.data)[(m.p)] { + case 45: + goto tr9 + case 58: + goto tr10 + case 82: + goto tr49 + case 114: + goto tr49 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr9 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr9 + } + default: + goto tr9 + } + goto tr5 case 42: - goto stCase42 + switch (m.data)[(m.p)] { + case 45: + goto tr11 + case 58: + goto tr10 + case 78: + goto tr50 + case 110: + goto tr50 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr11 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr11 + } + default: + goto tr11 + } + goto tr5 case 43: - goto stCase43 + if (m.data)[(m.p)] == 45 { + goto tr12 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr12 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr12 + } + default: + goto tr12 + } + goto tr51 + case 44: + switch (m.data)[(m.p)] { + case 85: + goto tr52 + case 117: + goto tr52 + } + goto tr0 + case 45: + switch (m.data)[(m.p)] { + case 82: + goto tr53 + case 114: + goto tr53 + } + goto tr0 case 46: - goto stCase46 - } - goto stOut - stCase1: - switch (m.data)[(m.p)] { + switch (m.data)[(m.p)] { + case 78: + goto tr54 + case 110: + goto tr54 + } + goto tr0 + case 47: + if (m.data)[(m.p)] == 58 { + goto tr55 + } + goto tr0 + case 48: + if (m.data)[(m.p)] == 105 { + goto tr57 + } + goto tr56 + case 49: + if (m.data)[(m.p)] == 101 { + goto tr58 + } + goto tr56 + case 50: + if (m.data)[(m.p)] == 116 { + goto tr59 + } + goto tr56 + case 51: + if (m.data)[(m.p)] == 102 { + goto tr60 + } + goto tr56 + case 52: + if (m.data)[(m.p)] == 58 { + goto tr61 + } + goto tr56 + case 53: + if (m.data)[(m.p)] == 112 { + goto tr62 + } + goto tr56 + case 54: + if (m.data)[(m.p)] == 97 { + goto tr63 + } + goto tr56 + case 55: + if (m.data)[(m.p)] == 114 { + goto tr64 + } + goto tr56 + case 56: + if (m.data)[(m.p)] == 97 { + goto tr65 + } + goto tr56 + case 57: + if (m.data)[(m.p)] == 109 { + goto tr66 + } + goto tr56 + case 58: + if (m.data)[(m.p)] == 115 { + goto tr67 + } + goto tr56 + case 59: + if (m.data)[(m.p)] == 58 { + goto tr68 + } + goto tr56 + case 60: + if (m.data)[(m.p)] == 115 { + goto tr69 + } + goto tr56 + case 61: + if (m.data)[(m.p)] == 99 { + goto tr70 + } + goto tr56 + case 62: + if (m.data)[(m.p)] == 105 { + goto tr71 + } + goto tr56 + case 63: + if (m.data)[(m.p)] == 109 { + goto tr72 + } + goto tr56 + case 64: + if (m.data)[(m.p)] == 58 { + goto tr73 + } + goto tr56 + 
case 65: + switch (m.data)[(m.p)] { + case 97: + goto tr75 + case 112: + goto tr76 + case 115: + goto tr77 + } + goto tr74 + case 66: + if (m.data)[(m.p)] == 112 { + goto tr78 + } + goto tr74 + case 67: + if (m.data)[(m.p)] == 105 { + goto tr79 + } + goto tr74 + case 68: + if (m.data)[(m.p)] == 58 { + goto tr80 + } + goto tr74 + case 69: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr82 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr82 + } + default: + goto tr82 + } + goto tr81 + case 174: + if (m.data)[(m.p)] == 58 { + goto tr215 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr214 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr214 + } + default: + goto tr214 + } + goto tr81 + case 70: + switch (m.data)[(m.p)] { + case 33: + goto tr84 + case 36: + goto tr84 + case 37: + goto tr85 + case 61: + goto tr84 + case 95: + goto tr84 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr84 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr84 + } + case (m.data)[(m.p)] >= 64: + goto tr84 + } + default: + goto tr84 + } + goto tr83 + case 175: + switch (m.data)[(m.p)] { + case 33: + goto tr216 + case 36: + goto tr216 + case 37: + goto tr217 + case 61: + goto tr216 + case 95: + goto tr216 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr216 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr216 + } + case (m.data)[(m.p)] >= 64: + goto tr216 + } + default: + goto tr216 + } + goto tr83 + case 71: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr87 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr87 + } + default: + goto tr88 + } + goto tr86 + case 72: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr89 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr89 + } + default: + goto tr90 + } + goto tr86 + case 176: + switch (m.data)[(m.p)] { + case 33: + goto tr216 + case 36: + goto tr216 + case 37: + goto tr217 + case 61: + goto tr216 + case 95: + goto tr216 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr216 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr216 + } + case (m.data)[(m.p)] >= 64: + goto tr216 + } + default: + goto tr216 + } + goto tr86 + case 73: + if (m.data)[(m.p)] == 97 { + goto tr91 + } + goto tr74 + case 74: + if (m.data)[(m.p)] == 114 { + goto tr92 + } + goto tr74 + case 75: + if (m.data)[(m.p)] == 97 { + goto tr93 + } + goto tr74 + case 76: + if (m.data)[(m.p)] == 109 { + goto tr79 + } + goto tr74 + case 77: + if (m.data)[(m.p)] == 99 { + goto tr94 + } + goto tr74 + case 78: + if (m.data)[(m.p)] == 104 { + goto tr95 + } + goto tr74 + case 79: + if (m.data)[(m.p)] == 101 { + goto tr96 + } + goto tr74 + case 80: + if (m.data)[(m.p)] == 109 { + goto tr97 + } + goto tr74 + case 81: + if (m.data)[(m.p)] == 97 { + goto tr98 + } + goto tr74 + case 82: + if 
(m.data)[(m.p)] == 115 { + goto tr79 + } + goto tr74 + case 83: + switch (m.data)[(m.p)] { + case 85: + goto tr99 + case 117: + goto tr99 + } + goto tr0 + case 84: + switch (m.data)[(m.p)] { + case 82: + goto tr100 + case 114: + goto tr100 + } + goto tr0 case 85: - goto tr1 + switch (m.data)[(m.p)] { + case 78: + goto tr101 + case 110: + goto tr101 + } + goto tr0 + case 86: + if (m.data)[(m.p)] == 58 { + goto tr102 + } + goto tr0 + case 87: + switch (m.data)[(m.p)] { + case 85: + goto tr105 + case 117: + goto tr105 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr104 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr104 + } + default: + goto tr104 + } + goto tr103 + case 88: + if (m.data)[(m.p)] == 45 { + goto tr107 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr108 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr108 + } + default: + goto tr108 + } + goto tr106 + case 89: + if (m.data)[(m.p)] == 45 { + goto tr109 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr110 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr110 + } + default: + goto tr110 + } + goto tr106 + case 90: + if (m.data)[(m.p)] == 45 { + goto tr111 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr112 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr112 + } + default: + goto tr112 + } + goto tr106 + case 91: + if (m.data)[(m.p)] == 45 { + goto tr113 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr114 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr114 + } + default: + goto tr114 + } + goto tr106 + case 92: + if (m.data)[(m.p)] == 45 { + goto tr115 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr116 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr116 + } + default: + goto tr116 + } + goto tr106 + case 93: + if (m.data)[(m.p)] == 45 { + goto tr117 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr118 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr118 + } + default: + goto tr118 + } + goto tr106 + case 94: + if (m.data)[(m.p)] == 45 { + goto tr119 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr120 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr120 + } + default: + goto tr120 + } + goto tr106 + case 95: + if (m.data)[(m.p)] == 45 { + goto tr121 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr122 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr122 + } + default: + goto tr122 + } + goto tr106 + case 96: + if (m.data)[(m.p)] == 45 { + goto tr123 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr124 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr124 + } + default: + goto tr124 + } 
+ goto tr106 + case 97: + if (m.data)[(m.p)] == 45 { + goto tr125 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr126 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr126 + } + default: + goto tr126 + } + goto tr106 + case 98: + if (m.data)[(m.p)] == 45 { + goto tr127 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr128 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr128 + } + default: + goto tr128 + } + goto tr106 + case 99: + if (m.data)[(m.p)] == 45 { + goto tr129 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr130 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr130 + } + default: + goto tr130 + } + goto tr106 + case 100: + if (m.data)[(m.p)] == 45 { + goto tr131 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr132 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr132 + } + default: + goto tr132 + } + goto tr106 + case 101: + if (m.data)[(m.p)] == 45 { + goto tr133 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr134 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr134 + } + default: + goto tr134 + } + goto tr106 + case 102: + if (m.data)[(m.p)] == 45 { + goto tr135 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr136 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr136 + } + default: + goto tr136 + } + goto tr106 + case 103: + if (m.data)[(m.p)] == 45 { + goto tr137 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr138 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr138 + } + default: + goto tr138 + } + goto tr106 + case 104: + if (m.data)[(m.p)] == 45 { + goto tr139 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr140 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr140 + } + default: + goto tr140 + } + goto tr106 + case 105: + if (m.data)[(m.p)] == 45 { + goto tr141 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr142 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr142 + } + default: + goto tr142 + } + goto tr106 + case 106: + if (m.data)[(m.p)] == 45 { + goto tr143 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr144 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr144 + } + default: + goto tr144 + } + goto tr106 + case 107: + if (m.data)[(m.p)] == 45 { + goto tr145 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr146 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr146 + } + default: + goto tr146 + } + goto tr106 + case 108: + if (m.data)[(m.p)] == 45 { + goto tr147 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= 
(m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr148 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr148 + } + default: + goto tr148 + } + goto tr106 + case 109: + if (m.data)[(m.p)] == 45 { + goto tr149 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr150 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr150 + } + default: + goto tr150 + } + goto tr106 + case 110: + if (m.data)[(m.p)] == 45 { + goto tr151 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr152 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr152 + } + default: + goto tr152 + } + goto tr106 + case 111: + if (m.data)[(m.p)] == 45 { + goto tr153 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr154 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr154 + } + default: + goto tr154 + } + goto tr106 + case 112: + if (m.data)[(m.p)] == 45 { + goto tr155 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr156 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr156 + } + default: + goto tr156 + } + goto tr106 + case 113: + if (m.data)[(m.p)] == 45 { + goto tr157 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr158 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr158 + } + default: + goto tr158 + } + goto tr106 + case 114: + if (m.data)[(m.p)] == 45 { + goto tr159 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr160 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr160 + } + default: + goto tr160 + } + goto tr106 + case 115: + if (m.data)[(m.p)] == 45 { + goto tr161 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr162 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr162 + } + default: + goto tr162 + } + goto tr106 + case 116: + if (m.data)[(m.p)] == 45 { + goto tr163 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr164 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr164 + } + default: + goto tr164 + } + goto tr106 case 117: - goto tr1 + if (m.data)[(m.p)] == 45 { + goto tr165 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr166 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr166 + } + default: + goto tr166 + } + goto tr106 + case 118: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr167 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr167 + } + default: + goto tr167 + } + goto tr106 + case 119: + if (m.data)[(m.p)] == 58 { + goto tr168 + } + goto tr106 + case 120: + switch (m.data)[(m.p)] { + case 33: + goto tr170 + case 37: + goto tr171 + case 61: + goto tr170 + case 95: + goto tr170 + case 126: + goto tr170 + } + switch { + case (m.data)[(m.p)] < 48: + if 
36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr170 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr170 + } + case (m.data)[(m.p)] >= 64: + goto tr170 + } + default: + goto tr170 + } + goto tr169 + case 177: + switch (m.data)[(m.p)] { + case 33: + goto tr218 + case 35: + goto tr219 + case 37: + goto tr220 + case 61: + goto tr218 + case 63: + goto tr221 + case 95: + goto tr218 + case 126: + goto tr218 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr218 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr218 + } + default: + goto tr218 + } + goto tr169 + case 178: + switch (m.data)[(m.p)] { + case 33: + goto tr222 + case 37: + goto tr223 + case 61: + goto tr222 + case 95: + goto tr222 + case 126: + goto tr222 + } + switch { + case (m.data)[(m.p)] < 63: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr222 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr222 + } + default: + goto tr222 + } + goto tr183 + case 179: + switch (m.data)[(m.p)] { + case 33: + goto tr224 + case 37: + goto tr225 + case 61: + goto tr224 + case 95: + goto tr224 + case 126: + goto tr224 + } + switch { + case (m.data)[(m.p)] < 63: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr224 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr224 + } + default: + goto tr224 + } + goto tr183 + case 121: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr173 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr173 + } + default: + goto tr174 + } + goto tr172 + case 122: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr175 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr175 + } + default: + goto tr176 + } + goto tr172 + case 180: + switch (m.data)[(m.p)] { + case 33: + goto tr224 + case 37: + goto tr225 + case 61: + goto tr224 + case 95: + goto tr224 + case 126: + goto tr224 + } + switch { + case (m.data)[(m.p)] < 63: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr224 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr224 + } + default: + goto tr224 + } + goto tr172 + case 123: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr178 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr178 + } + default: + goto tr179 + } + goto tr177 + case 124: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr180 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr180 + } + default: + goto tr181 + } + goto tr177 + case 181: + switch (m.data)[(m.p)] { + case 33: + goto tr218 + case 35: + goto tr219 + case 37: + goto tr220 + case 61: + goto tr218 + case 63: + goto tr221 + case 95: + goto tr218 + case 126: + goto tr218 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr218 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr218 + } + default: + goto tr218 + } + goto tr177 + case 125: + 
switch (m.data)[(m.p)] { + case 43: + goto tr182 + case 61: + goto tr184 + } + goto tr183 + case 126: + switch (m.data)[(m.p)] { + case 33: + goto tr186 + case 37: + goto tr187 + case 61: + goto tr186 + case 63: + goto tr188 + case 95: + goto tr186 + case 126: + goto tr186 + } + switch { + case (m.data)[(m.p)] < 48: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr186 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr186 + } + case (m.data)[(m.p)] >= 64: + goto tr186 + } + default: + goto tr186 + } + goto tr185 + case 182: + switch (m.data)[(m.p)] { + case 33: + goto tr226 + case 35: + goto tr227 + case 37: + goto tr228 + case 61: + goto tr226 + case 63: + goto tr229 + case 95: + goto tr226 + case 126: + goto tr226 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr226 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr226 + } + default: + goto tr226 + } + goto tr185 + case 127: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr190 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr190 + } + default: + goto tr191 + } + goto tr189 + case 128: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr192 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr192 + } + default: + goto tr193 + } + goto tr189 + case 183: + switch (m.data)[(m.p)] { + case 33: + goto tr226 + case 35: + goto tr227 + case 37: + goto tr228 + case 61: + goto tr226 + case 63: + goto tr229 + case 95: + goto tr226 + case 126: + goto tr226 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr226 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr226 + } + default: + goto tr226 + } + goto tr189 + case 184: + switch (m.data)[(m.p)] { + case 33: + goto tr226 + case 35: + goto tr227 + case 37: + goto tr228 + case 43: + goto tr230 + case 61: + goto tr231 + case 63: + goto tr229 + case 95: + goto tr226 + case 126: + goto tr226 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr226 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr226 + } + default: + goto tr226 + } + goto tr185 + case 185: + switch (m.data)[(m.p)] { + case 33: + goto tr232 + case 35: + goto tr233 + case 37: + goto tr234 + case 47: + goto tr226 + case 61: + goto tr232 + case 63: + goto tr235 + case 95: + goto tr232 + case 126: + goto tr232 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr232 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr232 + } + default: + goto tr232 + } + goto tr185 + case 186: + switch (m.data)[(m.p)] { + case 33: + goto tr204 + case 35: + goto tr227 + case 37: + goto tr237 + case 47: + goto tr226 + case 61: + goto tr204 + case 63: + goto tr229 + case 95: + goto tr204 + case 126: + goto tr204 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr204 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr204 + } + default: + goto tr204 + } + goto tr236 + case 
187: + switch (m.data)[(m.p)] { + case 33: + goto tr238 + case 35: + goto tr239 + case 37: + goto tr240 + case 61: + goto tr238 + case 63: + goto tr241 + case 95: + goto tr238 + case 126: + goto tr238 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr238 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr238 + } + default: + goto tr238 + } + goto tr203 + case 129: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr195 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr195 + } + default: + goto tr196 + } + goto tr194 + case 130: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr197 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr197 + } + default: + goto tr198 + } + goto tr194 + case 188: + switch (m.data)[(m.p)] { + case 33: + goto tr238 + case 35: + goto tr239 + case 37: + goto tr240 + case 61: + goto tr238 + case 63: + goto tr241 + case 95: + goto tr238 + case 126: + goto tr238 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr238 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr238 + } + default: + goto tr238 + } + goto tr194 + case 189: + switch (m.data)[(m.p)] { + case 33: + goto tr238 + case 35: + goto tr239 + case 37: + goto tr240 + case 61: + goto tr242 + case 63: + goto tr241 + case 95: + goto tr238 + case 126: + goto tr238 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr238 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr238 + } + default: + goto tr238 + } + goto tr203 + case 190: + switch (m.data)[(m.p)] { + case 33: + goto tr243 + case 35: + goto tr244 + case 37: + goto tr245 + case 47: + goto tr238 + case 61: + goto tr243 + case 63: + goto tr246 + case 95: + goto tr243 + case 126: + goto tr243 + } + switch { + case (m.data)[(m.p)] < 64: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 { + goto tr243 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr243 + } + default: + goto tr243 + } + goto tr203 + case 131: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr200 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr200 + } + default: + goto tr201 + } + goto tr199 + case 132: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr197 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr197 + } + default: + goto tr198 + } + goto tr199 + case 133: + if (m.data)[(m.p)] == 43 { + goto tr202 + } + goto tr185 + case 191: + switch (m.data)[(m.p)] { + case 33: + goto tr232 + case 35: + goto tr233 + case 37: + goto tr234 + case 61: + goto tr232 + case 63: + goto tr247 + case 95: + goto tr232 + case 126: + goto tr232 + } + switch { + case (m.data)[(m.p)] < 48: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr232 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr232 + } + case (m.data)[(m.p)] >= 64: + goto tr232 + } + default: + goto tr232 + } + goto 
tr185 + case 134: + switch (m.data)[(m.p)] { + case 43: + goto tr202 + case 61: + goto tr184 + } + goto tr185 + case 135: + switch (m.data)[(m.p)] { + case 33: + goto tr204 + case 37: + goto tr205 + case 61: + goto tr204 + case 63: + goto tr206 + case 95: + goto tr204 + case 126: + goto tr204 + } + switch { + case (m.data)[(m.p)] < 48: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr204 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr204 + } + case (m.data)[(m.p)] >= 64: + goto tr204 + } + default: + goto tr204 + } + goto tr203 + case 136: + if (m.data)[(m.p)] == 61 { + goto tr207 + } + goto tr203 + case 192: + switch (m.data)[(m.p)] { + case 33: + goto tr243 + case 35: + goto tr244 + case 37: + goto tr245 + case 61: + goto tr243 + case 63: + goto tr248 + case 95: + goto tr243 + case 126: + goto tr243 + } + switch { + case (m.data)[(m.p)] < 48: + if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr243 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr243 + } + case (m.data)[(m.p)] >= 64: + goto tr243 + } + default: + goto tr243 + } + goto tr203 + case 137: + if (m.data)[(m.p)] == 58 { + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr167 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr167 + } + default: + goto tr167 + } + goto tr106 + case 138: + switch (m.data)[(m.p)] { + case 45: + goto tr165 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr166 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr166 + } + default: + goto tr166 + } + goto tr106 + case 139: + switch (m.data)[(m.p)] { + case 45: + goto tr163 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr164 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr164 + } + default: + goto tr164 + } + goto tr106 + case 140: + switch (m.data)[(m.p)] { + case 45: + goto tr161 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr162 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr162 + } + default: + goto tr162 + } + goto tr106 + case 141: + switch (m.data)[(m.p)] { + case 45: + goto tr159 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr160 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr160 + } + default: + goto tr160 + } + goto tr106 + case 142: + switch (m.data)[(m.p)] { + case 45: + goto tr157 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr158 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr158 + } + default: + goto tr158 + } + goto tr106 + case 143: + switch (m.data)[(m.p)] { + case 45: + goto tr155 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr156 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && 
(m.data)[(m.p)] <= 122 { + goto tr156 + } + default: + goto tr156 + } + goto tr106 + case 144: + switch (m.data)[(m.p)] { + case 45: + goto tr153 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr154 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr154 + } + default: + goto tr154 + } + goto tr106 + case 145: + switch (m.data)[(m.p)] { + case 45: + goto tr151 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr152 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr152 + } + default: + goto tr152 + } + goto tr106 + case 146: + switch (m.data)[(m.p)] { + case 45: + goto tr149 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr150 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr150 + } + default: + goto tr150 + } + goto tr106 + case 147: + switch (m.data)[(m.p)] { + case 45: + goto tr147 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr148 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr148 + } + default: + goto tr148 + } + goto tr106 + case 148: + switch (m.data)[(m.p)] { + case 45: + goto tr145 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr146 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr146 + } + default: + goto tr146 + } + goto tr106 + case 149: + switch (m.data)[(m.p)] { + case 45: + goto tr143 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr144 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr144 + } + default: + goto tr144 + } + goto tr106 + case 150: + switch (m.data)[(m.p)] { + case 45: + goto tr141 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr142 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr142 + } + default: + goto tr142 + } + goto tr106 + case 151: + switch (m.data)[(m.p)] { + case 45: + goto tr139 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr140 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr140 + } + default: + goto tr140 + } + goto tr106 + case 152: + switch (m.data)[(m.p)] { + case 45: + goto tr137 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr138 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr138 + } + default: + goto tr138 + } + goto tr106 + case 153: + switch (m.data)[(m.p)] { + case 45: + goto tr135 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr136 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr136 + } + default: + goto tr136 + } + goto tr106 + case 154: + switch 
(m.data)[(m.p)] { + case 45: + goto tr133 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr134 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr134 + } + default: + goto tr134 + } + goto tr106 + case 155: + switch (m.data)[(m.p)] { + case 45: + goto tr131 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr132 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr132 + } + default: + goto tr132 + } + goto tr106 + case 156: + switch (m.data)[(m.p)] { + case 45: + goto tr129 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr130 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr130 + } + default: + goto tr130 + } + goto tr106 + case 157: + switch (m.data)[(m.p)] { + case 45: + goto tr127 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr128 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr128 + } + default: + goto tr128 + } + goto tr106 + case 158: + switch (m.data)[(m.p)] { + case 45: + goto tr125 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr126 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr126 + } + default: + goto tr126 + } + goto tr106 + case 159: + switch (m.data)[(m.p)] { + case 45: + goto tr123 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr124 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr124 + } + default: + goto tr124 + } + goto tr106 + case 160: + switch (m.data)[(m.p)] { + case 45: + goto tr121 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr122 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr122 + } + default: + goto tr122 + } + goto tr106 + case 161: + switch (m.data)[(m.p)] { + case 45: + goto tr119 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr120 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr120 + } + default: + goto tr120 + } + goto tr106 + case 162: + switch (m.data)[(m.p)] { + case 45: + goto tr117 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr118 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr118 + } + default: + goto tr118 + } + goto tr106 + case 163: + switch (m.data)[(m.p)] { + case 45: + goto tr115 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr116 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr116 + } + default: + goto tr116 + } + goto tr106 + case 164: + switch (m.data)[(m.p)] { + case 45: + goto tr113 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 
48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr114 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr114 + } + default: + goto tr114 + } + goto tr106 + case 165: + switch (m.data)[(m.p)] { + case 45: + goto tr111 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr112 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr112 + } + default: + goto tr112 + } + goto tr106 + case 166: + switch (m.data)[(m.p)] { + case 45: + goto tr109 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr110 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr110 + } + default: + goto tr110 + } + goto tr106 + case 167: + switch (m.data)[(m.p)] { + case 45: + goto tr107 + case 82: + goto tr208 + case 114: + goto tr208 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr108 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr108 + } + default: + goto tr108 + } + goto tr103 + case 168: + switch (m.data)[(m.p)] { + case 45: + goto tr109 + case 58: + goto tr168 + case 78: + goto tr209 + case 110: + goto tr209 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr110 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr110 + } + default: + goto tr110 + } + goto tr103 + case 169: + switch (m.data)[(m.p)] { + case 45: + goto tr210 + case 58: + goto tr168 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr112 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr112 + } + default: + goto tr112 + } + goto tr106 + case 170: + switch (m.data)[(m.p)] { + case 45: + goto tr113 + case 48: + goto tr211 + } + switch { + case (m.data)[(m.p)] < 65: + if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr114 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr211 + } + default: + goto tr211 + } + goto tr106 + case 171: + if (m.data)[(m.p)] == 45 { + goto tr115 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr116 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr116 + } + default: + goto tr116 + } + goto tr106 + case 193: + switch (m.data)[(m.p)] { + case 10: + goto tr183 + case 13: + goto tr183 + } + goto tr249 } - goto tr0 + + tr183: + m.cs = 0 + goto _again tr0: - - m.err = fmt.Errorf(errParse, m.p) - (m.p)-- - - { - goto st46 - } - - goto st0 - tr3: - - m.err = fmt.Errorf(errPrefix, m.p) - (m.p)-- - - { - goto st46 - } - - m.err = fmt.Errorf(errParse, m.p) - (m.p)-- - - { - goto st46 - } - - goto st0 - tr6: - - m.err = fmt.Errorf(errIdentifier, m.p) - (m.p)-- - - { - goto st46 - } - - m.err = fmt.Errorf(errParse, m.p) - (m.p)-- - - { - goto st46 - } - - goto st0 + m.cs = 0 + goto f0 + tr5: + m.cs = 0 + goto f3 + tr8: + m.cs = 0 + goto f5 tr41: - - m.err = fmt.Errorf(errSpecificString, m.p) - (m.p)-- - - { - goto st46 - } - - m.err = fmt.Errorf(errParse, m.p) - (m.p)-- - - { - goto st46 - } - - goto st0 + m.cs = 0 + goto f7 tr44: - - m.err = fmt.Errorf(errHex, m.p) - (m.p)-- - - { - goto 
st46 - } - - m.err = fmt.Errorf(errSpecificString, m.p) - (m.p)-- - - { - goto st46 - } - - m.err = fmt.Errorf(errParse, m.p) - (m.p)-- - - { - goto st46 - } - - goto st0 + m.cs = 0 + goto f8 + tr51: + m.cs = 0 + goto f10 + tr56: + m.cs = 0 + goto f11 + tr74: + m.cs = 0 + goto f13 + tr81: + m.cs = 0 + goto f15 + tr83: + m.cs = 0 + goto f17 + tr86: + m.cs = 0 + goto f19 + tr103: + m.cs = 0 + goto f20 + tr106: + m.cs = 0 + goto f21 + tr169: + m.cs = 0 + goto f22 + tr172: + m.cs = 0 + goto f23 + tr177: + m.cs = 0 + goto f24 + tr185: + m.cs = 0 + goto f25 + tr189: + m.cs = 0 + goto f27 + tr194: + m.cs = 0 + goto f28 + tr199: + m.cs = 0 + goto f29 + tr203: + m.cs = 0 + goto f30 + tr236: + m.cs = 0 + goto f46 + tr1: + m.cs = 2 + goto f1 + tr2: + m.cs = 3 + goto _again + tr3: + m.cs = 4 + goto _again + tr4: + m.cs = 5 + goto f2 + tr6: + m.cs = 6 + goto f4 + tr9: + m.cs = 7 + goto _again + tr11: + m.cs = 8 + goto _again + tr12: + m.cs = 9 + goto _again + tr13: + m.cs = 10 + goto _again + tr14: + m.cs = 11 + goto _again + tr15: + m.cs = 12 + goto _again + tr16: + m.cs = 13 + goto _again + tr17: + m.cs = 14 + goto _again + tr18: + m.cs = 15 + goto _again + tr19: + m.cs = 16 + goto _again + tr20: + m.cs = 17 + goto _again + tr21: + m.cs = 18 + goto _again + tr22: + m.cs = 19 + goto _again + tr23: + m.cs = 20 + goto _again + tr24: + m.cs = 21 + goto _again + tr25: + m.cs = 22 + goto _again + tr26: + m.cs = 23 + goto _again + tr27: + m.cs = 24 + goto _again + tr28: + m.cs = 25 + goto _again + tr29: + m.cs = 26 + goto _again + tr30: + m.cs = 27 + goto _again + tr31: + m.cs = 28 + goto _again + tr32: + m.cs = 29 + goto _again + tr33: + m.cs = 30 + goto _again + tr34: + m.cs = 31 + goto _again + tr35: + m.cs = 32 + goto _again + tr36: + m.cs = 33 + goto _again + tr37: + m.cs = 34 + goto _again + tr38: + m.cs = 35 + goto _again + tr39: + m.cs = 36 + goto _again + tr40: + m.cs = 37 + goto _again + tr10: + m.cs = 38 + goto f6 + tr213: + m.cs = 39 + goto _again + tr43: + m.cs = 39 + goto f4 + tr45: + m.cs = 40 + goto _again + tr46: + m.cs = 40 + goto f9 + tr7: + m.cs = 41 + goto f1 + tr49: + m.cs = 42 + goto _again tr50: + m.cs = 43 + goto _again + tr52: + m.cs = 45 + goto f1 + tr53: + m.cs = 46 + goto _again + tr54: + m.cs = 47 + goto _again + tr55: + m.cs = 48 + goto f2 + tr57: + m.cs = 49 + goto f4 + tr58: + m.cs = 50 + goto _again + tr59: + m.cs = 51 + goto _again + tr60: + m.cs = 52 + goto _again + tr61: + m.cs = 53 + goto _again + tr62: + m.cs = 54 + goto _again + tr63: + m.cs = 55 + goto _again + tr64: + m.cs = 56 + goto _again + tr65: + m.cs = 57 + goto _again + tr66: + m.cs = 58 + goto _again + tr67: + m.cs = 59 + goto _again + tr68: + m.cs = 60 + goto _again + tr69: + m.cs = 61 + goto _again + tr70: + m.cs = 62 + goto _again + tr71: + m.cs = 63 + goto _again + tr72: + m.cs = 64 + goto _again + tr73: + m.cs = 65 + goto f12 + tr75: + m.cs = 66 + goto f4 + tr78: + m.cs = 67 + goto _again + tr79: + m.cs = 68 + goto _again + tr80: + m.cs = 69 + goto f14 + tr215: + m.cs = 70 + goto f35 + tr217: + m.cs = 71 + goto _again + tr85: + m.cs = 71 + goto f18 + tr87: + m.cs = 72 + goto _again + tr88: + m.cs = 72 + goto f9 + tr76: + m.cs = 73 + goto f4 + tr91: + m.cs = 74 + goto _again + tr92: + m.cs = 75 + goto _again + tr93: + m.cs = 76 + goto _again + tr77: + m.cs = 77 + goto f4 + tr94: + m.cs = 78 + goto _again + tr95: + m.cs = 79 + goto _again + tr96: + m.cs = 80 + goto _again + tr97: + m.cs = 81 + goto _again + tr98: + m.cs = 82 + goto _again + tr99: + m.cs = 84 + goto f1 + tr100: + m.cs = 85 + goto _again + 
tr101: + m.cs = 86 + goto _again + tr102: + m.cs = 87 + goto f2 + tr104: + m.cs = 88 + goto f4 + tr107: + m.cs = 89 + goto _again + tr109: + m.cs = 90 + goto _again + tr111: + m.cs = 91 + goto _again + tr113: + m.cs = 92 + goto _again + tr115: + m.cs = 93 + goto _again + tr117: + m.cs = 94 + goto _again + tr119: + m.cs = 95 + goto _again + tr121: + m.cs = 96 + goto _again + tr123: + m.cs = 97 + goto _again + tr125: + m.cs = 98 + goto _again + tr127: + m.cs = 99 + goto _again + tr129: + m.cs = 100 + goto _again + tr131: + m.cs = 101 + goto _again + tr133: + m.cs = 102 + goto _again + tr135: + m.cs = 103 + goto _again + tr137: + m.cs = 104 + goto _again + tr139: + m.cs = 105 + goto _again + tr141: + m.cs = 106 + goto _again + tr143: + m.cs = 107 + goto _again + tr145: + m.cs = 108 + goto _again + tr147: + m.cs = 109 + goto _again + tr149: + m.cs = 110 + goto _again + tr151: + m.cs = 111 + goto _again + tr153: + m.cs = 112 + goto _again + tr155: + m.cs = 113 + goto _again + tr157: + m.cs = 114 + goto _again + tr159: + m.cs = 115 + goto _again + tr161: + m.cs = 116 + goto _again + tr163: + m.cs = 117 + goto _again + tr165: + m.cs = 118 + goto _again + tr167: + m.cs = 119 + goto _again + tr168: + m.cs = 120 + goto f6 + tr225: + m.cs = 121 + goto _again + tr223: + m.cs = 121 + goto f4 + tr173: + m.cs = 122 + goto _again + tr174: + m.cs = 122 + goto f9 + tr220: + m.cs = 123 + goto _again + tr171: + m.cs = 123 + goto f4 + tr178: + m.cs = 124 + goto _again + tr179: + m.cs = 124 + goto f9 + tr221: + m.cs = 125 + goto f38 + tr182: + m.cs = 126 + goto _again + tr228: + m.cs = 127 + goto _again + tr187: + m.cs = 127 + goto f26 + tr234: + m.cs = 127 + goto f44 + tr190: + m.cs = 128 + goto _again + tr191: + m.cs = 128 + goto f9 + tr240: + m.cs = 129 + goto _again + tr205: + m.cs = 129 + goto f31 + tr245: + m.cs = 129 + goto f50 + tr195: + m.cs = 130 + goto _again + tr196: + m.cs = 130 + goto f9 + tr237: + m.cs = 131 + goto f31 + tr200: + m.cs = 132 + goto _again + tr201: + m.cs = 132 + goto f9 + tr188: + m.cs = 133 + goto f26 + tr247: + m.cs = 134 + goto f45 + tr184: + m.cs = 135 + goto _again + tr206: + m.cs = 136 + goto f31 + tr248: + m.cs = 136 + goto f50 + tr166: + m.cs = 137 + goto _again + tr164: + m.cs = 138 + goto _again + tr162: + m.cs = 139 + goto _again + tr160: + m.cs = 140 + goto _again + tr158: + m.cs = 141 + goto _again + tr156: + m.cs = 142 + goto _again + tr154: + m.cs = 143 + goto _again + tr152: + m.cs = 144 + goto _again + tr150: + m.cs = 145 + goto _again + tr148: + m.cs = 146 + goto _again + tr146: + m.cs = 147 + goto _again + tr144: + m.cs = 148 + goto _again + tr142: + m.cs = 149 + goto _again + tr140: + m.cs = 150 + goto _again + tr138: + m.cs = 151 + goto _again + tr136: + m.cs = 152 + goto _again + tr134: + m.cs = 153 + goto _again + tr132: + m.cs = 154 + goto _again + tr130: + m.cs = 155 + goto _again + tr128: + m.cs = 156 + goto _again + tr126: + m.cs = 157 + goto _again + tr124: + m.cs = 158 + goto _again + tr122: + m.cs = 159 + goto _again + tr120: + m.cs = 160 + goto _again + tr118: + m.cs = 161 + goto _again + tr116: + m.cs = 162 + goto _again + tr114: + m.cs = 163 + goto _again + tr112: + m.cs = 164 + goto _again + tr110: + m.cs = 165 + goto _again + tr108: + m.cs = 166 + goto _again + tr105: + m.cs = 167 + goto f1 + tr208: + m.cs = 168 + goto _again + tr209: + m.cs = 169 + goto _again + tr210: + m.cs = 170 + goto f2 + tr211: + m.cs = 171 + goto _again + tr212: + m.cs = 172 + goto _again + tr42: + m.cs = 172 + goto f4 + tr47: + m.cs = 173 + goto _again + tr48: + m.cs = 
173 + goto f9 + tr214: + m.cs = 174 + goto _again + tr82: + m.cs = 174 + goto f16 + tr216: + m.cs = 175 + goto _again + tr84: + m.cs = 175 + goto f18 + tr89: + m.cs = 176 + goto _again + tr90: + m.cs = 176 + goto f9 + tr218: + m.cs = 177 + goto _again + tr170: + m.cs = 177 + goto f4 + tr219: + m.cs = 178 + goto f38 + tr227: + m.cs = 178 + goto f42 + tr233: + m.cs = 178 + goto f45 + tr239: + m.cs = 178 + goto f48 + tr244: + m.cs = 178 + goto f51 + tr224: + m.cs = 179 + goto _again + tr222: + m.cs = 179 + goto f4 + tr175: + m.cs = 180 + goto _again + tr176: + m.cs = 180 + goto f9 + tr180: + m.cs = 181 + goto _again + tr181: + m.cs = 181 + goto f9 + tr226: + m.cs = 182 + goto _again + tr186: + m.cs = 182 + goto f26 + tr232: + m.cs = 182 + goto f44 + tr192: + m.cs = 183 + goto _again + tr193: + m.cs = 183 + goto f9 + tr229: + m.cs = 184 + goto f42 + tr235: + m.cs = 184 + goto f45 + tr230: + m.cs = 185 + goto _again + tr231: + m.cs = 186 + goto _again + tr238: + m.cs = 187 + goto _again + tr204: + m.cs = 187 + goto f31 + tr243: + m.cs = 187 + goto f50 + tr197: + m.cs = 188 + goto _again + tr198: + m.cs = 188 + goto f9 + tr241: + m.cs = 189 + goto _again + tr246: + m.cs = 189 + goto f50 + tr242: + m.cs = 190 + goto _again + tr202: + m.cs = 191 + goto _again + tr207: + m.cs = 192 + goto _again + tr249: + m.cs = 193 + goto _again + + f4: + + m.pb = m.p + + goto _again + f9: + + // List of positions in the buffer to later lowercase + output.tolower = append(output.tolower, m.p-m.pb) + + goto _again + f2: + + output.prefix = string(m.text()) + + goto _again + f6: + + output.ID = string(m.text()) + + goto _again + f38: + + output.SS = string(m.text()) + // Iterate upper letters lowering them + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] + 32 + } + output.norm = string(m.text()) + // Revert the buffer to the original + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] - 32 + } + + goto _again + f0: m.err = fmt.Errorf(errPrefix, m.p) (m.p)-- - { - goto st46 - } + m.cs = 193 + goto _again + + goto _again + f5: m.err = fmt.Errorf(errIdentifier, m.p) (m.p)-- - { - goto st46 - } + m.cs = 193 + goto _again - m.err = fmt.Errorf(errParse, m.p) + goto _again + f7: + + m.err = fmt.Errorf(errSpecificString, m.p) (m.p)-- - { - goto st46 + m.cs = 193 + goto _again + + goto _again + f23: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + } - goto st0 - tr52: + goto _again + f11: + + m.err = fmt.Errorf(errSCIMNamespace, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f13: + + m.err = fmt.Errorf(errSCIMType, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f15: + + m.err = fmt.Errorf(errSCIMName, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f17: + + if m.p == m.pe { + m.err = fmt.Errorf(errSCIMOtherIncomplete, m.p-1) + } else { + m.err = fmt.Errorf(errSCIMOther, m.p) + } + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f14: + + output.scim.Type = scimschema.TypeFromString(string(m.text())) + + goto _again + f16: + + output.scim.pos = m.p + + goto _again + f35: + + output.scim.Name = string(m.data[output.scim.pos:m.p]) + + goto _again + f18: + + output.scim.pos = m.p + + goto _again + f22: + + m.err = fmt.Errorf(err8141SpecificString, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f21: + + m.err = fmt.Errorf(err8141Identifier, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f42: + + output.rComponent = 
string(m.text()) + + goto _again + f48: + + output.qComponent = string(m.text()) + + goto _again + f44: + + if output.rStart { + m.err = fmt.Errorf(err8141RComponentStart, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + output.rStart = true + + goto _again + f50: + + if output.qStart { + m.err = fmt.Errorf(err8141QComponentStart, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + output.qStart = true + + goto _again + f25: + + m.err = fmt.Errorf(err8141MalformedRComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f30: + + m.err = fmt.Errorf(err8141MalformedQComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f1: + + m.pb = m.p + + if m.parsingMode != RFC8141Only { + // Throw an error when: + // - we are entering here matching the the prefix in the namespace identifier part + // - looking ahead (3 chars) we find a colon + if pos := m.p + 3; pos < m.pe && m.data[pos] == 58 && output.prefix != "" { + m.err = fmt.Errorf(errNoUrnWithinID, pos) + (m.p)-- + + m.cs = 193 + goto _again + + } + } + + goto _again + f12: + + output.ID = string(m.text()) + + output.scim = &SCIM{} + + goto _again + f3: + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f10: + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + m.cs = 193 + goto _again m.err = fmt.Errorf(errNoUrnWithinID, m.p) (m.p)-- - { - goto st46 + m.cs = 193 + goto _again + + goto _again + f8: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + } - m.err = fmt.Errorf(errIdentifier, m.p) + m.err = fmt.Errorf(errSpecificString, m.p) (m.p)-- - { - goto st46 + m.cs = 193 + goto _again + + goto _again + f19: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + } - m.err = fmt.Errorf(errParse, m.p) + if m.p == m.pe { + m.err = fmt.Errorf(errSCIMOtherIncomplete, m.p-1) + } else { + m.err = fmt.Errorf(errSCIMOther, m.p) + } (m.p)-- - { - goto st46 + m.cs = 193 + goto _again + + goto _again + f24: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + } - goto st0 - stCase0: - st0: - m.cs = 0 - goto _out - tr1: + m.err = fmt.Errorf(err8141SpecificString, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f27: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + + m.err = fmt.Errorf(err8141MalformedRComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f28: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + + m.err = fmt.Errorf(err8141MalformedQComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f20: + + m.err = fmt.Errorf(err8141Identifier, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f26: + + if output.rStart { + m.err = fmt.Errorf(err8141RComponentStart, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + output.rStart = true m.pb = m.p - goto st2 - st2: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof2 - } - stCase2: - switch (m.data)[(m.p)] { - case 82: - goto st3 - case 114: - goto st3 - } - 
goto tr0 - st3: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof3 - } - stCase3: - switch (m.data)[(m.p)] { - case 78: - goto st4 - case 110: - goto st4 - } - goto tr3 - st4: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof4 - } - stCase4: - if (m.data)[(m.p)] == 58 { - goto tr5 - } - goto tr0 - tr5: + goto _again + f45: - output.prefix = string(m.text()) + if output.rStart { + m.err = fmt.Errorf(err8141RComponentStart, m.p) + (m.p)-- + + m.cs = 193 + goto _again - goto st5 - st5: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof5 } - stCase5: - switch (m.data)[(m.p)] { - case 85: - goto tr8 - case 117: - goto tr8 + output.rStart = true + + output.rComponent = string(m.text()) + + goto _again + f31: + + if output.qStart { + m.err = fmt.Errorf(err8141QComponentStart, m.p) + (m.p)-- + + m.cs = 193 + goto _again + } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto tr7 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto tr7 - } - default: - goto tr7 - } - goto tr6 - tr7: + output.qStart = true m.pb = m.p - goto st6 - st6: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof6 - } - stCase6: - switch (m.data)[(m.p)] { - case 45: - goto st7 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st7 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st7 - } - default: - goto st7 - } - goto tr6 - st7: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof7 - } - stCase7: - switch (m.data)[(m.p)] { - case 45: - goto st8 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st8 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st8 - } - default: - goto st8 - } - goto tr6 - st8: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof8 - } - stCase8: - switch (m.data)[(m.p)] { - case 45: - goto st9 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st9 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st9 - } - default: - goto st9 - } - goto tr6 - st9: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof9 - } - stCase9: - switch (m.data)[(m.p)] { - case 45: - goto st10 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st10 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st10 - } - default: - goto st10 - } - goto tr6 - st10: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof10 - } - stCase10: - switch (m.data)[(m.p)] { - case 45: - goto st11 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st11 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st11 - } - default: - goto st11 - } - goto tr6 - st11: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof11 - } - stCase11: - switch (m.data)[(m.p)] { - case 45: - goto st12 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st12 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st12 - } - default: - goto st12 - } - goto tr6 - st12: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof12 - } - 
stCase12: - switch (m.data)[(m.p)] { - case 45: - goto st13 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st13 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st13 - } - default: - goto st13 - } - goto tr6 - st13: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof13 - } - stCase13: - switch (m.data)[(m.p)] { - case 45: - goto st14 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st14 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st14 - } - default: - goto st14 - } - goto tr6 - st14: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof14 - } - stCase14: - switch (m.data)[(m.p)] { - case 45: - goto st15 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st15 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st15 - } - default: - goto st15 - } - goto tr6 - st15: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof15 - } - stCase15: - switch (m.data)[(m.p)] { - case 45: - goto st16 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st16 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st16 - } - default: - goto st16 - } - goto tr6 - st16: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof16 - } - stCase16: - switch (m.data)[(m.p)] { - case 45: - goto st17 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st17 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st17 - } - default: - goto st17 - } - goto tr6 - st17: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof17 - } - stCase17: - switch (m.data)[(m.p)] { - case 45: - goto st18 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st18 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st18 - } - default: - goto st18 - } - goto tr6 - st18: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof18 - } - stCase18: - switch (m.data)[(m.p)] { - case 45: - goto st19 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st19 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st19 - } - default: - goto st19 - } - goto tr6 - st19: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof19 - } - stCase19: - switch (m.data)[(m.p)] { - case 45: - goto st20 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st20 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st20 - } - default: - goto st20 - } - goto tr6 - st20: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof20 - } - stCase20: - switch (m.data)[(m.p)] { - case 45: - goto st21 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st21 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st21 - } - default: - goto st21 - } - goto tr6 - st21: - if (m.p)++; (m.p) 
== (m.pe) { - goto _testEof21 - } - stCase21: - switch (m.data)[(m.p)] { - case 45: - goto st22 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st22 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st22 - } - default: - goto st22 - } - goto tr6 - st22: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof22 - } - stCase22: - switch (m.data)[(m.p)] { - case 45: - goto st23 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st23 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st23 - } - default: - goto st23 - } - goto tr6 - st23: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof23 - } - stCase23: - switch (m.data)[(m.p)] { - case 45: - goto st24 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st24 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st24 - } - default: - goto st24 - } - goto tr6 - st24: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof24 - } - stCase24: - switch (m.data)[(m.p)] { - case 45: - goto st25 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st25 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st25 - } - default: - goto st25 - } - goto tr6 - st25: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof25 - } - stCase25: - switch (m.data)[(m.p)] { - case 45: - goto st26 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st26 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st26 - } - default: - goto st26 - } - goto tr6 - st26: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof26 - } - stCase26: - switch (m.data)[(m.p)] { - case 45: - goto st27 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st27 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st27 - } - default: - goto st27 - } - goto tr6 - st27: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof27 - } - stCase27: - switch (m.data)[(m.p)] { - case 45: - goto st28 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st28 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st28 - } - default: - goto st28 - } - goto tr6 - st28: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof28 - } - stCase28: - switch (m.data)[(m.p)] { - case 45: - goto st29 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st29 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st29 - } - default: - goto st29 - } - goto tr6 - st29: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof29 - } - stCase29: - switch (m.data)[(m.p)] { - case 45: - goto st30 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st30 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st30 - } - default: - goto st30 - } - 
goto tr6 - st30: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof30 - } - stCase30: - switch (m.data)[(m.p)] { - case 45: - goto st31 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st31 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st31 - } - default: - goto st31 - } - goto tr6 - st31: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof31 - } - stCase31: - switch (m.data)[(m.p)] { - case 45: - goto st32 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st32 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st32 - } - default: - goto st32 - } - goto tr6 - st32: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof32 - } - stCase32: - switch (m.data)[(m.p)] { - case 45: - goto st33 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st33 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st33 - } - default: - goto st33 - } - goto tr6 - st33: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof33 - } - stCase33: - switch (m.data)[(m.p)] { - case 45: - goto st34 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st34 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st34 - } - default: - goto st34 - } - goto tr6 - st34: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof34 - } - stCase34: - switch (m.data)[(m.p)] { - case 45: - goto st35 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st35 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st35 - } - default: - goto st35 - } - goto tr6 - st35: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof35 - } - stCase35: - switch (m.data)[(m.p)] { - case 45: - goto st36 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st36 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st36 - } - default: - goto st36 - } - goto tr6 - st36: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof36 - } - stCase36: - switch (m.data)[(m.p)] { - case 45: - goto st37 - case 58: - goto tr10 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st37 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st37 - } - default: - goto st37 - } - goto tr6 - st37: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof37 - } - stCase37: - if (m.data)[(m.p)] == 58 { - goto tr10 - } - goto tr6 - tr10: + goto _again + f51: - output.ID = string(m.text()) + if output.qStart { + m.err = fmt.Errorf(err8141QComponentStart, m.p) + (m.p)-- + + m.cs = 193 + goto _again - goto st38 - st38: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof38 } - stCase38: - switch (m.data)[(m.p)] { + output.qStart = true + + output.qComponent = string(m.text()) + + goto _again + f46: + + m.err = fmt.Errorf(err8141MalformedRComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + m.err = fmt.Errorf(err8141MalformedQComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + f29: + + if m.parsingMode == RFC2141Only || 
m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + + m.err = fmt.Errorf(err8141MalformedRComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + m.err = fmt.Errorf(err8141MalformedQComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + goto _again + + _again: + switch _toStateActions[m.cs] { case 33: - goto tr42 - case 36: - goto tr42 - case 37: - goto tr43 - case 61: - goto tr42 - case 95: - goto tr42 - } - switch { - case (m.data)[(m.p)] < 48: - if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { - goto tr42 - } - case (m.data)[(m.p)] > 59: - switch { - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto tr42 - } - case (m.data)[(m.p)] >= 64: - goto tr42 - } - default: - goto tr42 - } - goto tr41 - tr42: - m.pb = m.p + (m.p)-- - goto st44 - st44: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof44 + m.err = fmt.Errorf(err8141InformalID, m.p) + m.cs = 193 + goto _again } - stCase44: - switch (m.data)[(m.p)] { - case 33: - goto st44 - case 36: - goto st44 - case 37: - goto st39 - case 61: - goto st44 - case 95: - goto st44 - } - switch { - case (m.data)[(m.p)] < 48: - if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { - goto st44 - } - case (m.data)[(m.p)] > 59: - switch { - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st44 - } - case (m.data)[(m.p)] >= 64: - goto st44 - } - default: - goto st44 - } - goto tr41 - tr43: - m.pb = m.p - - goto st39 - st39: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof39 + if m.cs == 0 { + goto _out } - stCase39: - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st40 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st40 - } - default: - goto tr46 + if (m.p)++; (m.p) != (m.pe) { + goto _resume } - goto tr44 - tr46: - - m.tolower = append(m.tolower, m.p-m.pb) - - goto st40 - st40: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof40 - } - stCase40: - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st45 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st45 - } - default: - goto tr48 - } - goto tr44 - tr48: - - m.tolower = append(m.tolower, m.p-m.pb) - - goto st45 - st45: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof45 - } - stCase45: - switch (m.data)[(m.p)] { - case 33: - goto st44 - case 36: - goto st44 - case 37: - goto st39 - case 61: - goto st44 - case 95: - goto st44 - } - switch { - case (m.data)[(m.p)] < 48: - if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { - goto st44 - } - case (m.data)[(m.p)] > 59: - switch { - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st44 - } - case (m.data)[(m.p)] >= 64: - goto st44 - } - default: - goto st44 - } - goto tr44 - tr8: - - m.pb = m.p - - goto st41 - st41: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof41 - } - stCase41: - switch (m.data)[(m.p)] { - case 45: - goto st7 - case 58: - goto tr10 - case 82: - goto st42 - case 114: - goto st42 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st7 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st7 - } - default: - goto st7 - } - goto tr6 - st42: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof42 - } - stCase42: - switch (m.data)[(m.p)] { - case 45: - goto st8 - case 58: - goto tr10 - case 78: - goto 
st43 - case 110: - goto st43 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st8 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st8 - } - default: - goto st8 - } - goto tr50 - st43: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof43 - } - stCase43: - if (m.data)[(m.p)] == 45 { - goto st9 - } - switch { - case (m.data)[(m.p)] < 65: - if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { - goto st9 - } - case (m.data)[(m.p)] > 90: - if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { - goto st9 - } - default: - goto st9 - } - goto tr52 - st46: - if (m.p)++; (m.p) == (m.pe) { - goto _testEof46 - } - stCase46: - switch (m.data)[(m.p)] { - case 10: - goto st0 - case 13: - goto st0 - } - goto st46 - stOut: - _testEof2: - m.cs = 2 - goto _testEof - _testEof3: - m.cs = 3 - goto _testEof - _testEof4: - m.cs = 4 - goto _testEof - _testEof5: - m.cs = 5 - goto _testEof - _testEof6: - m.cs = 6 - goto _testEof - _testEof7: - m.cs = 7 - goto _testEof - _testEof8: - m.cs = 8 - goto _testEof - _testEof9: - m.cs = 9 - goto _testEof - _testEof10: - m.cs = 10 - goto _testEof - _testEof11: - m.cs = 11 - goto _testEof - _testEof12: - m.cs = 12 - goto _testEof - _testEof13: - m.cs = 13 - goto _testEof - _testEof14: - m.cs = 14 - goto _testEof - _testEof15: - m.cs = 15 - goto _testEof - _testEof16: - m.cs = 16 - goto _testEof - _testEof17: - m.cs = 17 - goto _testEof - _testEof18: - m.cs = 18 - goto _testEof - _testEof19: - m.cs = 19 - goto _testEof - _testEof20: - m.cs = 20 - goto _testEof - _testEof21: - m.cs = 21 - goto _testEof - _testEof22: - m.cs = 22 - goto _testEof - _testEof23: - m.cs = 23 - goto _testEof - _testEof24: - m.cs = 24 - goto _testEof - _testEof25: - m.cs = 25 - goto _testEof - _testEof26: - m.cs = 26 - goto _testEof - _testEof27: - m.cs = 27 - goto _testEof - _testEof28: - m.cs = 28 - goto _testEof - _testEof29: - m.cs = 29 - goto _testEof - _testEof30: - m.cs = 30 - goto _testEof - _testEof31: - m.cs = 31 - goto _testEof - _testEof32: - m.cs = 32 - goto _testEof - _testEof33: - m.cs = 33 - goto _testEof - _testEof34: - m.cs = 34 - goto _testEof - _testEof35: - m.cs = 35 - goto _testEof - _testEof36: - m.cs = 36 - goto _testEof - _testEof37: - m.cs = 37 - goto _testEof - _testEof38: - m.cs = 38 - goto _testEof - _testEof44: - m.cs = 44 - goto _testEof - _testEof39: - m.cs = 39 - goto _testEof - _testEof40: - m.cs = 40 - goto _testEof - _testEof45: - m.cs = 45 - goto _testEof - _testEof41: - m.cs = 41 - goto _testEof - _testEof42: - m.cs = 42 - goto _testEof - _testEof43: - m.cs = 43 - goto _testEof - _testEof46: - m.cs = 46 - goto _testEof - _testEof: { } if (m.p) == (m.eof) { - switch m.cs { - case 44, 45: - - raw := m.text() - output.SS = string(raw) - // Iterate upper letters lowering them - for _, i := range m.tolower { - raw[i] = raw[i] + 32 - } - output.norm = string(raw) - - case 1, 2, 4: - - m.err = fmt.Errorf(errParse, m.p) - (m.p)-- - - { - goto st46 - } - - case 3: + switch _eofActions[m.cs] { + case 1: m.err = fmt.Errorf(errPrefix, m.p) (m.p)-- - { - goto st46 - } + m.cs = 193 + goto _again - m.err = fmt.Errorf(errParse, m.p) - (m.p)-- - - { - goto st46 - } - - case 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 41: + case 6: m.err = fmt.Errorf(errIdentifier, m.p) (m.p)-- - { - goto st46 - } + m.cs = 193 + goto _again - m.err = fmt.Errorf(errParse, m.p) - (m.p)-- - - { - goto st46 - } - - case 38: + 
case 8: m.err = fmt.Errorf(errSpecificString, m.p) (m.p)-- - { - goto st46 + m.cs = 193 + goto _again + + case 24: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + } - m.err = fmt.Errorf(errParse, m.p) + case 12: + + m.err = fmt.Errorf(errSCIMNamespace, m.p) (m.p)-- - { - goto st46 - } + m.cs = 193 + goto _again - case 42: + case 14: - m.err = fmt.Errorf(errPrefix, m.p) + m.err = fmt.Errorf(errSCIMType, m.p) (m.p)-- - { - goto st46 + m.cs = 193 + goto _again + + case 16: + + m.err = fmt.Errorf(errSCIMName, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 18: + + if m.p == m.pe { + m.err = fmt.Errorf(errSCIMOtherIncomplete, m.p-1) + } else { + m.err = fmt.Errorf(errSCIMOther, m.p) } + (m.p)-- + + m.cs = 193 + goto _again + + case 23: + + m.err = fmt.Errorf(err8141SpecificString, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 22: + + m.err = fmt.Errorf(err8141Identifier, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 26: + + m.err = fmt.Errorf(err8141MalformedRComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 31: + + m.err = fmt.Errorf(err8141MalformedQComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 34: + + output.SS = string(m.text()) + // Iterate upper letters lowering them + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] + 32 + } + output.norm = string(m.text()) + // Revert the buffer to the original + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] - 32 + } + + output.kind = RFC2141 + + case 38: + + output.SS = string(m.text()) + // Iterate upper letters lowering them + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] + 32 + } + output.norm = string(m.text()) + // Revert the buffer to the original + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] - 32 + } + + output.kind = RFC8141 + + case 4: m.err = fmt.Errorf(errIdentifier, m.p) (m.p)-- - { - goto st46 - } + m.cs = 193 + goto _again - m.err = fmt.Errorf(errParse, m.p) + m.err = fmt.Errorf(errPrefix, m.p) (m.p)-- - { - goto st46 - } + m.cs = 193 + goto _again - case 43: + case 11: + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + m.cs = 193 + goto _again m.err = fmt.Errorf(errNoUrnWithinID, m.p) (m.p)-- - { - goto st46 - } + m.cs = 193 + goto _again - m.err = fmt.Errorf(errIdentifier, m.p) - (m.p)-- + case 9: - { - goto st46 - } + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- - m.err = fmt.Errorf(errParse, m.p) - (m.p)-- + m.cs = 193 + goto _again - { - goto st46 - } - - case 39, 40: - - m.err = fmt.Errorf(errHex, m.p) - (m.p)-- - - { - goto st46 } m.err = fmt.Errorf(errSpecificString, m.p) (m.p)-- - { - goto st46 + m.cs = 193 + goto _again + + case 20: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + } - m.err = fmt.Errorf(errParse, m.p) + if m.p == m.pe { + m.err = fmt.Errorf(errSCIMOtherIncomplete, m.p-1) + } else { + m.err = fmt.Errorf(errSCIMOther, m.p) + } (m.p)-- - { - goto st46 + m.cs = 193 + goto _again + + case 25: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + } + + m.err = fmt.Errorf(err8141SpecificString, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 28: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = 
fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + + m.err = fmt.Errorf(err8141MalformedRComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 29: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + + m.err = fmt.Errorf(err8141MalformedQComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 21: + + m.err = fmt.Errorf(err8141Identifier, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 42: + + output.rComponent = string(m.text()) + + output.kind = RFC8141 + + case 48: + + output.qComponent = string(m.text()) + + output.kind = RFC8141 + + case 41: + + output.fComponent = string(m.text()) + + output.kind = RFC8141 + + case 40: + + m.pb = m.p + + output.fComponent = string(m.text()) + + output.kind = RFC8141 + + case 30: + + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + + m.err = fmt.Errorf(err8141MalformedRComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + m.err = fmt.Errorf(err8141MalformedQComp, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + case 35: + + output.scim.Name = string(m.data[output.scim.pos:m.p]) + + output.SS = string(m.text()) + // Iterate upper letters lowering them + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] + 32 + } + output.norm = string(m.text()) + // Revert the buffer to the original + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] - 32 + } + + output.kind = RFC7643 + + case 37: + + output.scim.Other = string(m.data[output.scim.pos:m.p]) + + output.SS = string(m.text()) + // Iterate upper letters lowering them + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] + 32 + } + output.norm = string(m.text()) + // Revert the buffer to the original + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] - 32 + } + + output.kind = RFC7643 + + case 44: + + if output.rStart { + m.err = fmt.Errorf(err8141RComponentStart, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + output.rStart = true + + output.rComponent = string(m.text()) + + output.kind = RFC8141 + + case 50: + + if output.qStart { + m.err = fmt.Errorf(err8141QComponentStart, m.p) + (m.p)-- + + m.cs = 193 + goto _again + + } + output.qStart = true + + output.qComponent = string(m.text()) + + output.kind = RFC8141 } } @@ -1686,3 +5031,16 @@ func (m *machine) Parse(input []byte) (*URN, error) { return output, nil } + +func (m *machine) WithParsingMode(x ParsingMode) { + m.parsingMode = x + switch m.parsingMode { + case RFC2141Only: + m.startParsingAt = enMain + case RFC8141Only: + m.startParsingAt = enRfc8141Only + case RFC7643Only: + m.startParsingAt = enScimOnly + } + m.parsingModeSet = true +} diff --git a/vendor/github.com/leodido/go-urn/machine.go.rl b/vendor/github.com/leodido/go-urn/machine.go.rl index 3bc05a65..0a174219 100644 --- a/vendor/github.com/leodido/go-urn/machine.go.rl +++ b/vendor/github.com/leodido/go-urn/machine.go.rl @@ -2,15 +2,28 @@ package urn import ( "fmt" + + scimschema "github.com/leodido/go-urn/scim/schema" ) var ( - errPrefix = "expecting the prefix to be the \"urn\" string (whatever case) [col %d]" - errIdentifier = "expecting the identifier to be string (1..31 alnum chars, also containing dashes but not at its start) [col %d]" - errSpecificString = "expecting the specific string to be a string 
containing alnum, hex, or others ([()+,-.:=@;$_!*']) chars [col %d]" - errNoUrnWithinID = "expecting the identifier to not contain the \"urn\" reserved string [col %d]" - errHex = "expecting the specific string hex chars to be well-formed (%%alnum{2}) [col %d]" - errParse = "parsing error [col %d]" + errPrefix = "expecting the prefix to be the \"urn\" string (whatever case) [col %d]" + errIdentifier = "expecting the identifier to be string (1..31 alnum chars, also containing dashes but not at its beginning) [col %d]" + errSpecificString = "expecting the specific string to be a string containing alnum, hex, or others ([()+,-.:=@;$_!*']) chars [col %d]" + errNoUrnWithinID = "expecting the identifier to not contain the \"urn\" reserved string [col %d]" + errHex = "expecting the percent encoded chars to be well-formed (%%alnum{2}) [col %d]" + errSCIMNamespace = "expecing the SCIM namespace identifier (ietf:params:scim) [col %d]" + errSCIMType = "expecting a correct SCIM type (schemas, api, param) [col %d]" + errSCIMName = "expecting one or more alnum char in the SCIM name part [col %d]" + errSCIMOther = "expecting a well-formed other SCIM part [col %d]" + errSCIMOtherIncomplete = "expecting a not empty SCIM other part after colon [col %d]" + err8141InformalID = "informal URN namespace must be in the form urn-[1-9][0-9] [col %d]" + err8141SpecificString = "expecting the specific string to contain alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] not in first position) chars [col %d]" + err8141Identifier = "expecting the indentifier to be a string with (length 2 to 32 chars) containing alnum (or dashes) not starting or ending with a dash [col %d]" + err8141RComponentStart = "expecting only one r-component (starting with the ?+ sequence) [col %d]" + err8141QComponentStart = "expecting only one q-component (starting with the ?= sequence) [col %d]" + err8141MalformedRComp = "expecting a non-empty r-component containing alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] but not at its beginning) [col %d]" + err8141MalformedQComp = "expecting a non-empty q-component containing alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] 
but not at its beginning) [col %d]" ) %%{ @@ -24,25 +37,42 @@ action mark { } action tolower { - m.tolower = append(m.tolower, m.p - m.pb) + // List of positions in the buffer to later lowercase + output.tolower = append(output.tolower, m.p - m.pb) } action set_pre { output.prefix = string(m.text()) } +action throw_pre_urn_err { + if m.parsingMode != RFC8141Only { + // Throw an error when: + // - we are entering here matching the the prefix in the namespace identifier part + // - looking ahead (3 chars) we find a colon + if pos := m.p + 3; pos < m.pe && m.data[pos] == 58 && output.prefix != "" { + m.err = fmt.Errorf(errNoUrnWithinID, pos) + fhold; + fgoto fail; + } + } +} + action set_nid { output.ID = string(m.text()) } action set_nss { - raw := m.text() - output.SS = string(raw) + output.SS = string(m.text()) // Iterate upper letters lowering them - for _, i := range m.tolower { - raw[i] = raw[i] + 32 + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] + 32 + } + output.norm = string(m.text()) + // Revert the buffer to the original + for _, i := range output.tolower { + m.data[m.pb+i] = m.data[m.pb+i] - 32 } - output.norm = string(raw) } action err_pre { @@ -70,20 +100,20 @@ action err_urn { } action err_hex { - m.err = fmt.Errorf(errHex, m.p) - fhold; - fgoto fail; + if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only { + m.err = fmt.Errorf(errHex, m.p) + fhold; + fgoto fail; + } } -action err_parse { - m.err = fmt.Errorf(errParse, m.p) - fhold; - fgoto fail; +action base_type { + output.kind = RFC2141; } -pre = ([uU][rR][nN] @err(err_pre)) >mark %set_pre; +pre = ([uU] @err(err_pre) [rR] @err(err_pre) [nN] @err(err_pre)) >mark >throw_pre_urn_err %set_pre; -nid = (alnum >mark (alnum | '-'){0,31}) %set_nid; +nid = (alnum >mark (alnum | '-'){0,31}) $err(err_nid) %set_nid; hex = '%' (digit | lower | upper >tolower){2} $err(err_hex); @@ -91,9 +121,179 @@ sss = (alnum | [()+,\-.:=@;$_!*']); nss = (sss | hex)+ $err(err_nss); +nid_not_urn = (nid - pre %err(err_urn)); + +urn = pre ':' @err(err_pre) (nid_not_urn ':' nss >mark %set_nss) %eof(base_type); + +### SCIM BEG + +action err_scim_nid { + m.err = fmt.Errorf(errSCIMNamespace, m.p) + fhold; + fgoto fail; +} + +action err_scim_type { + m.err = fmt.Errorf(errSCIMType, m.p) + fhold; + fgoto fail; +} + +action err_scim_name { + m.err = fmt.Errorf(errSCIMName, m.p) + fhold; + fgoto fail; +} + +action err_scim_other { + if m.p == m.pe { + m.err = fmt.Errorf(errSCIMOtherIncomplete, m.p-1) + } else { + m.err = fmt.Errorf(errSCIMOther, m.p) + } + fhold; + fgoto fail; +} + +action scim_type { + output.kind = RFC7643; +} + +action create_scim { + output.scim = &SCIM{}; +} + +action set_scim_type { + output.scim.Type = scimschema.TypeFromString(string(m.text())) +} + +action mark_scim_name { + output.scim.pos = m.p +} + +action set_scim_name { + output.scim.Name = string(m.data[output.scim.pos:m.p]) +} + +action mark_scim_other { + output.scim.pos = m.p +} + +action set_scim_other { + output.scim.Other = string(m.data[output.scim.pos:m.p]) +} + +scim_nid = 'ietf:params:scim' >mark %set_nid %create_scim $err(err_scim_nid); + +scim_other = ':' (sss | hex)+ >mark_scim_other %set_scim_other $err(err_scim_other); + +scim_name = (alnum)+ >mark_scim_name %set_scim_name $err(err_scim_name); + +scim_type = ('schemas' | 'api' | 'param') >mark %set_scim_type $err(err_scim_type); + +scim_only := pre ':' @err(err_pre) (scim_nid ':' scim_type ':' scim_name scim_other? 
%set_nss) %eof(scim_type); + +### SCIM END + +### 8141 BEG + +action err_nss_8141 { + m.err = fmt.Errorf(err8141SpecificString, m.p) + fhold; + fgoto fail; +} + +action err_nid_8141 { + m.err = fmt.Errorf(err8141Identifier, m.p) + fhold; + fgoto fail; +} + +action rfc8141_type { + output.kind = RFC8141; +} + +action set_r_component { + output.rComponent = string(m.text()) +} + +action set_q_component { + output.qComponent = string(m.text()) +} + +action set_f_component { + output.fComponent = string(m.text()) +} + +action informal_nid_match { + fhold; + m.err = fmt.Errorf(err8141InformalID, m.p); + fgoto fail; +} + +action mark_r_start { + if output.rStart { + m.err = fmt.Errorf(err8141RComponentStart, m.p) + fhold; + fgoto fail; + } + output.rStart = true +} + +action mark_q_start { + if output.qStart { + m.err = fmt.Errorf(err8141QComponentStart, m.p) + fhold; + fgoto fail; + } + output.qStart = true +} + +action err_malformed_r_component { + m.err = fmt.Errorf(err8141MalformedRComp, m.p) + fhold; + fgoto fail; +} + +action err_malformed_q_component { + m.err = fmt.Errorf(err8141MalformedQComp, m.p) + fhold; + fgoto fail; +} + +pchar = (sss | '~' | '&' | hex); + +component = pchar (pchar | '/' | '?')*; + +r_start = ('?+') %mark_r_start; + +r_component = r_start <: (r_start | component)+ $err(err_malformed_r_component) >mark %set_r_component; + +q_start = ('?=') %mark_q_start; + +q_component = q_start <: (q_start | component)+ $err(err_malformed_q_component) >mark %set_q_component; + +rq_components = (r_component :>> q_component? | q_component); + +fragment = (pchar | '/' | '?')*; + +f_component = '#' fragment >mark %set_f_component; + +nss_rfc8141 = (pchar >mark (pchar | '/')*) $err(err_nss_8141) %set_nss; + +nid_rfc8141 = (alnum >mark (alnum | '-'){0,30} alnum) $err(err_nid_8141) %set_nid; + +informal_id = pre ('-' [a-zA-z0] %to(informal_nid_match)); + +nid_rfc8141_not_urn = (nid_rfc8141 - informal_id?); + +rfc8141_only := pre ':' @err(err_pre) nid_rfc8141_not_urn ':' nss_rfc8141 rq_components? f_component? %eof(rfc8141_type); + +### 8141 END + fail := (any - [\n\r])* @err{ fgoto main; }; -main := (pre ':' (nid - pre %err(err_urn)) $err(err_nid) ':' nss >mark %set_nss) $err(err_parse); +main := urn; }%% @@ -103,6 +303,7 @@ main := (pre ':' (nid - pre %err(err_urn)) $err(err_nid) ':' nss >mark %set_nss) type Machine interface { Error() error Parse(input []byte) (*URN, error) + WithParsingMode(ParsingMode) } type machine struct { @@ -110,12 +311,24 @@ type machine struct { cs int p, pe, eof, pb int err error - tolower []int + startParsingAt int + parsingMode ParsingMode + parsingModeSet bool } // NewMachine creates a new FSM able to parse RFC 2141 strings. -func NewMachine() Machine { - m := &machine{} +func NewMachine(options ...Option) Machine { + m := &machine{ + parsingModeSet: false, + } + + for _, o := range options { + o(m) + } + // Set default parsing mode + if !m.parsingModeSet { + m.WithParsingMode(DefaultParsingMode) + } %% access m.; %% variable p m.p; @@ -137,7 +350,7 @@ func (m *machine) text() []byte { return m.data[m.pb:m.p] } -// Parse parses the input byte array as a RFC 2141 string. +// Parse parses the input byte array as a RFC 2141 or RFC7643 string. 
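The vendored go-urn update above replaces the single RFC 2141 grammar with selectable parsing modes (RFC 2141, RFC 7643/SCIM, RFC 8141) and exposes the extra RFC 8141 components on the URN type. A minimal sketch of how the new API could be exercised, relying on the option constructor, component accessors, and SCIM helpers that appear later in this diff; the sample URNs (the first is the RFC 8141 example URN) and the expected values in the comments are illustrative only:

    package main

    import (
        "fmt"

        "github.com/leodido/go-urn"
    )

    func main() {
        // RFC 8141 mode: r-, q-, and f-components are now recognized.
        m := urn.NewMachine(urn.WithParsingMode(urn.RFC8141Only))
        u, err := m.Parse([]byte("urn:example:a123,z456?+abc?=xyz#frag"))
        if err != nil {
            fmt.Println("parse error:", err)
            return
        }
        fmt.Println(u.ID, u.SS)                                     // example a123,z456
        fmt.Println(u.RComponent(), u.QComponent(), u.FComponent()) // abc xyz frag

        // RFC 7643 mode: SCIM URNs get a dedicated grammar and accessor.
        s, ok := urn.Parse([]byte("urn:ietf:params:scim:schemas:core:2.0:User"), urn.WithParsingMode(urn.RFC7643Only))
        if ok && s.IsSCIM() {
            scim := s.SCIM()
            fmt.Println(scim.Type, scim.Name, scim.Other) // schemas core 2.0:User
        }
    }

If no mode is supplied, the machine falls back to DefaultParsingMode (RFC2141Only), so existing callers of urn.Parse keep the previous behavior.
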
func (m *machine) Parse(input []byte) (*URN, error) { m.data = input m.p = 0 @@ -145,10 +358,11 @@ func (m *machine) Parse(input []byte) (*URN, error) { m.pe = len(input) m.eof = len(input) m.err = nil - m.tolower = []int{} - output := &URN{} + m.cs = m.startParsingAt + output := &URN{ + tolower: []int{}, + } - %% write init; %% write exec; if m.cs < first_final || m.cs == en_fail { @@ -157,3 +371,16 @@ func (m *machine) Parse(input []byte) (*URN, error) { return output, nil } + +func (m *machine) WithParsingMode(x ParsingMode) { + m.parsingMode = x + switch m.parsingMode { + case RFC2141Only: + m.startParsingAt = en_main + case RFC8141Only: + m.startParsingAt = en_rfc8141_only + case RFC7643Only: + m.startParsingAt = en_scim_only + } + m.parsingModeSet = true +} \ No newline at end of file diff --git a/vendor/github.com/leodido/go-urn/makefile b/vendor/github.com/leodido/go-urn/makefile index df87cdc6..68d5dd0f 100644 --- a/vendor/github.com/leodido/go-urn/makefile +++ b/vendor/github.com/leodido/go-urn/makefile @@ -15,18 +15,24 @@ clean: .PHONY: images images: docs/urn.png +.PHONY: snake2camel +snake2camel: + @cd ./tools/snake2camel; go build -o ../../snake2camel . + .PHONY: removecomments removecomments: @cd ./tools/removecomments; go build -o ../../removecomments . machine.go: machine.go.rl +machine.go: snake2camel + machine.go: removecomments machine.go: - $(RAGEL) -Z -G2 -e -o $@ $< + $(RAGEL) -Z -G1 -e -o $@ $< @./removecomments $@ - $(MAKE) -s file=$@ snake2camel + @./snake2camel $@ $(GOFMT) $@ docs/urn.dot: machine.go.rl @@ -41,13 +47,5 @@ bench: *_test.go machine.go go test -bench=. -benchmem -benchtime=5s ./... .PHONY: tests -tests: *_test.go +tests: *_test.go $(GO_TEST) ./... - -.PHONY: snake2camel -snake2camel: - @awk -i inplace '{ \ - while ( match($$0, /(.*)([a-z]+[0-9]*)_([a-zA-Z0-9])(.*)/, cap) ) \ - $$0 = cap[1] cap[2] toupper(cap[3]) cap[4]; \ - print \ - }' $(file) diff --git a/vendor/github.com/leodido/go-urn/options.go b/vendor/github.com/leodido/go-urn/options.go new file mode 100644 index 00000000..c543835a --- /dev/null +++ b/vendor/github.com/leodido/go-urn/options.go @@ -0,0 +1,9 @@ +package urn + +type Option func(Machine) + +func WithParsingMode(mode ParsingMode) Option { + return func(m Machine) { + m.WithParsingMode(mode) + } +} diff --git a/vendor/github.com/leodido/go-urn/parsing_mode.go b/vendor/github.com/leodido/go-urn/parsing_mode.go new file mode 100644 index 00000000..fce5aadc --- /dev/null +++ b/vendor/github.com/leodido/go-urn/parsing_mode.go @@ -0,0 +1,12 @@ +package urn + +type ParsingMode int + +const ( + Default ParsingMode = iota + RFC2141Only + RFC7643Only + RFC8141Only +) + +const DefaultParsingMode = RFC2141Only diff --git a/vendor/github.com/leodido/go-urn/scim.go b/vendor/github.com/leodido/go-urn/scim.go new file mode 100644 index 00000000..f6b7aefb --- /dev/null +++ b/vendor/github.com/leodido/go-urn/scim.go @@ -0,0 +1,48 @@ +package urn + +import ( + "encoding/json" + "fmt" + + scimschema "github.com/leodido/go-urn/scim/schema" +) + +const errInvalidSCIMURN = "invalid SCIM URN: %s" + +type SCIM struct { + Type scimschema.Type + Name string + Other string + pos int +} + +func (s SCIM) MarshalJSON() ([]byte, error) { + return json.Marshal(s.String()) +} + +func (s *SCIM) UnmarshalJSON(bytes []byte) error { + var str string + if err := json.Unmarshal(bytes, &str); err != nil { + return err + } + // Parse as SCIM + value, ok := Parse([]byte(str), WithParsingMode(RFC7643Only)) + if !ok { + return fmt.Errorf(errInvalidSCIMURN, str) + } + if 
value.RFC() != RFC7643 { + return fmt.Errorf(errInvalidSCIMURN, str) + } + *s = *value.SCIM() + + return nil +} + +func (s *SCIM) String() string { + ret := fmt.Sprintf("urn:ietf:params:scim:%s:%s", s.Type.String(), s.Name) + if s.Other != "" { + ret += fmt.Sprintf(":%s", s.Other) + } + + return ret +} diff --git a/vendor/github.com/leodido/go-urn/scim/schema/type.go b/vendor/github.com/leodido/go-urn/scim/schema/type.go new file mode 100644 index 00000000..13491823 --- /dev/null +++ b/vendor/github.com/leodido/go-urn/scim/schema/type.go @@ -0,0 +1,36 @@ +package scimschema + +type Type int + +const ( + Unsupported Type = iota + Schemas + API + Param +) + +func (t Type) String() string { + switch t { + case Schemas: + return "schemas" + case API: + return "api" + case Param: + return "param" + } + + return "" +} + +func TypeFromString(input string) Type { + switch input { + case "schemas": + return Schemas + case "api": + return API + case "param": + return Param + } + + return Unsupported +} diff --git a/vendor/github.com/leodido/go-urn/urn.go b/vendor/github.com/leodido/go-urn/urn.go index d51a6c91..894d6258 100644 --- a/vendor/github.com/leodido/go-urn/urn.go +++ b/vendor/github.com/leodido/go-urn/urn.go @@ -16,10 +16,18 @@ const errInvalidURN = "invalid URN: %s" // // Details at https://tools.ietf.org/html/rfc2141. type URN struct { - prefix string // Static prefix. Equal to "urn" when empty. - ID string // Namespace identifier - SS string // Namespace specific string - norm string // Normalized namespace specific string + prefix string // Static prefix. Equal to "urn" when empty. + ID string // Namespace identifier (NID) + SS string // Namespace specific string (NSS) + norm string // Normalized namespace specific string + kind Kind + scim *SCIM + rComponent string // RFC8141 + qComponent string // RFC8141 + fComponent string // RFC8141 + rStart bool // RFC8141 + qStart bool // RFC8141 + tolower []int } // Normalize turns the receiving URN into its norm version. @@ -30,12 +38,21 @@ func (u *URN) Normalize() *URN { prefix: "urn", ID: strings.ToLower(u.ID), SS: u.norm, + // rComponent: u.rComponent, + // qComponent: u.qComponent, + // fComponent: u.fComponent, } } // Equal checks the lexical equivalence of the current URN with another one. func (u *URN) Equal(x *URN) bool { - return *u.Normalize() == *x.Normalize() + if x == nil { + return false + } + nu := u.Normalize() + nx := x.Normalize() + + return nu.prefix == nx.prefix && nu.ID == nx.ID && nu.SS == nx.SS } // String reassembles the URN into a valid URN string. @@ -51,14 +68,23 @@ func (u *URN) String() string { res += "urn" } res += u.prefix + ":" + u.ID + ":" + u.SS + if u.rComponent != "" { + res += "?+" + u.rComponent + } + if u.qComponent != "" { + res += "?=" + u.qComponent + } + if u.fComponent != "" { + res += "#" + u.fComponent + } } return res } -// Parse is responsible to create an URN instance from a byte array matching the correct URN syntax. -func Parse(u []byte) (*URN, bool) { - urn, err := NewMachine().Parse(u) +// Parse is responsible to create an URN instance from a byte array matching the correct URN syntax (RFC 2141). +func Parse(u []byte, options ...Option) (*URN, bool) { + urn, err := NewMachine(options...).Parse(u) if err != nil { return nil, false } @@ -71,7 +97,7 @@ func (u URN) MarshalJSON() ([]byte, error) { return json.Marshal(u.String()) } -// MarshalJSON unmarshals a URN from JSON string form (e.g. `"urn:oid:1.2.3.4"`). +// UnmarshalJSON unmarshals a URN from JSON string form (e.g. 
`"urn:oid:1.2.3.4"`). func (u *URN) UnmarshalJSON(bytes []byte) error { var str string if err := json.Unmarshal(bytes, &str); err != nil { @@ -82,5 +108,34 @@ func (u *URN) UnmarshalJSON(bytes []byte) error { } else { *u = *value } + return nil -} \ No newline at end of file +} + +func (u *URN) IsSCIM() bool { + return u.kind == RFC7643 +} + +func (u *URN) SCIM() *SCIM { + if u.kind != RFC7643 { + return nil + } + + return u.scim +} + +func (u *URN) RFC() Kind { + return u.kind +} + +func (u *URN) FComponent() string { + return u.fComponent +} + +func (u *URN) QComponent() string { + return u.qComponent +} + +func (u *URN) RComponent() string { + return u.rComponent +} diff --git a/vendor/github.com/leodido/go-urn/urn8141.go b/vendor/github.com/leodido/go-urn/urn8141.go new file mode 100644 index 00000000..da4dd062 --- /dev/null +++ b/vendor/github.com/leodido/go-urn/urn8141.go @@ -0,0 +1,30 @@ +package urn + +import ( + "encoding/json" + "fmt" +) + +const errInvalidURN8141 = "invalid URN per RFC 8141: %s" + +type URN8141 struct { + *URN +} + +func (u URN8141) MarshalJSON() ([]byte, error) { + return json.Marshal(u.String()) +} + +func (u *URN8141) UnmarshalJSON(bytes []byte) error { + var str string + if err := json.Unmarshal(bytes, &str); err != nil { + return err + } + if value, ok := Parse([]byte(str), WithParsingMode(RFC8141Only)); !ok { + return fmt.Errorf(errInvalidURN8141, str) + } else { + *u = URN8141{value} + } + + return nil +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE deleted file mode 100644 index 8dada3ed..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE deleted file mode 100644 index 5d8cb5b7..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE +++ /dev/null @@ -1 +0,0 @@ -Copyright 2012 Matt T. 
Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore deleted file mode 100644 index e16fb946..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore +++ /dev/null @@ -1 +0,0 @@ -cover.dat diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile deleted file mode 100644 index 81be2143..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -all: - -cover: - go test -cover -v -coverprofile=cover.dat ./... - go tool cover -func cover.dat - -.PHONY: cover diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go deleted file mode 100644 index 7c08e564..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "errors" - "io" - - "google.golang.org/protobuf/proto" -) - -// TODO: Give error package name prefix in next minor release. -var errInvalidVarint = errors.New("invalid varint32 encountered") - -// ReadDelimited decodes a message from the provided length-delimited stream, -// where the length is encoded as 32-bit varint prefix to the message body. -// It returns the total number of bytes read and any applicable error. This is -// roughly equivalent to the companion Java API's -// MessageLite#parseDelimitedFrom. As per the reader contract, this function -// calls r.Read repeatedly as required until exactly one message including its -// prefix is read and decoded (or an error has occurred). The function never -// reads more bytes from the stream than required. The function never returns -// an error if a message has been read and decoded correctly, even if the end -// of the stream has been reached in doing so. In that case, any subsequent -// calls return (0, io.EOF). -func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { - // TODO: Consider allowing the caller to specify a decode buffer in the - // next major version. - - // TODO: Consider using error wrapping to annotate error state in pass- - // through cases in the next minor version. - - // Per AbstractParser#parsePartialDelimitedFrom with - // CodedInputStream#readRawVarint32. - var headerBuf [binary.MaxVarintLen32]byte - var bytesRead, varIntBytes int - var messageLength uint64 - for varIntBytes == 0 { // i.e. no varint has been decoded yet. - if bytesRead >= len(headerBuf) { - return bytesRead, errInvalidVarint - } - // We have to read byte by byte here to avoid reading more bytes - // than required. Each read byte is appended to what we have - // read before. 
- newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) - if newBytesRead == 0 { - if err != nil { - return bytesRead, err - } - // A Reader should not return (0, nil); but if it does, it should - // be treated as no-op according to the Reader contract. - continue - } - bytesRead += newBytesRead - // Now present everything read so far to the varint decoder and - // see if a varint can be decoded already. - messageLength, varIntBytes = binary.Uvarint(headerBuf[:bytesRead]) - } - - messageBuf := make([]byte, messageLength) - newBytesRead, err := io.ReadFull(r, messageBuf) - bytesRead += newBytesRead - if err != nil { - return bytesRead, err - } - - return bytesRead, proto.Unmarshal(messageBuf, m) -} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go deleted file mode 100644 index c318385c..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package pbutil provides record length-delimited Protocol Buffer streaming. -package pbutil diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go deleted file mode 100644 index e58dd9d2..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "io" - - "google.golang.org/protobuf/proto" -) - -// WriteDelimited encodes and dumps a message to the provided writer prefixed -// with a 32-bit varint indicating the length of the encoded message, producing -// a length-delimited record stream, which can be used to chain together -// encoded messages of the same type together in a file. It returns the total -// number of bytes written and any applicable error. This is roughly -// equivalent to the companion Java API's MessageLite#writeDelimitedTo. -func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { - // TODO: Consider allowing the caller to specify an encode buffer in the - // next major version. 
- - buffer, err := proto.Marshal(m) - if err != nil { - return 0, err - } - - var buf [binary.MaxVarintLen32]byte - encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) - - sync, err := w.Write(buf[:encodedLength]) - if err != nil { - return sync, err - } - - n, err = w.Write(buffer) - return n + sync, err -} diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index 95bc08d5..e57d86af 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -82,6 +82,7 @@ A not-so-up-to-date-list-that-may-be-actually-current: * https://dnscheck.tools/ * https://github.com/egbakou/domainverifier * https://github.com/semihalev/sdns +* https://github.com/wintbiit/NineDNS Send pull request if you want to be listed here. @@ -125,6 +126,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository. *all of them* * 103{4,5} - DNS standard +* 1183 - ISDN, X25 and other deprecated records * 1348 - NSAP record (removed the record) * 1982 - Serial Arithmetic * 1876 - LOC record diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go index 5e72249b..9c9972db 100644 --- a/vendor/github.com/miekg/dns/dnssec_keyscan.go +++ b/vendor/github.com/miekg/dns/dnssec_keyscan.go @@ -160,7 +160,7 @@ func parseKey(r io.Reader, file string) (map[string]string, error) { k = l.token case zValue: if k == "" { - return nil, &ParseError{file, "no private key seen", l} + return nil, &ParseError{file: file, err: "no private key seen", lex: l} } m[strings.ToLower(k)] = l.token diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go index 713e9d2d..a81d2bc5 100644 --- a/vendor/github.com/miekg/dns/generate.go +++ b/vendor/github.com/miekg/dns/generate.go @@ -116,7 +116,7 @@ func (r *generateReader) parseError(msg string, end int) *ParseError { l.token = r.s[r.si-1 : end] l.column += r.si // l.column starts one zBLANK before r.s - return &ParseError{r.file, msg, l} + return &ParseError{file: r.file, err: msg, lex: l} } func (r *generateReader) Read(p []byte) (int, error) { diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go index d256b652..350ea5a4 100644 --- a/vendor/github.com/miekg/dns/privaterr.go +++ b/vendor/github.com/miekg/dns/privaterr.go @@ -84,7 +84,7 @@ Fetch: err := r.Data.Parse(text) if err != nil { - return &ParseError{"", err.Error(), l} + return &ParseError{wrappedErr: err, lex: l} } return nil diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go index 062d8ff3..1f92ae42 100644 --- a/vendor/github.com/miekg/dns/scan.go +++ b/vendor/github.com/miekg/dns/scan.go @@ -4,7 +4,9 @@ import ( "bufio" "fmt" "io" + "io/fs" "os" + "path" "path/filepath" "strconv" "strings" @@ -64,20 +66,26 @@ const ( // ParseError is a parsing error. It contains the parse error and the location in the io.Reader // where the error occurred. 
type ParseError struct { - file string - err string - lex lex + file string + err string + wrappedErr error + lex lex } func (e *ParseError) Error() (s string) { if e.file != "" { s = e.file + ": " } + if e.err == "" && e.wrappedErr != nil { + e.err = e.wrappedErr.Error() + } s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " + strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column) return } +func (e *ParseError) Unwrap() error { return e.wrappedErr } + type lex struct { token string // text of the token err bool // when true, token text has lexer error @@ -168,8 +176,9 @@ type ZoneParser struct { // sub is used to parse $INCLUDE files and $GENERATE directives. // Next, by calling subNext, forwards the resulting RRs from this // sub parser to the calling code. - sub *ZoneParser - osFile *os.File + sub *ZoneParser + r io.Reader + fsys fs.FS includeDepth uint8 @@ -188,7 +197,7 @@ func NewZoneParser(r io.Reader, origin, file string) *ZoneParser { if origin != "" { origin = Fqdn(origin) if _, ok := IsDomainName(origin); !ok { - pe = &ParseError{file, "bad initial origin name", lex{}} + pe = &ParseError{file: file, err: "bad initial origin name"} } } @@ -220,6 +229,24 @@ func (zp *ZoneParser) SetIncludeAllowed(v bool) { zp.includeAllowed = v } +// SetIncludeFS provides an [fs.FS] to use when looking for the target of +// $INCLUDE directives. ($INCLUDE must still be enabled separately by calling +// [ZoneParser.SetIncludeAllowed].) If fsys is nil, [os.Open] will be used. +// +// When fsys is an on-disk FS, the ability of $INCLUDE to reach files from +// outside its root directory depends upon the FS implementation. For +// instance, [os.DirFS] will refuse to open paths like "../../etc/passwd", +// however it will still follow links which may point anywhere on the system. +// +// FS paths are slash-separated on all systems, even Windows. $INCLUDE paths +// containing other characters such as backslash and colon may be accepted as +// valid, but those characters will never be interpreted by an FS +// implementation as path element separators. See [fs.ValidPath] for more +// details. +func (zp *ZoneParser) SetIncludeFS(fsys fs.FS) { + zp.fsys = fsys +} + // Err returns the first non-EOF error that was encountered by the // ZoneParser. 
func (zp *ZoneParser) Err() error { @@ -237,7 +264,7 @@ func (zp *ZoneParser) Err() error { } func (zp *ZoneParser) setParseError(err string, l lex) (RR, bool) { - zp.parseErr = &ParseError{zp.file, err, l} + zp.parseErr = &ParseError{file: zp.file, err: err, lex: l} return nil, false } @@ -260,9 +287,11 @@ func (zp *ZoneParser) subNext() (RR, bool) { return rr, true } - if zp.sub.osFile != nil { - zp.sub.osFile.Close() - zp.sub.osFile = nil + if zp.sub.r != nil { + if c, ok := zp.sub.r.(io.Closer); ok { + c.Close() + } + zp.sub.r = nil } if zp.sub.Err() != nil { @@ -402,24 +431,44 @@ func (zp *ZoneParser) Next() (RR, bool) { // Start with the new file includePath := l.token - if !filepath.IsAbs(includePath) { - includePath = filepath.Join(filepath.Dir(zp.file), includePath) - } - - r1, e1 := os.Open(includePath) - if e1 != nil { - var as string - if !filepath.IsAbs(l.token) { - as = fmt.Sprintf(" as `%s'", includePath) + var r1 io.Reader + var e1 error + if zp.fsys != nil { + // fs.FS always uses / as separator, even on Windows, so use + // path instead of filepath here: + if !path.IsAbs(includePath) { + includePath = path.Join(path.Dir(zp.file), includePath) } - msg := fmt.Sprintf("failed to open `%s'%s: %v", l.token, as, e1) - return zp.setParseError(msg, l) + // os.DirFS, and probably others, expect all paths to be + // relative, so clean the path and remove leading / if + // present: + includePath = strings.TrimLeft(path.Clean(includePath), "/") + + r1, e1 = zp.fsys.Open(includePath) + } else { + if !filepath.IsAbs(includePath) { + includePath = filepath.Join(filepath.Dir(zp.file), includePath) + } + r1, e1 = os.Open(includePath) + } + if e1 != nil { + var as string + if includePath != l.token { + as = fmt.Sprintf(" as `%s'", includePath) + } + zp.parseErr = &ParseError{ + file: zp.file, + wrappedErr: fmt.Errorf("failed to open `%s'%s: %w", l.token, as, e1), + lex: l, + } + return nil, false } zp.sub = NewZoneParser(r1, neworigin, includePath) - zp.sub.defttl, zp.sub.includeDepth, zp.sub.osFile = zp.defttl, zp.includeDepth+1, r1 + zp.sub.defttl, zp.sub.includeDepth, zp.sub.r = zp.defttl, zp.includeDepth+1, r1 zp.sub.SetIncludeAllowed(true) + zp.sub.SetIncludeFS(zp.fsys) return zp.subNext() case zExpectDirTTLBl: if l.value != zBlank { @@ -1326,12 +1375,12 @@ func slurpRemainder(c *zlexer) *ParseError { case zBlank: l, _ = c.Next() if l.value != zNewline && l.value != zEOF { - return &ParseError{"", "garbage after rdata", l} + return &ParseError{err: "garbage after rdata", lex: l} } case zNewline: case zEOF: default: - return &ParseError{"", "garbage after rdata", l} + return &ParseError{err: "garbage after rdata", lex: l} } return nil } @@ -1340,16 +1389,16 @@ func slurpRemainder(c *zlexer) *ParseError { // Used for NID and L64 record. 
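The dns.ZoneParser changes above introduce two user-visible behaviors: $INCLUDE targets can now be resolved through an fs.FS supplied via SetIncludeFS, and ParseError can carry a wrapped error that Unwrap exposes to errors.Is and errors.As. A small sketch under those assumptions; the zone data, file names, and the use of fstest.MapFS are illustrative only:

    package main

    import (
        "errors"
        "fmt"
        "io/fs"
        "strings"
        "testing/fstest"

        "github.com/miekg/dns"
    )

    func main() {
        // Main zone pulls a record in via $INCLUDE.
        zone := "$INCLUDE hosts.zone\n"

        // Back the include with an in-memory fs.FS instead of the real filesystem.
        fsys := fstest.MapFS{
            "hosts.zone": &fstest.MapFile{Data: []byte("www 3600 IN A 192.0.2.1\n")},
        }

        zp := dns.NewZoneParser(strings.NewReader(zone), "example.org.", "db.example.org")
        zp.SetIncludeAllowed(true)
        zp.SetIncludeFS(fsys) // $INCLUDE now opens paths through fsys rather than os.Open

        for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
            fmt.Println(rr) // www.example.org. 3600 IN A 192.0.2.1
        }

        // If hosts.zone were absent from fsys, the open failure would be wrapped
        // into the ParseError, so it unwraps to fs.ErrNotExist here.
        if err := zp.Err(); errors.Is(err, fs.ErrNotExist) {
            fmt.Println("missing $INCLUDE target:", err)
        } else if err != nil {
            fmt.Println("parse error:", err)
        }
    }
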
func stringToNodeID(l lex) (uint64, *ParseError) { if len(l.token) < 19 { - return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + return 0, &ParseError{file: l.token, err: "bad NID/L64 NodeID/Locator64", lex: l} } // There must be three colons at fixes positions, if not its a parse error if l.token[4] != ':' && l.token[9] != ':' && l.token[14] != ':' { - return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + return 0, &ParseError{file: l.token, err: "bad NID/L64 NodeID/Locator64", lex: l} } s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19] u, err := strconv.ParseUint(s, 16, 64) if err != nil { - return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + return 0, &ParseError{file: l.token, err: "bad NID/L64 NodeID/Locator64", lex: l} } return u, nil } diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go index a635e1c5..1a90c61f 100644 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -3,6 +3,7 @@ package dns import ( "encoding/base64" "errors" + "fmt" "net" "strconv" "strings" @@ -15,14 +16,14 @@ func endingToString(c *zlexer, errstr string) (string, *ParseError) { l, _ := c.Next() // zString for l.value != zNewline && l.value != zEOF { if l.err { - return s.String(), &ParseError{"", errstr, l} + return s.String(), &ParseError{err: errstr, lex: l} } switch l.value { case zString: s.WriteString(l.token) case zBlank: // Ok default: - return "", &ParseError{"", errstr, l} + return "", &ParseError{err: errstr, lex: l} } l, _ = c.Next() } @@ -36,7 +37,7 @@ func endingToTxtSlice(c *zlexer, errstr string) ([]string, *ParseError) { // Get the remaining data until we see a zNewline l, _ := c.Next() if l.err { - return nil, &ParseError{"", errstr, l} + return nil, &ParseError{err: errstr, lex: l} } // Build the slice @@ -45,7 +46,7 @@ func endingToTxtSlice(c *zlexer, errstr string) ([]string, *ParseError) { empty := false for l.value != zNewline && l.value != zEOF { if l.err { - return nil, &ParseError{"", errstr, l} + return nil, &ParseError{err: errstr, lex: l} } switch l.value { case zString: @@ -72,7 +73,7 @@ func endingToTxtSlice(c *zlexer, errstr string) ([]string, *ParseError) { case zBlank: if quote { // zBlank can only be seen in between txt parts. - return nil, &ParseError{"", errstr, l} + return nil, &ParseError{err: errstr, lex: l} } case zQuote: if empty && quote { @@ -81,13 +82,13 @@ func endingToTxtSlice(c *zlexer, errstr string) ([]string, *ParseError) { quote = !quote empty = true default: - return nil, &ParseError{"", errstr, l} + return nil, &ParseError{err: errstr, lex: l} } l, _ = c.Next() } if quote { - return nil, &ParseError{"", errstr, l} + return nil, &ParseError{err: errstr, lex: l} } return s, nil @@ -102,7 +103,7 @@ func (rr *A) parse(c *zlexer, o string) *ParseError { // IPv4. isIPv4 := !strings.Contains(l.token, ":") if rr.A == nil || !isIPv4 || l.err { - return &ParseError{"", "bad A A", l} + return &ParseError{err: "bad A A", lex: l} } return slurpRemainder(c) } @@ -114,7 +115,7 @@ func (rr *AAAA) parse(c *zlexer, o string) *ParseError { // addresses cannot include ":". 
isIPv6 := strings.Contains(l.token, ":") if rr.AAAA == nil || !isIPv6 || l.err { - return &ParseError{"", "bad AAAA AAAA", l} + return &ParseError{err: "bad AAAA AAAA", lex: l} } return slurpRemainder(c) } @@ -123,7 +124,7 @@ func (rr *NS) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad NS Ns", l} + return &ParseError{err: "bad NS Ns", lex: l} } rr.Ns = name return slurpRemainder(c) @@ -133,7 +134,7 @@ func (rr *PTR) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad PTR Ptr", l} + return &ParseError{err: "bad PTR Ptr", lex: l} } rr.Ptr = name return slurpRemainder(c) @@ -143,7 +144,7 @@ func (rr *NSAPPTR) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad NSAP-PTR Ptr", l} + return &ParseError{err: "bad NSAP-PTR Ptr", lex: l} } rr.Ptr = name return slurpRemainder(c) @@ -153,7 +154,7 @@ func (rr *RP) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() mbox, mboxOk := toAbsoluteName(l.token, o) if l.err || !mboxOk { - return &ParseError{"", "bad RP Mbox", l} + return &ParseError{err: "bad RP Mbox", lex: l} } rr.Mbox = mbox @@ -163,7 +164,7 @@ func (rr *RP) parse(c *zlexer, o string) *ParseError { txt, txtOk := toAbsoluteName(l.token, o) if l.err || !txtOk { - return &ParseError{"", "bad RP Txt", l} + return &ParseError{err: "bad RP Txt", lex: l} } rr.Txt = txt @@ -174,7 +175,7 @@ func (rr *MR) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad MR Mr", l} + return &ParseError{err: "bad MR Mr", lex: l} } rr.Mr = name return slurpRemainder(c) @@ -184,7 +185,7 @@ func (rr *MB) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad MB Mb", l} + return &ParseError{err: "bad MB Mb", lex: l} } rr.Mb = name return slurpRemainder(c) @@ -194,7 +195,7 @@ func (rr *MG) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad MG Mg", l} + return &ParseError{err: "bad MG Mg", lex: l} } rr.Mg = name return slurpRemainder(c) @@ -219,6 +220,29 @@ func (rr *HINFO) parse(c *zlexer, o string) *ParseError { rr.Cpu = chunks[0] rr.Os = strings.Join(chunks[1:], " ") + return nil +} + +// according to RFC 1183 the parsing is identical to HINFO, so just use that code. +func (rr *ISDN) parse(c *zlexer, o string) *ParseError { + chunks, e := endingToTxtSlice(c, "bad ISDN Fields") + if e != nil { + return e + } + + if ln := len(chunks); ln == 0 { + return nil + } else if ln == 1 { + // Can we split it? 
+ if out := strings.Fields(chunks[0]); len(out) > 1 { + chunks = out + } else { + chunks = append(chunks, "") + } + } + + rr.Address = chunks[0] + rr.SubAddress = strings.Join(chunks[1:], " ") return nil } @@ -227,7 +251,7 @@ func (rr *MINFO) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() rmail, rmailOk := toAbsoluteName(l.token, o) if l.err || !rmailOk { - return &ParseError{"", "bad MINFO Rmail", l} + return &ParseError{err: "bad MINFO Rmail", lex: l} } rr.Rmail = rmail @@ -237,7 +261,7 @@ func (rr *MINFO) parse(c *zlexer, o string) *ParseError { email, emailOk := toAbsoluteName(l.token, o) if l.err || !emailOk { - return &ParseError{"", "bad MINFO Email", l} + return &ParseError{err: "bad MINFO Email", lex: l} } rr.Email = email @@ -248,7 +272,7 @@ func (rr *MF) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad MF Mf", l} + return &ParseError{err: "bad MF Mf", lex: l} } rr.Mf = name return slurpRemainder(c) @@ -258,7 +282,7 @@ func (rr *MD) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad MD Md", l} + return &ParseError{err: "bad MD Md", lex: l} } rr.Md = name return slurpRemainder(c) @@ -268,7 +292,7 @@ func (rr *MX) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad MX Pref", l} + return &ParseError{err: "bad MX Pref", lex: l} } rr.Preference = uint16(i) @@ -278,7 +302,7 @@ func (rr *MX) parse(c *zlexer, o string) *ParseError { name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad MX Mx", l} + return &ParseError{err: "bad MX Mx", lex: l} } rr.Mx = name @@ -289,7 +313,7 @@ func (rr *RT) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil { - return &ParseError{"", "bad RT Preference", l} + return &ParseError{err: "bad RT Preference", lex: l} } rr.Preference = uint16(i) @@ -299,7 +323,7 @@ func (rr *RT) parse(c *zlexer, o string) *ParseError { name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad RT Host", l} + return &ParseError{err: "bad RT Host", lex: l} } rr.Host = name @@ -310,7 +334,7 @@ func (rr *AFSDB) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad AFSDB Subtype", l} + return &ParseError{err: "bad AFSDB Subtype", lex: l} } rr.Subtype = uint16(i) @@ -320,7 +344,7 @@ func (rr *AFSDB) parse(c *zlexer, o string) *ParseError { name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad AFSDB Hostname", l} + return &ParseError{err: "bad AFSDB Hostname", lex: l} } rr.Hostname = name return slurpRemainder(c) @@ -329,7 +353,7 @@ func (rr *AFSDB) parse(c *zlexer, o string) *ParseError { func (rr *X25) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() if l.err { - return &ParseError{"", "bad X25 PSDNAddress", l} + return &ParseError{err: "bad X25 PSDNAddress", lex: l} } rr.PSDNAddress = l.token return slurpRemainder(c) @@ -339,7 +363,7 @@ func (rr *KX) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad KX Pref", l} + return &ParseError{err: "bad KX Pref", lex: l} } rr.Preference = 
uint16(i) @@ -349,7 +373,7 @@ func (rr *KX) parse(c *zlexer, o string) *ParseError { name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad KX Exchanger", l} + return &ParseError{err: "bad KX Exchanger", lex: l} } rr.Exchanger = name return slurpRemainder(c) @@ -359,7 +383,7 @@ func (rr *CNAME) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad CNAME Target", l} + return &ParseError{err: "bad CNAME Target", lex: l} } rr.Target = name return slurpRemainder(c) @@ -369,7 +393,7 @@ func (rr *DNAME) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad DNAME Target", l} + return &ParseError{err: "bad DNAME Target", lex: l} } rr.Target = name return slurpRemainder(c) @@ -379,7 +403,7 @@ func (rr *SOA) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() ns, nsOk := toAbsoluteName(l.token, o) if l.err || !nsOk { - return &ParseError{"", "bad SOA Ns", l} + return &ParseError{err: "bad SOA Ns", lex: l} } rr.Ns = ns @@ -389,7 +413,7 @@ func (rr *SOA) parse(c *zlexer, o string) *ParseError { mbox, mboxOk := toAbsoluteName(l.token, o) if l.err || !mboxOk { - return &ParseError{"", "bad SOA Mbox", l} + return &ParseError{err: "bad SOA Mbox", lex: l} } rr.Mbox = mbox @@ -402,16 +426,16 @@ func (rr *SOA) parse(c *zlexer, o string) *ParseError { for i := 0; i < 5; i++ { l, _ = c.Next() if l.err { - return &ParseError{"", "bad SOA zone parameter", l} + return &ParseError{err: "bad SOA zone parameter", lex: l} } if j, err := strconv.ParseUint(l.token, 10, 32); err != nil { if i == 0 { // Serial must be a number - return &ParseError{"", "bad SOA zone parameter", l} + return &ParseError{err: "bad SOA zone parameter", lex: l} } // We allow other fields to be unitful duration strings if v, ok = stringToTTL(l.token); !ok { - return &ParseError{"", "bad SOA zone parameter", l} + return &ParseError{err: "bad SOA zone parameter", lex: l} } } else { @@ -441,7 +465,7 @@ func (rr *SRV) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad SRV Priority", l} + return &ParseError{err: "bad SRV Priority", lex: l} } rr.Priority = uint16(i) @@ -449,7 +473,7 @@ func (rr *SRV) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() // zString i, e1 := strconv.ParseUint(l.token, 10, 16) if e1 != nil || l.err { - return &ParseError{"", "bad SRV Weight", l} + return &ParseError{err: "bad SRV Weight", lex: l} } rr.Weight = uint16(i) @@ -457,7 +481,7 @@ func (rr *SRV) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() // zString i, e2 := strconv.ParseUint(l.token, 10, 16) if e2 != nil || l.err { - return &ParseError{"", "bad SRV Port", l} + return &ParseError{err: "bad SRV Port", lex: l} } rr.Port = uint16(i) @@ -467,7 +491,7 @@ func (rr *SRV) parse(c *zlexer, o string) *ParseError { name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad SRV Target", l} + return &ParseError{err: "bad SRV Target", lex: l} } rr.Target = name return slurpRemainder(c) @@ -477,7 +501,7 @@ func (rr *NAPTR) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad NAPTR Order", l} + return &ParseError{err: "bad NAPTR Order", lex: l} } rr.Order = uint16(i) @@ -485,7 +509,7 @@ 
func (rr *NAPTR) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() // zString i, e1 := strconv.ParseUint(l.token, 10, 16) if e1 != nil || l.err { - return &ParseError{"", "bad NAPTR Preference", l} + return &ParseError{err: "bad NAPTR Preference", lex: l} } rr.Preference = uint16(i) @@ -493,57 +517,57 @@ func (rr *NAPTR) parse(c *zlexer, o string) *ParseError { c.Next() // zBlank l, _ = c.Next() // _QUOTE if l.value != zQuote { - return &ParseError{"", "bad NAPTR Flags", l} + return &ParseError{err: "bad NAPTR Flags", lex: l} } l, _ = c.Next() // Either String or Quote if l.value == zString { rr.Flags = l.token l, _ = c.Next() // _QUOTE if l.value != zQuote { - return &ParseError{"", "bad NAPTR Flags", l} + return &ParseError{err: "bad NAPTR Flags", lex: l} } } else if l.value == zQuote { rr.Flags = "" } else { - return &ParseError{"", "bad NAPTR Flags", l} + return &ParseError{err: "bad NAPTR Flags", lex: l} } // Service c.Next() // zBlank l, _ = c.Next() // _QUOTE if l.value != zQuote { - return &ParseError{"", "bad NAPTR Service", l} + return &ParseError{err: "bad NAPTR Service", lex: l} } l, _ = c.Next() // Either String or Quote if l.value == zString { rr.Service = l.token l, _ = c.Next() // _QUOTE if l.value != zQuote { - return &ParseError{"", "bad NAPTR Service", l} + return &ParseError{err: "bad NAPTR Service", lex: l} } } else if l.value == zQuote { rr.Service = "" } else { - return &ParseError{"", "bad NAPTR Service", l} + return &ParseError{err: "bad NAPTR Service", lex: l} } // Regexp c.Next() // zBlank l, _ = c.Next() // _QUOTE if l.value != zQuote { - return &ParseError{"", "bad NAPTR Regexp", l} + return &ParseError{err: "bad NAPTR Regexp", lex: l} } l, _ = c.Next() // Either String or Quote if l.value == zString { rr.Regexp = l.token l, _ = c.Next() // _QUOTE if l.value != zQuote { - return &ParseError{"", "bad NAPTR Regexp", l} + return &ParseError{err: "bad NAPTR Regexp", lex: l} } } else if l.value == zQuote { rr.Regexp = "" } else { - return &ParseError{"", "bad NAPTR Regexp", l} + return &ParseError{err: "bad NAPTR Regexp", lex: l} } // After quote no space?? 
@@ -553,7 +577,7 @@ func (rr *NAPTR) parse(c *zlexer, o string) *ParseError { name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad NAPTR Replacement", l} + return &ParseError{err: "bad NAPTR Replacement", lex: l} } rr.Replacement = name return slurpRemainder(c) @@ -563,7 +587,7 @@ func (rr *TALINK) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() previousName, previousNameOk := toAbsoluteName(l.token, o) if l.err || !previousNameOk { - return &ParseError{"", "bad TALINK PreviousName", l} + return &ParseError{err: "bad TALINK PreviousName", lex: l} } rr.PreviousName = previousName @@ -573,7 +597,7 @@ func (rr *TALINK) parse(c *zlexer, o string) *ParseError { nextName, nextNameOk := toAbsoluteName(l.token, o) if l.err || !nextNameOk { - return &ParseError{"", "bad TALINK NextName", l} + return &ParseError{err: "bad TALINK NextName", lex: l} } rr.NextName = nextName @@ -591,7 +615,7 @@ func (rr *LOC) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 32) if e != nil || l.err || i > 90 { - return &ParseError{"", "bad LOC Latitude", l} + return &ParseError{err: "bad LOC Latitude", lex: l} } rr.Latitude = 1000 * 60 * 60 * uint32(i) @@ -602,7 +626,7 @@ func (rr *LOC) parse(c *zlexer, o string) *ParseError { goto East } if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err || i > 59 { - return &ParseError{"", "bad LOC Latitude minutes", l} + return &ParseError{err: "bad LOC Latitude minutes", lex: l} } else { rr.Latitude += 1000 * 60 * uint32(i) } @@ -610,7 +634,7 @@ func (rr *LOC) parse(c *zlexer, o string) *ParseError { c.Next() // zBlank l, _ = c.Next() if i, err := strconv.ParseFloat(l.token, 64); err != nil || l.err || i < 0 || i >= 60 { - return &ParseError{"", "bad LOC Latitude seconds", l} + return &ParseError{err: "bad LOC Latitude seconds", lex: l} } else { rr.Latitude += uint32(1000 * i) } @@ -621,14 +645,14 @@ func (rr *LOC) parse(c *zlexer, o string) *ParseError { goto East } // If still alive, flag an error - return &ParseError{"", "bad LOC Latitude North/South", l} + return &ParseError{err: "bad LOC Latitude North/South", lex: l} East: // East c.Next() // zBlank l, _ = c.Next() if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err || i > 180 { - return &ParseError{"", "bad LOC Longitude", l} + return &ParseError{err: "bad LOC Longitude", lex: l} } else { rr.Longitude = 1000 * 60 * 60 * uint32(i) } @@ -639,14 +663,14 @@ East: goto Altitude } if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err || i > 59 { - return &ParseError{"", "bad LOC Longitude minutes", l} + return &ParseError{err: "bad LOC Longitude minutes", lex: l} } else { rr.Longitude += 1000 * 60 * uint32(i) } c.Next() // zBlank l, _ = c.Next() if i, err := strconv.ParseFloat(l.token, 64); err != nil || l.err || i < 0 || i >= 60 { - return &ParseError{"", "bad LOC Longitude seconds", l} + return &ParseError{err: "bad LOC Longitude seconds", lex: l} } else { rr.Longitude += uint32(1000 * i) } @@ -657,19 +681,19 @@ East: goto Altitude } // If still alive, flag an error - return &ParseError{"", "bad LOC Longitude East/West", l} + return &ParseError{err: "bad LOC Longitude East/West", lex: l} Altitude: c.Next() // zBlank l, _ = c.Next() if l.token == "" || l.err { - return &ParseError{"", "bad LOC Altitude", l} + return &ParseError{err: "bad LOC Altitude", lex: l} } if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' { l.token = l.token[0 : len(l.token)-1] } if i, err := 
strconv.ParseFloat(l.token, 64); err != nil { - return &ParseError{"", "bad LOC Altitude", l} + return &ParseError{err: "bad LOC Altitude", lex: l} } else { rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5) } @@ -684,19 +708,19 @@ Altitude: case 0: // Size exp, m, ok := stringToCm(l.token) if !ok { - return &ParseError{"", "bad LOC Size", l} + return &ParseError{err: "bad LOC Size", lex: l} } rr.Size = exp&0x0f | m<<4&0xf0 case 1: // HorizPre exp, m, ok := stringToCm(l.token) if !ok { - return &ParseError{"", "bad LOC HorizPre", l} + return &ParseError{err: "bad LOC HorizPre", lex: l} } rr.HorizPre = exp&0x0f | m<<4&0xf0 case 2: // VertPre exp, m, ok := stringToCm(l.token) if !ok { - return &ParseError{"", "bad LOC VertPre", l} + return &ParseError{err: "bad LOC VertPre", lex: l} } rr.VertPre = exp&0x0f | m<<4&0xf0 } @@ -704,7 +728,7 @@ Altitude: case zBlank: // Ok default: - return &ParseError{"", "bad LOC Size, HorizPre or VertPre", l} + return &ParseError{err: "bad LOC Size, HorizPre or VertPre", lex: l} } l, _ = c.Next() } @@ -716,14 +740,14 @@ func (rr *HIP) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return &ParseError{"", "bad HIP PublicKeyAlgorithm", l} + return &ParseError{err: "bad HIP PublicKeyAlgorithm", lex: l} } rr.PublicKeyAlgorithm = uint8(i) c.Next() // zBlank l, _ = c.Next() // zString if l.token == "" || l.err { - return &ParseError{"", "bad HIP Hit", l} + return &ParseError{err: "bad HIP Hit", lex: l} } rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6. rr.HitLength = uint8(len(rr.Hit)) / 2 @@ -731,12 +755,12 @@ func (rr *HIP) parse(c *zlexer, o string) *ParseError { c.Next() // zBlank l, _ = c.Next() // zString if l.token == "" || l.err { - return &ParseError{"", "bad HIP PublicKey", l} + return &ParseError{err: "bad HIP PublicKey", lex: l} } rr.PublicKey = l.token // This cannot contain spaces decodedPK, decodedPKerr := base64.StdEncoding.DecodeString(rr.PublicKey) if decodedPKerr != nil { - return &ParseError{"", "bad HIP PublicKey", l} + return &ParseError{err: "bad HIP PublicKey", lex: l} } rr.PublicKeyLength = uint16(len(decodedPK)) @@ -748,13 +772,13 @@ func (rr *HIP) parse(c *zlexer, o string) *ParseError { case zString: name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad HIP RendezvousServers", l} + return &ParseError{err: "bad HIP RendezvousServers", lex: l} } xs = append(xs, name) case zBlank: // Ok default: - return &ParseError{"", "bad HIP RendezvousServers", l} + return &ParseError{err: "bad HIP RendezvousServers", lex: l} } l, _ = c.Next() } @@ -768,7 +792,7 @@ func (rr *CERT) parse(c *zlexer, o string) *ParseError { if v, ok := StringToCertType[l.token]; ok { rr.Type = v } else if i, err := strconv.ParseUint(l.token, 10, 16); err != nil { - return &ParseError{"", "bad CERT Type", l} + return &ParseError{err: "bad CERT Type", lex: l} } else { rr.Type = uint16(i) } @@ -776,7 +800,7 @@ func (rr *CERT) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() // zString i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad CERT KeyTag", l} + return &ParseError{err: "bad CERT KeyTag", lex: l} } rr.KeyTag = uint16(i) c.Next() // zBlank @@ -784,7 +808,7 @@ func (rr *CERT) parse(c *zlexer, o string) *ParseError { if v, ok := StringToAlgorithm[l.token]; ok { rr.Algorithm = v } else if i, err := strconv.ParseUint(l.token, 10, 8); err != nil { - return &ParseError{"", "bad CERT 
Algorithm", l} + return &ParseError{err: "bad CERT Algorithm", lex: l} } else { rr.Algorithm = uint8(i) } @@ -810,7 +834,7 @@ func (rr *CSYNC) parse(c *zlexer, o string) *ParseError { j, e := strconv.ParseUint(l.token, 10, 32) if e != nil { // Serial must be a number - return &ParseError{"", "bad CSYNC serial", l} + return &ParseError{err: "bad CSYNC serial", lex: l} } rr.Serial = uint32(j) @@ -820,7 +844,7 @@ func (rr *CSYNC) parse(c *zlexer, o string) *ParseError { j, e1 := strconv.ParseUint(l.token, 10, 16) if e1 != nil { // Serial must be a number - return &ParseError{"", "bad CSYNC flags", l} + return &ParseError{err: "bad CSYNC flags", lex: l} } rr.Flags = uint16(j) @@ -838,12 +862,12 @@ func (rr *CSYNC) parse(c *zlexer, o string) *ParseError { tokenUpper := strings.ToUpper(l.token) if k, ok = StringToType[tokenUpper]; !ok { if k, ok = typeToInt(l.token); !ok { - return &ParseError{"", "bad CSYNC TypeBitMap", l} + return &ParseError{err: "bad CSYNC TypeBitMap", lex: l} } } rr.TypeBitMap = append(rr.TypeBitMap, k) default: - return &ParseError{"", "bad CSYNC TypeBitMap", l} + return &ParseError{err: "bad CSYNC TypeBitMap", lex: l} } l, _ = c.Next() } @@ -854,7 +878,7 @@ func (rr *ZONEMD) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 32) if e != nil || l.err { - return &ParseError{"", "bad ZONEMD Serial", l} + return &ParseError{err: "bad ZONEMD Serial", lex: l} } rr.Serial = uint32(i) @@ -862,7 +886,7 @@ func (rr *ZONEMD) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad ZONEMD Scheme", l} + return &ParseError{err: "bad ZONEMD Scheme", lex: l} } rr.Scheme = uint8(i) @@ -870,7 +894,7 @@ func (rr *ZONEMD) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() i, err := strconv.ParseUint(l.token, 10, 8) if err != nil || l.err { - return &ParseError{"", "bad ZONEMD Hash Algorithm", l} + return &ParseError{err: "bad ZONEMD Hash Algorithm", lex: l} } rr.Hash = uint8(i) @@ -891,11 +915,11 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { if strings.HasPrefix(tokenUpper, "TYPE") { t, ok = typeToInt(l.token) if !ok { - return &ParseError{"", "bad RRSIG Typecovered", l} + return &ParseError{err: "bad RRSIG Typecovered", lex: l} } rr.TypeCovered = t } else { - return &ParseError{"", "bad RRSIG Typecovered", l} + return &ParseError{err: "bad RRSIG Typecovered", lex: l} } } else { rr.TypeCovered = t @@ -904,14 +928,14 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { c.Next() // zBlank l, _ = c.Next() if l.err { - return &ParseError{"", "bad RRSIG Algorithm", l} + return &ParseError{err: "bad RRSIG Algorithm", lex: l} } i, e := strconv.ParseUint(l.token, 10, 8) rr.Algorithm = uint8(i) // if 0 we'll check the mnemonic in the if if e != nil { v, ok := StringToAlgorithm[l.token] if !ok { - return &ParseError{"", "bad RRSIG Algorithm", l} + return &ParseError{err: "bad RRSIG Algorithm", lex: l} } rr.Algorithm = v } @@ -920,7 +944,7 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad RRSIG Labels", l} + return &ParseError{err: "bad RRSIG Labels", lex: l} } rr.Labels = uint8(i) @@ -928,7 +952,7 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() i, e2 := strconv.ParseUint(l.token, 10, 32) if e2 != nil || l.err { - return &ParseError{"", "bad RRSIG OrigTtl", l} + return 
&ParseError{err: "bad RRSIG OrigTtl", lex: l} } rr.OrigTtl = uint32(i) @@ -939,7 +963,7 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { if i, err := strconv.ParseUint(l.token, 10, 32); err == nil { rr.Expiration = uint32(i) } else { - return &ParseError{"", "bad RRSIG Expiration", l} + return &ParseError{err: "bad RRSIG Expiration", lex: l} } } else { rr.Expiration = i @@ -951,7 +975,7 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { if i, err := strconv.ParseUint(l.token, 10, 32); err == nil { rr.Inception = uint32(i) } else { - return &ParseError{"", "bad RRSIG Inception", l} + return &ParseError{err: "bad RRSIG Inception", lex: l} } } else { rr.Inception = i @@ -961,7 +985,7 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() i, e3 := strconv.ParseUint(l.token, 10, 16) if e3 != nil || l.err { - return &ParseError{"", "bad RRSIG KeyTag", l} + return &ParseError{err: "bad RRSIG KeyTag", lex: l} } rr.KeyTag = uint16(i) @@ -970,7 +994,7 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { rr.SignerName = l.token name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad RRSIG SignerName", l} + return &ParseError{err: "bad RRSIG SignerName", lex: l} } rr.SignerName = name @@ -983,11 +1007,13 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { return nil } +func (rr *NXT) parse(c *zlexer, o string) *ParseError { return rr.NSEC.parse(c, o) } + func (rr *NSEC) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad NSEC NextDomain", l} + return &ParseError{err: "bad NSEC NextDomain", lex: l} } rr.NextDomain = name @@ -1005,12 +1031,12 @@ func (rr *NSEC) parse(c *zlexer, o string) *ParseError { tokenUpper := strings.ToUpper(l.token) if k, ok = StringToType[tokenUpper]; !ok { if k, ok = typeToInt(l.token); !ok { - return &ParseError{"", "bad NSEC TypeBitMap", l} + return &ParseError{err: "bad NSEC TypeBitMap", lex: l} } } rr.TypeBitMap = append(rr.TypeBitMap, k) default: - return &ParseError{"", "bad NSEC TypeBitMap", l} + return &ParseError{err: "bad NSEC TypeBitMap", lex: l} } l, _ = c.Next() } @@ -1021,27 +1047,27 @@ func (rr *NSEC3) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return &ParseError{"", "bad NSEC3 Hash", l} + return &ParseError{err: "bad NSEC3 Hash", lex: l} } rr.Hash = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad NSEC3 Flags", l} + return &ParseError{err: "bad NSEC3 Flags", lex: l} } rr.Flags = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e2 := strconv.ParseUint(l.token, 10, 16) if e2 != nil || l.err { - return &ParseError{"", "bad NSEC3 Iterations", l} + return &ParseError{err: "bad NSEC3 Iterations", lex: l} } rr.Iterations = uint16(i) c.Next() l, _ = c.Next() if l.token == "" || l.err { - return &ParseError{"", "bad NSEC3 Salt", l} + return &ParseError{err: "bad NSEC3 Salt", lex: l} } if l.token != "-" { rr.SaltLength = uint8(len(l.token)) / 2 @@ -1051,7 +1077,7 @@ func (rr *NSEC3) parse(c *zlexer, o string) *ParseError { c.Next() l, _ = c.Next() if l.token == "" || l.err { - return &ParseError{"", "bad NSEC3 NextDomain", l} + return &ParseError{err: "bad NSEC3 NextDomain", lex: l} } rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits) rr.NextDomain = l.token @@ -1070,12 +1096,12 @@ func (rr *NSEC3) 
parse(c *zlexer, o string) *ParseError { tokenUpper := strings.ToUpper(l.token) if k, ok = StringToType[tokenUpper]; !ok { if k, ok = typeToInt(l.token); !ok { - return &ParseError{"", "bad NSEC3 TypeBitMap", l} + return &ParseError{err: "bad NSEC3 TypeBitMap", lex: l} } } rr.TypeBitMap = append(rr.TypeBitMap, k) default: - return &ParseError{"", "bad NSEC3 TypeBitMap", l} + return &ParseError{err: "bad NSEC3 TypeBitMap", lex: l} } l, _ = c.Next() } @@ -1086,21 +1112,21 @@ func (rr *NSEC3PARAM) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return &ParseError{"", "bad NSEC3PARAM Hash", l} + return &ParseError{err: "bad NSEC3PARAM Hash", lex: l} } rr.Hash = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad NSEC3PARAM Flags", l} + return &ParseError{err: "bad NSEC3PARAM Flags", lex: l} } rr.Flags = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e2 := strconv.ParseUint(l.token, 10, 16) if e2 != nil || l.err { - return &ParseError{"", "bad NSEC3PARAM Iterations", l} + return &ParseError{err: "bad NSEC3PARAM Iterations", lex: l} } rr.Iterations = uint16(i) c.Next() @@ -1115,7 +1141,7 @@ func (rr *NSEC3PARAM) parse(c *zlexer, o string) *ParseError { func (rr *EUI48) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() if len(l.token) != 17 || l.err { - return &ParseError{"", "bad EUI48 Address", l} + return &ParseError{err: "bad EUI48 Address", lex: l} } addr := make([]byte, 12) dash := 0 @@ -1124,7 +1150,7 @@ func (rr *EUI48) parse(c *zlexer, o string) *ParseError { addr[i+1] = l.token[i+1+dash] dash++ if l.token[i+1+dash] != '-' { - return &ParseError{"", "bad EUI48 Address", l} + return &ParseError{err: "bad EUI48 Address", lex: l} } } addr[10] = l.token[15] @@ -1132,7 +1158,7 @@ func (rr *EUI48) parse(c *zlexer, o string) *ParseError { i, e := strconv.ParseUint(string(addr), 16, 48) if e != nil { - return &ParseError{"", "bad EUI48 Address", l} + return &ParseError{err: "bad EUI48 Address", lex: l} } rr.Address = i return slurpRemainder(c) @@ -1141,7 +1167,7 @@ func (rr *EUI48) parse(c *zlexer, o string) *ParseError { func (rr *EUI64) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() if len(l.token) != 23 || l.err { - return &ParseError{"", "bad EUI64 Address", l} + return &ParseError{err: "bad EUI64 Address", lex: l} } addr := make([]byte, 16) dash := 0 @@ -1150,7 +1176,7 @@ func (rr *EUI64) parse(c *zlexer, o string) *ParseError { addr[i+1] = l.token[i+1+dash] dash++ if l.token[i+1+dash] != '-' { - return &ParseError{"", "bad EUI64 Address", l} + return &ParseError{err: "bad EUI64 Address", lex: l} } } addr[14] = l.token[21] @@ -1158,7 +1184,7 @@ func (rr *EUI64) parse(c *zlexer, o string) *ParseError { i, e := strconv.ParseUint(string(addr), 16, 64) if e != nil { - return &ParseError{"", "bad EUI68 Address", l} + return &ParseError{err: "bad EUI68 Address", lex: l} } rr.Address = i return slurpRemainder(c) @@ -1168,14 +1194,14 @@ func (rr *SSHFP) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return &ParseError{"", "bad SSHFP Algorithm", l} + return &ParseError{err: "bad SSHFP Algorithm", lex: l} } rr.Algorithm = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad SSHFP Type", l} + return &ParseError{err: "bad SSHFP Type", lex: l} } rr.Type = 
uint8(i) c.Next() // zBlank @@ -1191,21 +1217,21 @@ func (rr *DNSKEY) parseDNSKEY(c *zlexer, o, typ string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad " + typ + " Flags", l} + return &ParseError{err: "bad " + typ + " Flags", lex: l} } rr.Flags = uint16(i) c.Next() // zBlank l, _ = c.Next() // zString i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad " + typ + " Protocol", l} + return &ParseError{err: "bad " + typ + " Protocol", lex: l} } rr.Protocol = uint8(i) c.Next() // zBlank l, _ = c.Next() // zString i, e2 := strconv.ParseUint(l.token, 10, 8) if e2 != nil || l.err { - return &ParseError{"", "bad " + typ + " Algorithm", l} + return &ParseError{err: "bad " + typ + " Algorithm", lex: l} } rr.Algorithm = uint8(i) s, e3 := endingToString(c, "bad "+typ+" PublicKey") @@ -1227,7 +1253,7 @@ func (rr *IPSECKEY) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() num, err := strconv.ParseUint(l.token, 10, 8) if err != nil || l.err { - return &ParseError{"", "bad IPSECKEY value", l} + return &ParseError{err: "bad IPSECKEY value", lex: l} } rr.Precedence = uint8(num) c.Next() // zBlank @@ -1235,7 +1261,7 @@ func (rr *IPSECKEY) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() num, err = strconv.ParseUint(l.token, 10, 8) if err != nil || l.err { - return &ParseError{"", "bad IPSECKEY value", l} + return &ParseError{err: "bad IPSECKEY value", lex: l} } rr.GatewayType = uint8(num) c.Next() // zBlank @@ -1243,19 +1269,19 @@ func (rr *IPSECKEY) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() num, err = strconv.ParseUint(l.token, 10, 8) if err != nil || l.err { - return &ParseError{"", "bad IPSECKEY value", l} + return &ParseError{err: "bad IPSECKEY value", lex: l} } rr.Algorithm = uint8(num) c.Next() // zBlank l, _ = c.Next() if l.err { - return &ParseError{"", "bad IPSECKEY gateway", l} + return &ParseError{err: "bad IPSECKEY gateway", lex: l} } rr.GatewayAddr, rr.GatewayHost, err = parseAddrHostUnion(l.token, o, rr.GatewayType) if err != nil { - return &ParseError{"", "IPSECKEY " + err.Error(), l} + return &ParseError{wrappedErr: fmt.Errorf("IPSECKEY %w", err), lex: l} } c.Next() // zBlank @@ -1272,14 +1298,14 @@ func (rr *AMTRELAY) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() num, err := strconv.ParseUint(l.token, 10, 8) if err != nil || l.err { - return &ParseError{"", "bad AMTRELAY value", l} + return &ParseError{err: "bad AMTRELAY value", lex: l} } rr.Precedence = uint8(num) c.Next() // zBlank l, _ = c.Next() if l.err || !(l.token == "0" || l.token == "1") { - return &ParseError{"", "bad discovery value", l} + return &ParseError{err: "bad discovery value", lex: l} } if l.token == "1" { rr.GatewayType = 0x80 @@ -1290,19 +1316,19 @@ func (rr *AMTRELAY) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() num, err = strconv.ParseUint(l.token, 10, 8) if err != nil || l.err { - return &ParseError{"", "bad AMTRELAY value", l} + return &ParseError{err: "bad AMTRELAY value", lex: l} } rr.GatewayType |= uint8(num) c.Next() // zBlank l, _ = c.Next() if l.err { - return &ParseError{"", "bad AMTRELAY gateway", l} + return &ParseError{err: "bad AMTRELAY gateway", lex: l} } rr.GatewayAddr, rr.GatewayHost, err = parseAddrHostUnion(l.token, o, rr.GatewayType&0x7f) if err != nil { - return &ParseError{"", "AMTRELAY " + err.Error(), l} + return &ParseError{wrappedErr: fmt.Errorf("AMTRELAY %w", err), lex: l} } return slurpRemainder(c) @@ -1338,21 
+1364,21 @@ func (rr *RKEY) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad RKEY Flags", l} + return &ParseError{err: "bad RKEY Flags", lex: l} } rr.Flags = uint16(i) c.Next() // zBlank l, _ = c.Next() // zString i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad RKEY Protocol", l} + return &ParseError{err: "bad RKEY Protocol", lex: l} } rr.Protocol = uint8(i) c.Next() // zBlank l, _ = c.Next() // zString i, e2 := strconv.ParseUint(l.token, 10, 8) if e2 != nil || l.err { - return &ParseError{"", "bad RKEY Algorithm", l} + return &ParseError{err: "bad RKEY Algorithm", lex: l} } rr.Algorithm = uint8(i) s, e3 := endingToString(c, "bad RKEY PublicKey") @@ -1385,21 +1411,21 @@ func (rr *GPOS) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() _, e := strconv.ParseFloat(l.token, 64) if e != nil || l.err { - return &ParseError{"", "bad GPOS Longitude", l} + return &ParseError{err: "bad GPOS Longitude", lex: l} } rr.Longitude = l.token c.Next() // zBlank l, _ = c.Next() _, e1 := strconv.ParseFloat(l.token, 64) if e1 != nil || l.err { - return &ParseError{"", "bad GPOS Latitude", l} + return &ParseError{err: "bad GPOS Latitude", lex: l} } rr.Latitude = l.token c.Next() // zBlank l, _ = c.Next() _, e2 := strconv.ParseFloat(l.token, 64) if e2 != nil || l.err { - return &ParseError{"", "bad GPOS Altitude", l} + return &ParseError{err: "bad GPOS Altitude", lex: l} } rr.Altitude = l.token return slurpRemainder(c) @@ -1409,7 +1435,7 @@ func (rr *DS) parseDS(c *zlexer, o, typ string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad " + typ + " KeyTag", l} + return &ParseError{err: "bad " + typ + " KeyTag", lex: l} } rr.KeyTag = uint16(i) c.Next() // zBlank @@ -1418,7 +1444,7 @@ func (rr *DS) parseDS(c *zlexer, o, typ string) *ParseError { tokenUpper := strings.ToUpper(l.token) i, ok := StringToAlgorithm[tokenUpper] if !ok || l.err { - return &ParseError{"", "bad " + typ + " Algorithm", l} + return &ParseError{err: "bad " + typ + " Algorithm", lex: l} } rr.Algorithm = i } else { @@ -1428,7 +1454,7 @@ func (rr *DS) parseDS(c *zlexer, o, typ string) *ParseError { l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad " + typ + " DigestType", l} + return &ParseError{err: "bad " + typ + " DigestType", lex: l} } rr.DigestType = uint8(i) s, e2 := endingToString(c, "bad "+typ+" Digest") @@ -1443,7 +1469,7 @@ func (rr *TA) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad TA KeyTag", l} + return &ParseError{err: "bad TA KeyTag", lex: l} } rr.KeyTag = uint16(i) c.Next() // zBlank @@ -1452,7 +1478,7 @@ func (rr *TA) parse(c *zlexer, o string) *ParseError { tokenUpper := strings.ToUpper(l.token) i, ok := StringToAlgorithm[tokenUpper] if !ok || l.err { - return &ParseError{"", "bad TA Algorithm", l} + return &ParseError{err: "bad TA Algorithm", lex: l} } rr.Algorithm = i } else { @@ -1462,7 +1488,7 @@ func (rr *TA) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad TA DigestType", l} + return &ParseError{err: "bad TA DigestType", lex: l} } rr.DigestType = uint8(i) s, e2 := endingToString(c, "bad TA Digest") @@ -1477,21 
+1503,21 @@ func (rr *TLSA) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return &ParseError{"", "bad TLSA Usage", l} + return &ParseError{err: "bad TLSA Usage", lex: l} } rr.Usage = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad TLSA Selector", l} + return &ParseError{err: "bad TLSA Selector", lex: l} } rr.Selector = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e2 := strconv.ParseUint(l.token, 10, 8) if e2 != nil || l.err { - return &ParseError{"", "bad TLSA MatchingType", l} + return &ParseError{err: "bad TLSA MatchingType", lex: l} } rr.MatchingType = uint8(i) // So this needs be e2 (i.e. different than e), because...??t @@ -1507,21 +1533,21 @@ func (rr *SMIMEA) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return &ParseError{"", "bad SMIMEA Usage", l} + return &ParseError{err: "bad SMIMEA Usage", lex: l} } rr.Usage = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad SMIMEA Selector", l} + return &ParseError{err: "bad SMIMEA Selector", lex: l} } rr.Selector = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e2 := strconv.ParseUint(l.token, 10, 8) if e2 != nil || l.err { - return &ParseError{"", "bad SMIMEA MatchingType", l} + return &ParseError{err: "bad SMIMEA MatchingType", lex: l} } rr.MatchingType = uint8(i) // So this needs be e2 (i.e. different than e), because...??t @@ -1536,14 +1562,14 @@ func (rr *SMIMEA) parse(c *zlexer, o string) *ParseError { func (rr *RFC3597) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() if l.token != "\\#" { - return &ParseError{"", "bad RFC3597 Rdata", l} + return &ParseError{err: "bad RFC3597 Rdata", lex: l} } c.Next() // zBlank l, _ = c.Next() rdlength, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad RFC3597 Rdata ", l} + return &ParseError{err: "bad RFC3597 Rdata ", lex: l} } s, e1 := endingToString(c, "bad RFC3597 Rdata") @@ -1551,7 +1577,7 @@ func (rr *RFC3597) parse(c *zlexer, o string) *ParseError { return e1 } if int(rdlength)*2 != len(s) { - return &ParseError{"", "bad RFC3597 Rdata", l} + return &ParseError{err: "bad RFC3597 Rdata", lex: l} } rr.Rdata = s return nil @@ -1599,14 +1625,14 @@ func (rr *URI) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad URI Priority", l} + return &ParseError{err: "bad URI Priority", lex: l} } rr.Priority = uint16(i) c.Next() // zBlank l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 16) if e1 != nil || l.err { - return &ParseError{"", "bad URI Weight", l} + return &ParseError{err: "bad URI Weight", lex: l} } rr.Weight = uint16(i) @@ -1616,7 +1642,7 @@ func (rr *URI) parse(c *zlexer, o string) *ParseError { return e2 } if len(s) != 1 { - return &ParseError{"", "bad URI Target", l} + return &ParseError{err: "bad URI Target", lex: l} } rr.Target = s[0] return nil @@ -1636,7 +1662,7 @@ func (rr *NID) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad NID Preference", l} + return &ParseError{err: "bad NID Preference", lex: l} } rr.Preference = uint16(i) c.Next() // zBlank @@ -1653,14 +1679,14 @@ func (rr *L32) 
parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad L32 Preference", l} + return &ParseError{err: "bad L32 Preference", lex: l} } rr.Preference = uint16(i) c.Next() // zBlank l, _ = c.Next() // zString rr.Locator32 = net.ParseIP(l.token) if rr.Locator32 == nil || l.err { - return &ParseError{"", "bad L32 Locator", l} + return &ParseError{err: "bad L32 Locator", lex: l} } return slurpRemainder(c) } @@ -1669,7 +1695,7 @@ func (rr *LP) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad LP Preference", l} + return &ParseError{err: "bad LP Preference", lex: l} } rr.Preference = uint16(i) @@ -1678,7 +1704,7 @@ func (rr *LP) parse(c *zlexer, o string) *ParseError { rr.Fqdn = l.token name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad LP Fqdn", l} + return &ParseError{err: "bad LP Fqdn", lex: l} } rr.Fqdn = name return slurpRemainder(c) @@ -1688,7 +1714,7 @@ func (rr *L64) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad L64 Preference", l} + return &ParseError{err: "bad L64 Preference", lex: l} } rr.Preference = uint16(i) c.Next() // zBlank @@ -1705,7 +1731,7 @@ func (rr *UID) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 32) if e != nil || l.err { - return &ParseError{"", "bad UID Uid", l} + return &ParseError{err: "bad UID Uid", lex: l} } rr.Uid = uint32(i) return slurpRemainder(c) @@ -1715,7 +1741,7 @@ func (rr *GID) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 32) if e != nil || l.err { - return &ParseError{"", "bad GID Gid", l} + return &ParseError{err: "bad GID Gid", lex: l} } rr.Gid = uint32(i) return slurpRemainder(c) @@ -1737,7 +1763,7 @@ func (rr *PX) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad PX Preference", l} + return &ParseError{err: "bad PX Preference", lex: l} } rr.Preference = uint16(i) @@ -1746,7 +1772,7 @@ func (rr *PX) parse(c *zlexer, o string) *ParseError { rr.Map822 = l.token map822, map822Ok := toAbsoluteName(l.token, o) if l.err || !map822Ok { - return &ParseError{"", "bad PX Map822", l} + return &ParseError{err: "bad PX Map822", lex: l} } rr.Map822 = map822 @@ -1755,7 +1781,7 @@ func (rr *PX) parse(c *zlexer, o string) *ParseError { rr.Mapx400 = l.token mapx400, mapx400Ok := toAbsoluteName(l.token, o) if l.err || !mapx400Ok { - return &ParseError{"", "bad PX Mapx400", l} + return &ParseError{err: "bad PX Mapx400", lex: l} } rr.Mapx400 = mapx400 return slurpRemainder(c) @@ -1765,14 +1791,14 @@ func (rr *CAA) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return &ParseError{"", "bad CAA Flag", l} + return &ParseError{err: "bad CAA Flag", lex: l} } rr.Flag = uint8(i) c.Next() // zBlank l, _ = c.Next() // zString if l.value != zString { - return &ParseError{"", "bad CAA Tag", l} + return &ParseError{err: "bad CAA Tag", lex: l} } rr.Tag = l.token @@ -1782,7 +1808,7 @@ func (rr *CAA) parse(c *zlexer, o string) *ParseError { return e1 } if len(s) != 1 { - return &ParseError{"", "bad CAA Value", l} + return &ParseError{err: "bad CAA Value", lex: 
l} } rr.Value = s[0] return nil @@ -1793,7 +1819,7 @@ func (rr *TKEY) parse(c *zlexer, o string) *ParseError { // Algorithm if l.value != zString { - return &ParseError{"", "bad TKEY algorithm", l} + return &ParseError{err: "bad TKEY algorithm", lex: l} } rr.Algorithm = l.token c.Next() // zBlank @@ -1802,13 +1828,13 @@ func (rr *TKEY) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return &ParseError{"", "bad TKEY key length", l} + return &ParseError{err: "bad TKEY key length", lex: l} } rr.KeySize = uint16(i) c.Next() // zBlank l, _ = c.Next() if l.value != zString { - return &ParseError{"", "bad TKEY key", l} + return &ParseError{err: "bad TKEY key", lex: l} } rr.Key = l.token c.Next() // zBlank @@ -1817,13 +1843,13 @@ func (rr *TKEY) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad TKEY otherdata length", l} + return &ParseError{err: "bad TKEY otherdata length", lex: l} } rr.OtherLen = uint16(i) c.Next() // zBlank l, _ = c.Next() if l.value != zString { - return &ParseError{"", "bad TKEY otherday", l} + return &ParseError{err: "bad TKEY otherday", lex: l} } rr.OtherData = l.token return nil @@ -1841,14 +1867,14 @@ func (rr *APL) parse(c *zlexer, o string) *ParseError { continue } if l.value != zString { - return &ParseError{"", "unexpected APL field", l} + return &ParseError{err: "unexpected APL field", lex: l} } // Expected format: [!]afi:address/prefix colon := strings.IndexByte(l.token, ':') if colon == -1 { - return &ParseError{"", "missing colon in APL field", l} + return &ParseError{err: "missing colon in APL field", lex: l} } family, cidr := l.token[:colon], l.token[colon+1:] @@ -1861,7 +1887,7 @@ func (rr *APL) parse(c *zlexer, o string) *ParseError { afi, e := strconv.ParseUint(family, 10, 16) if e != nil { - return &ParseError{"", "failed to parse APL family: " + e.Error(), l} + return &ParseError{wrappedErr: fmt.Errorf("failed to parse APL family: %w", e), lex: l} } var addrLen int switch afi { @@ -1870,19 +1896,19 @@ func (rr *APL) parse(c *zlexer, o string) *ParseError { case 2: addrLen = net.IPv6len default: - return &ParseError{"", "unrecognized APL family", l} + return &ParseError{err: "unrecognized APL family", lex: l} } ip, subnet, e1 := net.ParseCIDR(cidr) if e1 != nil { - return &ParseError{"", "failed to parse APL address: " + e1.Error(), l} + return &ParseError{wrappedErr: fmt.Errorf("failed to parse APL address: %w", e1), lex: l} } if !ip.Equal(subnet.IP) { - return &ParseError{"", "extra bits in APL address", l} + return &ParseError{err: "extra bits in APL address", lex: l} } if len(subnet.IP) != addrLen { - return &ParseError{"", "address mismatch with the APL family", l} + return &ParseError{err: "address mismatch with the APL family", lex: l} } prefixes = append(prefixes, APLPrefix{ diff --git a/vendor/github.com/miekg/dns/svcb.go b/vendor/github.com/miekg/dns/svcb.go index d38aa2f0..c1a740b6 100644 --- a/vendor/github.com/miekg/dns/svcb.go +++ b/vendor/github.com/miekg/dns/svcb.go @@ -85,7 +85,7 @@ func (rr *SVCB) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{l.token, "bad SVCB priority", l} + return &ParseError{file: l.token, err: "bad SVCB priority", lex: l} } rr.Priority = uint16(i) @@ -95,7 +95,7 @@ func (rr *SVCB) parse(c *zlexer, o string) *ParseError { name, nameOk := 
toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{l.token, "bad SVCB Target", l} + return &ParseError{file: l.token, err: "bad SVCB Target", lex: l} } rr.Target = name @@ -111,7 +111,7 @@ func (rr *SVCB) parse(c *zlexer, o string) *ParseError { if !canHaveNextKey { // The key we can now read was probably meant to be // a part of the last value. - return &ParseError{l.token, "bad SVCB value quotation", l} + return &ParseError{file: l.token, err: "bad SVCB value quotation", lex: l} } // In key=value pairs, value does not have to be quoted unless value @@ -124,7 +124,7 @@ func (rr *SVCB) parse(c *zlexer, o string) *ParseError { // Key with no value and no equality sign key = l.token } else if idx == 0 { - return &ParseError{l.token, "bad SVCB key", l} + return &ParseError{file: l.token, err: "bad SVCB key", lex: l} } else { key, value = l.token[:idx], l.token[idx+1:] @@ -144,30 +144,30 @@ func (rr *SVCB) parse(c *zlexer, o string) *ParseError { value = l.token l, _ = c.Next() if l.value != zQuote { - return &ParseError{l.token, "SVCB unterminated value", l} + return &ParseError{file: l.token, err: "SVCB unterminated value", lex: l} } case zQuote: // There's nothing in double quotes. default: - return &ParseError{l.token, "bad SVCB value", l} + return &ParseError{file: l.token, err: "bad SVCB value", lex: l} } } } } kv := makeSVCBKeyValue(svcbStringToKey(key)) if kv == nil { - return &ParseError{l.token, "bad SVCB key", l} + return &ParseError{file: l.token, err: "bad SVCB key", lex: l} } if err := kv.parse(value); err != nil { - return &ParseError{l.token, err.Error(), l} + return &ParseError{file: l.token, wrappedErr: err, lex: l} } xs = append(xs, kv) case zQuote: - return &ParseError{l.token, "SVCB key can't contain double quotes", l} + return &ParseError{file: l.token, err: "SVCB key can't contain double quotes", lex: l} case zBlank: canHaveNextKey = true default: - return &ParseError{l.token, "bad SVCB values", l} + return &ParseError{file: l.token, err: "bad SVCB values", lex: l} } l, _ = c.Next() } diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go index c9a03dec..8e3129cb 100644 --- a/vendor/github.com/miekg/dns/types.go +++ b/vendor/github.com/miekg/dns/types.go @@ -135,8 +135,8 @@ const ( RcodeNXRrset = 8 // NXRRSet - RR Set that should exist does not [DNS Update] RcodeNotAuth = 9 // NotAuth - Server Not Authoritative for zone [DNS Update] RcodeNotZone = 10 // NotZone - Name not contained in zone [DNS Update/TSIG] - RcodeBadSig = 16 // BADSIG - TSIG Signature Failure [TSIG] - RcodeBadVers = 16 // BADVERS - Bad OPT Version [EDNS0] + RcodeBadSig = 16 // BADSIG - TSIG Signature Failure [TSIG] https://www.rfc-editor.org/rfc/rfc6895.html#section-2.3 + RcodeBadVers = 16 // BADVERS - Bad OPT Version [EDNS0] https://www.rfc-editor.org/rfc/rfc6895.html#section-2.3 RcodeBadKey = 17 // BADKEY - Key not recognized [TSIG] RcodeBadTime = 18 // BADTIME - Signature out of time window [TSIG] RcodeBadMode = 19 // BADMODE - Bad TKEY Mode [TKEY] @@ -402,6 +402,17 @@ func (rr *X25) String() string { return rr.Hdr.String() + rr.PSDNAddress } +// ISDN RR. See RFC 1183, Section 3.2. +type ISDN struct { + Hdr RR_Header + Address string + SubAddress string +} + +func (rr *ISDN) String() string { + return rr.Hdr.String() + sprintTxt([]string{rr.Address, rr.SubAddress}) +} + // RT RR. See RFC 1183, Section 3.3. 
type RT struct { Hdr RR_Header @@ -786,7 +797,7 @@ func (rr *GPOS) String() string { return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude } -// LOC RR. See RFC RFC 1876. +// LOC RR. See RFC 1876. type LOC struct { Hdr RR_Header Version uint8 @@ -898,6 +909,11 @@ func (rr *RRSIG) String() string { return s } +// NXT RR. See RFC 2535. +type NXT struct { + NSEC +} + // NSEC RR. See RFC 4034 and RFC 3755. type NSEC struct { Hdr RR_Header @@ -982,7 +998,7 @@ func (rr *TALINK) String() string { sprintName(rr.PreviousName) + " " + sprintName(rr.NextName) } -// SSHFP RR. See RFC RFC 4255. +// SSHFP RR. See RFC 4255. type SSHFP struct { Hdr RR_Header Algorithm uint8 @@ -996,7 +1012,7 @@ func (rr *SSHFP) String() string { " " + strings.ToUpper(rr.FingerPrint) } -// KEY RR. See RFC RFC 2535. +// KEY RR. See RFC 2535. type KEY struct { DNSKEY } @@ -1306,7 +1322,7 @@ type NINFO struct { func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) } -// NID RR. See RFC RFC 6742. +// NID RR. See RFC 6742. type NID struct { Hdr RR_Header Preference uint16 diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index 9fd300f6..dc34e590 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. -var Version = v{1, 1, 57} +var Version = v{1, 1, 58} // v holds the version of this library. type v struct { diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go index 450bbbc2..03029fb3 100644 --- a/vendor/github.com/miekg/dns/zduplicate.go +++ b/vendor/github.com/miekg/dns/zduplicate.go @@ -481,6 +481,21 @@ func (r1 *IPSECKEY) isDuplicate(_r2 RR) bool { return true } +func (r1 *ISDN) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*ISDN) + if !ok { + return false + } + _ = r2 + if r1.Address != r2.Address { + return false + } + if r1.SubAddress != r2.SubAddress { + return false + } + return true +} + func (r1 *KEY) isDuplicate(_r2 RR) bool { r2, ok := _r2.(*KEY) if !ok { @@ -871,6 +886,26 @@ func (r1 *NULL) isDuplicate(_r2 RR) bool { return true } +func (r1 *NXT) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NXT) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.NextDomain, r2.NextDomain) { + return false + } + if len(r1.TypeBitMap) != len(r2.TypeBitMap) { + return false + } + for i := 0; i < len(r1.TypeBitMap); i++ { + if r1.TypeBitMap[i] != r2.TypeBitMap[i] { + return false + } + } + return true +} + func (r1 *OPENPGPKEY) isDuplicate(_r2 RR) bool { r2, ok := _r2.(*OPENPGPKEY) if !ok { diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go index 3ea0eb42..39b3bc81 100644 --- a/vendor/github.com/miekg/dns/zmsg.go +++ b/vendor/github.com/miekg/dns/zmsg.go @@ -372,6 +372,18 @@ func (rr *IPSECKEY) pack(msg []byte, off int, compression compressionMap, compre return off, nil } +func (rr *ISDN) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packString(rr.Address, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.SubAddress, msg, off) + if err != nil { + return off, err + } + return off, nil +} + func (rr *KEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.Flags, msg, off) if err != nil { @@ -694,6 +706,18 @@ func (rr *NULL) pack(msg []byte, off int, compression 
compressionMap, compress b return off, nil } +func (rr *NXT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.NextDomain, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + return off, nil +} + func (rr *OPENPGPKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packStringBase64(rr.PublicKey, msg, off) if err != nil { @@ -1746,6 +1770,24 @@ func (rr *IPSECKEY) unpack(msg []byte, off int) (off1 int, err error) { return off, nil } +func (rr *ISDN) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Address, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.SubAddress, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + return off, nil +} + func (rr *KEY) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart @@ -2224,6 +2266,24 @@ func (rr *NULL) unpack(msg []byte, off int) (off1 int, err error) { return off, nil } +func (rr *NXT) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.NextDomain, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return off, err + } + return off, nil +} + func (rr *OPENPGPKEY) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go index 1b6f4320..2c70fc44 100644 --- a/vendor/github.com/miekg/dns/ztypes.go +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -36,6 +36,7 @@ var TypeToRR = map[uint16]func() RR{ TypeHIP: func() RR { return new(HIP) }, TypeHTTPS: func() RR { return new(HTTPS) }, TypeIPSECKEY: func() RR { return new(IPSECKEY) }, + TypeISDN: func() RR { return new(ISDN) }, TypeKEY: func() RR { return new(KEY) }, TypeKX: func() RR { return new(KX) }, TypeL32: func() RR { return new(L32) }, @@ -59,6 +60,7 @@ var TypeToRR = map[uint16]func() RR{ TypeNSEC3: func() RR { return new(NSEC3) }, TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, TypeNULL: func() RR { return new(NULL) }, + TypeNXT: func() RR { return new(NXT) }, TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, TypeOPT: func() RR { return new(OPT) }, TypePTR: func() RR { return new(PTR) }, @@ -204,6 +206,7 @@ func (rr *HINFO) Header() *RR_Header { return &rr.Hdr } func (rr *HIP) Header() *RR_Header { return &rr.Hdr } func (rr *HTTPS) Header() *RR_Header { return &rr.Hdr } func (rr *IPSECKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *ISDN) Header() *RR_Header { return &rr.Hdr } func (rr *KEY) Header() *RR_Header { return &rr.Hdr } func (rr *KX) Header() *RR_Header { return &rr.Hdr } func (rr *L32) Header() *RR_Header { return &rr.Hdr } @@ -227,6 +230,7 @@ func (rr *NSEC) Header() *RR_Header { return &rr.Hdr } func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr } func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } func (rr *NULL) Header() *RR_Header { return &rr.Hdr } +func (rr *NXT) Header() *RR_Header { return &rr.Hdr } func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } func (rr *OPT) Header() *RR_Header { return &rr.Hdr } func (rr *PTR) Header() *RR_Header { return &rr.Hdr } 
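Aside from the error refactor, this bump to miekg/dns v1.1.58 reintroduces two legacy record types: ISDN (RFC 1183) as a plain struct with Address/SubAddress strings, and NXT (RFC 2535) as a thin wrapper embedding NSEC. The hunks above and below register them in TypeToRR and generate Header, len, copy, duplicate checks, and wire (un)packing for them. The following sketch constructs one of the new types and packs it into a message; the owner name, TTL, and ISDN numbers are illustrative only.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// Build an ISDN record from the struct added in types.go.
	isdn := &dns.ISDN{
		Hdr:        dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeISDN, Class: dns.ClassINET, Ttl: 3600},
		Address:    "150862028003217",
		SubAddress: "004",
	}
	fmt.Println(isdn.String()) // text form via the new String() method

	// Packing a message exercises the ISDN pack code added in zmsg.go.
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeISDN)
	m.Answer = append(m.Answer, isdn)

	wire, err := m.Pack()
	if err != nil {
		fmt.Println("pack failed:", err)
		return
	}
	fmt.Printf("packed %d bytes\n", len(wire))
}

An NXT record can be built the same way by filling the embedded NSEC (e.g. &dns.NXT{NSEC: dns.NSEC{...}}), since the new type adds no fields of its own.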
@@ -437,6 +441,13 @@ func (rr *IPSECKEY) len(off int, compression map[string]struct{}) int { return l } +func (rr *ISDN) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += len(rr.Address) + 1 + l += len(rr.SubAddress) + 1 + return l +} + func (rr *KX) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference @@ -966,6 +977,10 @@ func (rr *IPSECKEY) copy() RR { } } +func (rr *ISDN) copy() RR { + return &ISDN{rr.Hdr, rr.Address, rr.SubAddress} +} + func (rr *KEY) copy() RR { return &KEY{*rr.DNSKEY.copy().(*DNSKEY)} } @@ -1092,6 +1107,10 @@ func (rr *NULL) copy() RR { return &NULL{rr.Hdr, rr.Data} } +func (rr *NXT) copy() RR { + return &NXT{*rr.NSEC.copy().(*NSEC)} +} + func (rr *OPENPGPKEY) copy() RR { return &OPENPGPKEY{rr.Hdr, rr.PublicKey} } diff --git a/vendor/github.com/minio/minio-go/v7/CREDITS b/vendor/github.com/minio/minio-go/v7/CREDITS new file mode 100644 index 00000000..d2092318 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/CREDITS @@ -0,0 +1,1806 @@ +Go (the standard library) +https://golang.org/ +---------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +github.com/davecgh/go-spew +https://github.com/davecgh/go-spew +---------------------------------------------------------------- +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +================================================================ + +github.com/dustin/go-humanize +https://github.com/dustin/go-humanize +---------------------------------------------------------------- +Copyright (c) 2005-2008 Dustin Sallings + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + + +================================================================ + +github.com/google/uuid +https://github.com/google/uuid +---------------------------------------------------------------- +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +================================================================ + +github.com/json-iterator/go +https://github.com/json-iterator/go +---------------------------------------------------------------- +MIT License + +Copyright (c) 2016 json-iterator + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +github.com/klauspost/compress +https://github.com/klauspost/compress +---------------------------------------------------------------- +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +================================================================ + +github.com/klauspost/cpuid/v2 +https://github.com/klauspost/cpuid/v2 +---------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +================================================================ + +github.com/minio/md5-simd +https://github.com/minio/md5-simd +---------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +================================================================ + +github.com/minio/sha256-simd +https://github.com/minio/sha256-simd +---------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +================================================================ + +github.com/modern-go/concurrent +https://github.com/modern-go/concurrent +---------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +================================================================ + +github.com/modern-go/reflect2 +https://github.com/modern-go/reflect2 +---------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +================================================================ + +github.com/pmezard/go-difflib +https://github.com/pmezard/go-difflib +---------------------------------------------------------------- +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +================================================================ + +github.com/rs/xid +https://github.com/rs/xid +---------------------------------------------------------------- +Copyright (c) 2015 Olivier Poitrey + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is furnished +to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +================================================================ + +github.com/sirupsen/logrus +https://github.com/sirupsen/logrus +---------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +================================================================ + +github.com/stretchr/testify +https://github.com/stretchr/testify +---------------------------------------------------------------- +MIT License + +Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +golang.org/x/crypto +https://golang.org/x/crypto +---------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +golang.org/x/net +https://golang.org/x/net +---------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +golang.org/x/sys +https://golang.org/x/sys +---------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +golang.org/x/text +https://golang.org/x/text +---------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +gopkg.in/ini.v1 +https://gopkg.in/ini.v1 +---------------------------------------------------------------- +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright 2014 Unknwon + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +================================================================ + +gopkg.in/yaml.v3 +https://gopkg.in/yaml.v3 +---------------------------------------------------------------- + +This project is covered by two different licenses: MIT and Apache. + +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +================================================================ + diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-attributes.go b/vendor/github.com/minio/minio-go/v7/api-get-object-attributes.go new file mode 100644 index 00000000..e1155c37 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-attributes.go @@ -0,0 +1,201 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "encoding/xml" + "errors" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// ObjectAttributesOptions are options used for the GetObjectAttributes API +// +// - MaxParts +// How many parts the caller wants to be returned (default: 1000) +// +// - VersionID +// The object version you want to attributes for +// +// - PartNumberMarker +// the listing will start AFTER the part matching PartNumberMarker +// +// - ServerSideEncryption +// The server-side encryption algorithm used when storing this object in Minio +type ObjectAttributesOptions struct { + MaxParts int + VersionID string + PartNumberMarker int + ServerSideEncryption encrypt.ServerSide +} + +// ObjectAttributes is the response object returned by the GetObjectAttributes API +// +// - VersionID +// The object version +// +// - LastModified +// The last time the object was modified +// +// - ObjectAttributesResponse +// Contains more information about the object +type ObjectAttributes struct { + VersionID string + LastModified time.Time + ObjectAttributesResponse +} + +// ObjectAttributesResponse contains details returned by the GetObjectAttributes API +// +// Noteworthy fields: +// +// - ObjectParts.PartsCount +// Contains the total part count for the object (not the current response) +// +// - ObjectParts.PartNumberMarker +// Pagination of parts will begin at (but not include) PartNumberMarker +// +// - ObjectParts.NextPartNumberMarket +// The next PartNumberMarker to be used in order to continue pagination +// +// - ObjectParts.IsTruncated +// Indicates if the last part is included in the request (does not check if parts are missing from the start of the list, ONLY the end) +// +// - ObjectParts.MaxParts +// Reflects the MaxParts used by the caller or the default MaxParts value of the API +type ObjectAttributesResponse struct { + ETag string `xml:",omitempty"` + StorageClass string + ObjectSize int + Checksum struct { + ChecksumCRC32 string `xml:",omitempty"` + ChecksumCRC32C string `xml:",omitempty"` + ChecksumSHA1 string `xml:",omitempty"` + ChecksumSHA256 string `xml:",omitempty"` + } + ObjectParts struct { + PartsCount int + PartNumberMarker int + NextPartNumberMarker int + MaxParts int + IsTruncated bool + Parts []*ObjectAttributePart `xml:"Part"` + } +} + +// ObjectAttributePart is used by ObjectAttributesResponse to describe an object part +type ObjectAttributePart struct { + 
ChecksumCRC32 string `xml:",omitempty"` + ChecksumCRC32C string `xml:",omitempty"` + ChecksumSHA1 string `xml:",omitempty"` + ChecksumSHA256 string `xml:",omitempty"` + PartNumber int + Size int +} + +func (o *ObjectAttributes) parseResponse(resp *http.Response) (err error) { + mod, err := parseRFC7231Time(resp.Header.Get("Last-Modified")) + if err != nil { + return err + } + o.LastModified = mod + o.VersionID = resp.Header.Get(amzVersionID) + + response := new(ObjectAttributesResponse) + if err := xml.NewDecoder(resp.Body).Decode(response); err != nil { + return err + } + o.ObjectAttributesResponse = *response + + return +} + +// GetObjectAttributes API combines HeadObject and ListParts. +// More details on usage can be found in the documentation for ObjectAttributesOptions{} +func (c *Client) GetObjectAttributes(ctx context.Context, bucketName, objectName string, opts ObjectAttributesOptions) (*ObjectAttributes, error) { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + + urlValues := make(url.Values) + urlValues.Add("attributes", "") + if opts.VersionID != "" { + urlValues.Add("versionId", opts.VersionID) + } + + headers := make(http.Header) + headers.Set(amzObjectAttributes, GetObjectAttributesTags) + + if opts.PartNumberMarker > 0 { + headers.Set(amzPartNumberMarker, strconv.Itoa(opts.PartNumberMarker)) + } + + if opts.MaxParts > 0 { + headers.Set(amzMaxParts, strconv.Itoa(opts.MaxParts)) + } else { + headers.Set(amzMaxParts, strconv.Itoa(GetObjectAttributesMaxParts)) + } + + if opts.ServerSideEncryption != nil { + opts.ServerSideEncryption.Marshal(headers) + } + + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + customHeader: headers, + }) + if err != nil { + return nil, err + } + + defer closeResponse(resp) + + hasEtag := resp.Header.Get(ETag) + if hasEtag != "" { + return nil, errors.New("getObjectAttributes is not supported by the current endpoint version") + } + + if resp.StatusCode != http.StatusOK { + ER := new(ErrorResponse) + if err := xml.NewDecoder(resp.Body).Decode(ER); err != nil { + return nil, err + } + + return nil, *ER + } + + OA := new(ObjectAttributes) + err = OA.parseResponse(resp) + if err != nil { + return nil, err + } + + return OA, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go index 9c0ac449..d2e93292 100644 --- a/vendor/github.com/minio/minio-go/v7/api-remove.go +++ b/vendor/github.com/minio/minio-go/v7/api-remove.go @@ -435,7 +435,7 @@ func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh // Generate remove multi objects XML request removeBytes := generateRemoveMultiObjectsRequest(batch) - // Execute GET on bucket to list objects. + // Execute POST on bucket to remove objects. resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ bucketName: bucketName, queryValues: urlValues, diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index f8a9b34c..1c3cb83c 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -127,7 +127,7 @@ type Options struct { // Global constants. 
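For orientation, below is a minimal usage sketch of the GetObjectAttributes API vendored above. It is illustrative only and not part of the vendored sources or of this diff: the endpoint, credentials, bucket and object names are placeholders, and the MaxParts/PartNumberMarker values are arbitrary paging choices.

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials; replace with real values.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Fetch metadata, checksums and the multipart layout in one call.
	// MaxParts and PartNumberMarker page through the part list.
	attrs, err := client.GetObjectAttributes(context.Background(), "my-bucket", "my-object",
		minio.ObjectAttributesOptions{
			MaxParts:         100,
			PartNumberMarker: 0,
		})
	if err != nil {
		log.Fatal(err)
	}

	log.Printf("etag=%s size=%d parts=%d truncated=%v",
		attrs.ETag, attrs.ObjectSize, attrs.ObjectParts.PartsCount, attrs.ObjectParts.IsTruncated)
}

Paging behaves as described in the doc comments above: the part listing resumes after PartNumberMarker, returns at most MaxParts entries (default 1000), and IsTruncated reports whether further parts remain beyond the returned page.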
const ( libraryName = "minio-go" - libraryVersion = "v7.0.66" + libraryVersion = "v7.0.67" ) // User Agent should always following the below style. @@ -234,7 +234,7 @@ func privateNew(endpoint string, opts *Options) (*Client, error) { clnt.httpClient = &http.Client{ Jar: jar, Transport: transport, - CheckRedirect: func(req *http.Request, via []*http.Request) error { + CheckRedirect: func(_ *http.Request, _ []*http.Request) error { return http.ErrUseLastResponse }, } diff --git a/vendor/github.com/minio/minio-go/v7/constants.go b/vendor/github.com/minio/minio-go/v7/constants.go index 401d2a74..4099a37f 100644 --- a/vendor/github.com/minio/minio-go/v7/constants.go +++ b/vendor/github.com/minio/minio-go/v7/constants.go @@ -60,12 +60,32 @@ const ( ) const ( + // GetObjectAttributesTags are tags used to defined + // return values for the GetObjectAttributes API + GetObjectAttributesTags = "ETag,Checksum,StorageClass,ObjectSize,ObjectParts" + // GetObjectAttributesMaxParts defined the default maximum + // number of parts returned by GetObjectAttributes + GetObjectAttributesMaxParts = 1000 +) + +const ( + // Response Headers + + // ETag is a common response header + ETag = "ETag" + // Storage class header. amzStorageClass = "X-Amz-Storage-Class" // Website redirect location header amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location" + // GetObjectAttributes headers + amzPartNumberMarker = "X-Amz-Part-Number-Marker" + amzExpectedBucketOnwer = "X-Amz-Expected-Bucket-Owner" + amzMaxParts = "X-Amz-Max-Parts" + amzObjectAttributes = "X-Amz-Object-Attributes" + // Object Tagging headers amzTaggingHeader = "X-Amz-Tagging" amzTaggingHeaderDirective = "X-Amz-Tagging-Directive" diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go index f951cd07..a6a436c2 100644 --- a/vendor/github.com/minio/minio-go/v7/functional_tests.go +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -47,6 +47,7 @@ import ( "time" "github.com/dustin/go-humanize" + "github.com/google/uuid" jsoniter "github.com/json-iterator/go" "github.com/minio/sha256-simd" log "github.com/sirupsen/logrus" @@ -66,13 +67,30 @@ const ( ) const ( - serverEndpoint = "SERVER_ENDPOINT" - accessKey = "ACCESS_KEY" - secretKey = "SECRET_KEY" - enableHTTPS = "ENABLE_HTTPS" - enableKMS = "ENABLE_KMS" + serverEndpoint = "SERVER_ENDPOINT" + accessKey = "ACCESS_KEY" + secretKey = "SECRET_KEY" + enableHTTPS = "ENABLE_HTTPS" + enableKMS = "ENABLE_KMS" + appVersion = "0.1.0" + skipCERTValidation = "SKIP_CERT_VALIDATION" ) +func createHTTPTransport() (transport *http.Transport) { + var err error + transport, err = minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) + if err != nil { + logError("http-transport", getFuncName(), nil, time.Now(), "", "could not create http transport", err) + return nil + } + + if mustParseBool(os.Getenv(skipCERTValidation)) { + transport.TLSClientConfig.InsecureSkipVerify = true + } + + return +} + type mintJSONFormatter struct{} func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) { @@ -425,8 +443,9 @@ func testMakeBucketError() { // Instantiate new minio client object. 
c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + Transport: createHTTPTransport(), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client creation failed", err) @@ -437,7 +456,7 @@ func testMakeBucketError() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -478,14 +497,15 @@ func testMetadataSizeLimit() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + Transport: createHTTPTransport(), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client creation failed", err) return } - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -547,8 +567,9 @@ func testMakeBucketRegions() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client creation failed", err) @@ -559,7 +580,7 @@ func testMakeBucketRegions() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -613,8 +634,9 @@ func testPutObjectReadAt() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -625,7 +647,7 @@ func testPutObjectReadAt() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -711,8 +733,9 @@ func testListObjectVersions() { // Instantiate new minio client object. 
c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -723,7 +746,7 @@ func testListObjectVersions() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -830,8 +853,9 @@ func testStatObjectWithVersioning() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -842,7 +866,7 @@ func testStatObjectWithVersioning() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -947,8 +971,9 @@ func testGetObjectWithVersioning() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -959,7 +984,7 @@ func testGetObjectWithVersioning() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -1086,8 +1111,9 @@ func testPutObjectWithVersioning() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -1098,7 +1124,7 @@ func testPutObjectWithVersioning() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -1233,8 +1259,9 @@ func testCopyObjectWithVersioning() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -1245,7 +1272,7 @@ func testCopyObjectWithVersioning() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -1370,8 +1397,9 @@ func testConcurrentCopyObjectWithVersioning() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -1382,7 +1410,7 @@ func testConcurrentCopyObjectWithVersioning() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -1530,8 +1558,9 @@ func testComposeObjectWithVersioning() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -1542,7 +1571,7 @@ func testComposeObjectWithVersioning() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -1670,8 +1699,9 @@ func testRemoveObjectWithVersioning() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -1682,7 +1712,7 @@ func testRemoveObjectWithVersioning() { // c.TraceOn(os.Stderr) // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -1782,8 +1812,9 @@ func testRemoveObjectsWithVersioning() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -1794,7 +1825,7 @@ func testRemoveObjectsWithVersioning() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -1877,8 +1908,9 @@ func testObjectTaggingWithVersioning() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -1889,7 +1921,7 @@ func testObjectTaggingWithVersioning() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -2044,8 +2076,9 @@ func testPutObjectWithChecksums() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -2056,7 +2089,7 @@ func testPutObjectWithChecksums() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -2238,8 +2271,9 @@ func testPutMultipartObjectWithChecksums() { // Instantiate new minio client object. 
c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -2250,7 +2284,7 @@ func testPutMultipartObjectWithChecksums() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -2408,6 +2442,7 @@ func testTrailingChecksums() { c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), Secure: mustParseBool(os.Getenv(enableHTTPS)), TrailingHeaders: true, }) @@ -2420,7 +2455,7 @@ func testTrailingChecksums() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -2621,6 +2656,7 @@ func testPutObjectWithAutomaticChecksums() { c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), Secure: mustParseBool(os.Getenv(enableHTTPS)), TrailingHeaders: true, }) @@ -2630,7 +2666,7 @@ func testPutObjectWithAutomaticChecksums() { } // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -2754,6 +2790,558 @@ func testPutObjectWithAutomaticChecksums() { successLogger(testName, function, args, startTime).Info() } +func testGetObjectAttributes() { + startTime := time.Now() + testName := getFuncName() + function := "GetObjectAttributes(ctx, bucketName, objectName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.ObjectAttributesOptions{}", + } + + if !isFullMode() { + ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() + return + } + + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + TrailingHeaders: true, + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + err = c.MakeBucket( + context.Background(), + bucketName, + minio.MakeBucketOptions{Region: "us-east-1"}, + ) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + bucketNameV := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-versioned-") + args["bucketName"] = bucketNameV + err = c.MakeBucket( + context.Background(), + bucketNameV, + minio.MakeBucketOptions{Region: "us-east-1"}, + ) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + err = c.EnableVersioning(context.Background(), bucketNameV) + if err != nil { + logError(testName, function, args, startTime, "", "Unable to enable versioning", err) + return + } + + defer cleanupBucket(bucketName, c) + defer cleanupVersionedBucket(bucketNameV, c) + + testFiles := make(map[string]*objectAttributesNewObject) + testFiles["file1"] = &objectAttributesNewObject{ + Object: "file1", + ObjectReaderType: "datafile-1.03-MB", + Bucket: bucketNameV, + ContentType: "custom/contenttype", + SendContentMd5: false, + } + + testFiles["file2"] = &objectAttributesNewObject{ + Object: "file2", + ObjectReaderType: "datafile-129-MB", + Bucket: bucketName, + ContentType: "custom/contenttype", + SendContentMd5: false, + } + + for i, v := range testFiles { + bufSize := dataFileMap[v.ObjectReaderType] + + reader := getDataReader(v.ObjectReaderType) + + args["objectName"] = v.Object + testFiles[i].UploadInfo, err = c.PutObject(context.Background(), v.Bucket, v.Object, reader, int64(bufSize), minio.PutObjectOptions{ + ContentType: v.ContentType, + SendContentMd5: v.SendContentMd5, + }) + + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + testTable := make(map[string]objectAttributesTableTest) + + testTable["none-versioned"] = objectAttributesTableTest{ + opts: minio.ObjectAttributesOptions{}, + test: objectAttributesTestOptions{ + TestFileName: "file2", + StorageClass: "STANDARD", + HasFullChecksum: true, + HasPartChecksums: true, + HasParts: true, + }, + } + + testTable["0-to-0-marker"] = objectAttributesTableTest{ + opts: minio.ObjectAttributesOptions{ + PartNumberMarker: 0, + MaxParts: 0, + }, + test: objectAttributesTestOptions{ + TestFileName: "file2", + StorageClass: 
"STANDARD", + HasFullChecksum: true, + HasPartChecksums: true, + HasParts: true, + }, + } + + testTable["0-marker-to-max"] = objectAttributesTableTest{ + opts: minio.ObjectAttributesOptions{ + PartNumberMarker: 0, + MaxParts: 10000, + }, + test: objectAttributesTestOptions{ + TestFileName: "file2", + StorageClass: "STANDARD", + HasFullChecksum: true, + HasPartChecksums: true, + HasParts: true, + }, + } + + testTable["0-to-1-marker"] = objectAttributesTableTest{ + opts: minio.ObjectAttributesOptions{ + PartNumberMarker: 0, + MaxParts: 1, + }, + test: objectAttributesTestOptions{ + TestFileName: "file2", + StorageClass: "STANDARD", + HasFullChecksum: true, + HasPartChecksums: true, + HasParts: true, + }, + } + + testTable["7-to-6-marker"] = objectAttributesTableTest{ + opts: minio.ObjectAttributesOptions{ + PartNumberMarker: 7, + MaxParts: 6, + }, + test: objectAttributesTestOptions{ + TestFileName: "file2", + StorageClass: "STANDARD", + HasFullChecksum: true, + HasPartChecksums: true, + HasParts: true, + }, + } + + testTable["versioned"] = objectAttributesTableTest{ + opts: minio.ObjectAttributesOptions{}, + test: objectAttributesTestOptions{ + TestFileName: "file1", + StorageClass: "STANDARD", + HasFullChecksum: false, + }, + } + + for i, v := range testTable { + + tf, ok := testFiles[v.test.TestFileName] + if !ok { + continue + } + + args["objectName"] = tf.Object + args["bucketName"] = tf.Bucket + if tf.UploadInfo.VersionID != "" { + v.opts.VersionID = tf.UploadInfo.VersionID + } + + s, err := c.GetObjectAttributes(context.Background(), tf.Bucket, tf.Object, v.opts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err) + return + } + + v.test.NumberOfParts = s.ObjectParts.PartsCount + v.test.ETag = tf.UploadInfo.ETag + v.test.ObjectSize = int(tf.UploadInfo.Size) + + err = validateObjectAttributeRequest(s, &v.opts, &v.test) + if err != nil { + logError(testName, function, args, startTime, "", "Validating GetObjectsAttributes response failed, table test: "+i, err) + return + } + + } + + successLogger(testName, function, args, startTime).Info() +} + +func testGetObjectAttributesSSECEncryption() { + startTime := time.Now() + testName := getFuncName() + function := "GetObjectAttributes(ctx, bucketName, objectName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.ObjectAttributesOptions{}", + } + + if !isFullMode() { + ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() + return + } + + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + TrailingHeaders: true, + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + Transport: createHTTPTransport(), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + err = c.MakeBucket( + context.Background(), + bucketName, + minio.MakeBucketOptions{Region: "us-east-1"}, + ) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + objectName := "encrypted-object" + args["objectName"] = objectName + bufSize := dataFileMap["datafile-11-MB"] + reader := 
getDataReader("datafile-11-MB") + + sse := encrypt.DefaultPBKDF([]byte("word1 word2 word3 word4"), []byte(bucketName+objectName)) + + info, err := c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ + ContentType: "content/custom", + SendContentMd5: true, + ServerSideEncryption: sse, + PartSize: uint64(bufSize) / 2, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + opts := minio.ObjectAttributesOptions{ + ServerSideEncryption: sse, + } + attr, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes with empty bucket name should have failed", nil) + return + } + err = validateObjectAttributeRequest(attr, &opts, &objectAttributesTestOptions{ + TestFileName: info.Key, + ETag: info.ETag, + NumberOfParts: 2, + ObjectSize: int(info.Size), + HasFullChecksum: false, + HasParts: true, + HasPartChecksums: false, + }) + if err != nil { + logError(testName, function, args, startTime, "", "Validating GetObjectsAttributes response failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +func testGetObjectAttributesErrorCases() { + startTime := time.Now() + testName := getFuncName() + function := "GetObjectAttributes(ctx, bucketName, objectName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.ObjectAttributesOptions{}", + } + + if !isFullMode() { + ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() + return + } + + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + TrailingHeaders: true, + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + unknownBucket := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-bucket-") + unknownObject := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-object-") + + _, err = c.GetObjectAttributes(context.Background(), unknownBucket, unknownObject, minio.ObjectAttributesOptions{}) + if err == nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes failed", nil) + return + } + + errorResponse := err.(minio.ErrorResponse) + if errorResponse.Code != "NoSuchBucket" { + logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchBucket but got "+errorResponse.Code, nil) + return + } + + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + err = c.MakeBucket( + context.Background(), + bucketName, + minio.MakeBucketOptions{Region: "us-east-1"}, + ) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + bucketNameV := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-versioned-") + args["bucketName"] = bucketNameV + err = c.MakeBucket( + context.Background(), + bucketNameV, + minio.MakeBucketOptions{Region: "us-east-1"}, + ) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + err = c.EnableVersioning(context.Background(), 
bucketNameV) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + defer cleanupBucket(bucketName, c) + defer cleanupVersionedBucket(bucketNameV, c) + + _, err = c.GetObjectAttributes(context.Background(), bucketName, unknownObject, minio.ObjectAttributesOptions{}) + if err == nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes failed", nil) + return + } + + errorResponse = err.(minio.ErrorResponse) + if errorResponse.Code != "NoSuchKey" { + logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchKey but got "+errorResponse.Code, nil) + return + } + + _, err = c.GetObjectAttributes(context.Background(), bucketName, "", minio.ObjectAttributesOptions{}) + if err == nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes with empty object name should have failed", nil) + return + } + + _, err = c.GetObjectAttributes(context.Background(), "", unknownObject, minio.ObjectAttributesOptions{}) + if err == nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes with empty bucket name should have failed", nil) + return + } + + _, err = c.GetObjectAttributes(context.Background(), bucketNameV, unknownObject, minio.ObjectAttributesOptions{ + VersionID: uuid.NewString(), + }) + if err == nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes with empty bucket name should have failed", nil) + return + } + errorResponse = err.(minio.ErrorResponse) + if errorResponse.Code != "NoSuchVersion" { + logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchVersion but got "+errorResponse.Code, nil) + return + } + + successLogger(testName, function, args, startTime).Info() +} + +type objectAttributesNewObject struct { + Object string + ObjectReaderType string + Bucket string + ContentType string + SendContentMd5 bool + UploadInfo minio.UploadInfo +} + +type objectAttributesTableTest struct { + opts minio.ObjectAttributesOptions + test objectAttributesTestOptions +} + +type objectAttributesTestOptions struct { + TestFileName string + ETag string + NumberOfParts int + StorageClass string + ObjectSize int + HasPartChecksums bool + HasFullChecksum bool + HasParts bool +} + +func validateObjectAttributeRequest(OA *minio.ObjectAttributes, opts *minio.ObjectAttributesOptions, test *objectAttributesTestOptions) (err error) { + if opts.VersionID != "" { + if OA.VersionID != opts.VersionID { + err = fmt.Errorf("Expected versionId %s but got versionId %s", opts.VersionID, OA.VersionID) + return + } + } + + partsMissingChecksum := false + foundPartChecksum := false + for _, v := range OA.ObjectParts.Parts { + checksumFound := false + if v.ChecksumSHA256 != "" { + checksumFound = true + } else if v.ChecksumSHA1 != "" { + checksumFound = true + } else if v.ChecksumCRC32 != "" { + checksumFound = true + } else if v.ChecksumCRC32C != "" { + checksumFound = true + } + if !checksumFound { + partsMissingChecksum = true + } else { + foundPartChecksum = true + } + } + + if test.HasPartChecksums { + if partsMissingChecksum { + err = fmt.Errorf("One or all parts were missing a checksum") + return + } + } else { + if foundPartChecksum { + err = fmt.Errorf("Did not expect ObjectParts to have checksums but found one") + return + } + } + + hasFullObjectChecksum := true + if OA.Checksum.ChecksumCRC32 == "" { + if OA.Checksum.ChecksumCRC32C == "" { + if OA.Checksum.ChecksumSHA1 == "" { + if OA.Checksum.ChecksumSHA256 == "" { + 
hasFullObjectChecksum = false + } + } + } + } + + if test.HasFullChecksum { + if !hasFullObjectChecksum { + err = fmt.Errorf("Full object checksum not found") + return + } + } else { + if hasFullObjectChecksum { + err = fmt.Errorf("Did not expect a full object checksum but we got one") + return + } + } + + if OA.ETag != test.ETag { + err = fmt.Errorf("Etags do not match, got %s but expected %s", OA.ETag, test.ETag) + return + } + + if test.HasParts { + if len(OA.ObjectParts.Parts) < 1 { + err = fmt.Errorf("Was expecting ObjectParts but none were present") + return + } + } + + if OA.StorageClass == "" { + err = fmt.Errorf("Was expecting a StorageClass but got none") + return + } + + if OA.ObjectSize != test.ObjectSize { + err = fmt.Errorf("Expected ObjectSize %d but got %d", test.ObjectSize, OA.ObjectSize) + return + } + + if test.HasParts { + if opts.MaxParts == 0 { + if len(OA.ObjectParts.Parts) != OA.ObjectParts.PartsCount { + err = fmt.Errorf("expected %d parts but got %d", OA.ObjectParts.PartsCount, len(OA.ObjectParts.Parts)) + return + } + } else if (opts.MaxParts + opts.PartNumberMarker) > OA.ObjectParts.PartsCount { + if len(OA.ObjectParts.Parts) != (OA.ObjectParts.PartsCount - opts.PartNumberMarker) { + err = fmt.Errorf("expected %d parts but got %d", (OA.ObjectParts.PartsCount - opts.PartNumberMarker), len(OA.ObjectParts.Parts)) + return + } + } else if opts.MaxParts != 0 { + if opts.MaxParts != len(OA.ObjectParts.Parts) { + err = fmt.Errorf("expected %d parts but got %d", opts.MaxParts, len(OA.ObjectParts.Parts)) + return + } + } + } + + if OA.ObjectParts.NextPartNumberMarker == OA.ObjectParts.PartsCount { + if OA.ObjectParts.IsTruncated { + err = fmt.Errorf("Expected ObjectParts to NOT be truncated, but it was") + return + } + } + + if OA.ObjectParts.NextPartNumberMarker != OA.ObjectParts.PartsCount { + if !OA.ObjectParts.IsTruncated { + err = fmt.Errorf("Expected ObjectParts to be truncated, but it was NOT") + return + } + } + + return +} + // Test PutObject using a large data to trigger multipart readat func testPutObjectWithMetadata() { // initialize logging params @@ -2777,8 +3365,9 @@ func testPutObjectWithMetadata() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -2789,7 +3378,7 @@ func testPutObjectWithMetadata() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -2883,8 +3472,9 @@ func testPutObjectWithContentLanguage() { // Instantiate new minio client object. 
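The table tests above drive GetObjectAttributes through different MaxParts/PartNumberMarker combinations and then check the ObjectParts pagination fields (PartsCount, NextPartNumberMarker, IsTruncated). As a standalone illustration of that pagination contract, not part of the vendored tests, the sketch below walks all parts of an object page by page; the endpoint, credentials, bucket and object names, and the page size are placeholders.

// Illustrative sketch only (not part of functional_tests.go): page through
// an object's parts with GetObjectAttributes, using the same option and
// response fields exercised by the table tests above.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	c, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	opts := minio.ObjectAttributesOptions{MaxParts: 3} // small page size for illustration
	for {
		attr, err := c.GetObjectAttributes(context.Background(), "my-bucket", "my-multipart-object", opts)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("page: %d of %d total parts, storage class %s\n",
			len(attr.ObjectParts.Parts), attr.ObjectParts.PartsCount, attr.StorageClass)
		for _, p := range attr.ObjectParts.Parts {
			// Per-part checksums only show up when the object was uploaded
			// with trailing checksums, as in the tests above.
			fmt.Println(p.ChecksumCRC32C, p.ChecksumSHA256)
		}
		if !attr.ObjectParts.IsTruncated {
			break // NextPartNumberMarker == PartsCount: last page reached
		}
		// Resume the next request where this page stopped.
		opts.PartNumberMarker = attr.ObjectParts.NextPartNumberMarker
	}
}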
c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -2895,7 +3485,7 @@ func testPutObjectWithContentLanguage() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -2952,8 +3542,9 @@ func testPutObjectStreaming() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -2964,7 +3555,7 @@ func testPutObjectStreaming() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -3023,8 +3614,9 @@ func testGetObjectSeekEnd() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -3035,7 +3627,7 @@ func testGetObjectSeekEnd() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -3145,8 +3737,9 @@ func testGetObjectClosedTwice() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -3157,7 +3750,7 @@ func testGetObjectClosedTwice() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. 
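The error-case test added earlier in this diff asserts specific S3 error codes (NoSuchBucket, NoSuchKey, NoSuchVersion) via a direct type assertion to minio.ErrorResponse. Purely as an illustrative alternative, and not as the vendored code, the helper below does the same classification with minio.ToErrorResponse, which returns a zero-value ErrorResponse for foreign error types where the bare type assertion would panic.

// Illustrative sketch (not part of the vendored tests): map a
// GetObjectAttributes error to the S3 error codes checked by
// testGetObjectAttributesErrorCases above.
package attrcheck

import (
	"github.com/minio/minio-go/v7"
)

// classifyAttrError returns the S3 error code the error-case test expects,
// or "other" for anything else (including transport-level failures).
func classifyAttrError(err error) string {
	if err == nil {
		return ""
	}
	resp := minio.ToErrorResponse(err) // zero value if err is not a minio.ErrorResponse
	switch resp.Code {
	case "NoSuchBucket", "NoSuchKey", "NoSuchVersion":
		return resp.Code
	default:
		return "other"
	}
}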
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -3235,8 +3828,9 @@ func testRemoveObjectsContext() { // Instantiate new minio client. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -3244,7 +3838,7 @@ func testRemoveObjectsContext() { } // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Enable tracing, write to stdout. // c.TraceOn(os.Stderr) @@ -3331,8 +3925,9 @@ func testRemoveMultipleObjects() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -3340,7 +3935,7 @@ func testRemoveMultipleObjects() { } // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Enable tracing, write to stdout. // c.TraceOn(os.Stderr) @@ -3414,8 +4009,9 @@ func testRemoveMultipleObjectsWithResult() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -3423,7 +4019,7 @@ func testRemoveMultipleObjectsWithResult() { } // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Enable tracing, write to stdout. // c.TraceOn(os.Stderr) @@ -3549,8 +4145,9 @@ func testFPutObjectMultipart() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -3561,7 +4158,7 @@ func testFPutObjectMultipart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -3654,8 +4251,9 @@ func testFPutObject() { // Instantiate new minio client object. 
c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -3666,7 +4264,7 @@ func testFPutObject() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -3823,8 +4421,9 @@ func testFPutObjectContext() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -3835,7 +4434,7 @@ func testFPutObjectContext() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -3923,8 +4522,9 @@ func testFPutObjectContextV2() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -3935,7 +4535,7 @@ func testFPutObjectContextV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -4024,8 +4624,9 @@ func testPutObjectContext() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -4036,7 +4637,7 @@ func testPutObjectContext() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Make a new bucket. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -4096,8 +4697,9 @@ func testGetObjectS3Zip() { // Instantiate new minio client object. 
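Most hunks in this file make the same two adjustments: the client is now constructed with Transport: createHTTPTransport() and SetAppInfo reports appVersion instead of the hard-coded "0.1.0". The createHTTPTransport helper itself is defined elsewhere in functional_tests.go and is not visible in this diff; the sketch below is only a hypothetical stand-in built on the standard library, and the environment-variable name and timeouts are invented for illustration and may not match the real helper.

// Hypothetical sketch only -- the real createHTTPTransport in
// functional_tests.go is not shown in this diff and may differ.
package transportsketch

import (
	"crypto/tls"
	"net/http"
	"os"
	"time"
)

// newTestTransport returns an http.RoundTripper suitable for
// minio.Options.Transport: default proxy handling, modest timeouts, and
// optional TLS verification skipping controlled by an invented
// SKIP_CERT_VALIDATION variable for self-signed test endpoints.
func newTestTransport() http.RoundTripper {
	tr := &http.Transport{
		Proxy:                 http.ProxyFromEnvironment,
		TLSHandshakeTimeout:   10 * time.Second,
		ResponseHeaderTimeout: 2 * time.Minute,
	}
	if os.Getenv("SKIP_CERT_VALIDATION") == "true" {
		// Test environments only; never disable verification in production.
		tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
	}
	return tr
}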
c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -4108,7 +4710,7 @@ func testGetObjectS3Zip() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -4279,8 +4881,9 @@ func testGetObjectReadSeekFunctional() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -4291,7 +4894,7 @@ func testGetObjectReadSeekFunctional() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -4448,8 +5051,9 @@ func testGetObjectReadAtFunctional() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -4460,7 +5064,7 @@ func testGetObjectReadAtFunctional() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -4625,8 +5229,9 @@ func testGetObjectReadAtWhenEOFWasReached() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -4637,7 +5242,7 @@ func testGetObjectReadAtWhenEOFWasReached() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -4744,8 +5349,9 @@ func testPresignedPostPolicy() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -4756,7 +5362,7 @@ func testPresignedPostPolicy() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -4965,8 +5571,9 @@ func testCopyObject() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -4977,7 +5584,7 @@ func testCopyObject() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -5159,8 +5766,9 @@ func testSSECEncryptedGetObjectReadSeekFunctional() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -5171,7 +5779,7 @@ func testSSECEncryptedGetObjectReadSeekFunctional() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -5341,8 +5949,9 @@ func testSSES3EncryptedGetObjectReadSeekFunctional() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -5353,7 +5962,7 @@ func testSSES3EncryptedGetObjectReadSeekFunctional() { // c.TraceOn(os.Stderr) // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -5521,8 +6130,9 @@ func testSSECEncryptedGetObjectReadAtFunctional() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -5533,7 +6143,7 @@ func testSSECEncryptedGetObjectReadAtFunctional() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -5704,8 +6314,9 @@ func testSSES3EncryptedGetObjectReadAtFunctional() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -5716,7 +6327,7 @@ func testSSES3EncryptedGetObjectReadAtFunctional() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -5888,8 +6499,9 @@ func testSSECEncryptionPutGet() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -5900,7 +6512,7 @@ func testSSECEncryptionPutGet() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -5997,8 +6609,9 @@ func testSSECEncryptionFPut() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -6009,7 +6622,7 @@ func testSSECEncryptionFPut() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -6119,8 +6732,9 @@ func testSSES3EncryptionPutGet() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -6131,7 +6745,7 @@ func testSSES3EncryptionPutGet() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -6226,8 +6840,9 @@ func testSSES3EncryptionFPut() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -6238,7 +6853,7 @@ func testSSES3EncryptionFPut() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -6353,8 +6968,9 @@ func testBucketNotification() { c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -6365,7 +6981,7 @@ func testBucketNotification() { // c.TraceOn(os.Stderr) // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) bucketName := os.Getenv("NOTIFY_BUCKET") args["bucketName"] = bucketName @@ -6447,8 +7063,9 @@ func testFunctional() { c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, nil, startTime, "", "MinIO client object creation failed", err) @@ -6459,7 +7076,7 @@ func testFunctional() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -7134,8 +7751,9 @@ func testGetObjectModified() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -7146,7 +7764,7 @@ func testGetObjectModified() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Make a new bucket. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -7229,8 +7847,9 @@ func testPutObjectUploadSeekedObject() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -7241,7 +7860,7 @@ func testPutObjectUploadSeekedObject() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Make a new bucket. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -7351,8 +7970,9 @@ func testMakeBucketErrorV2() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) @@ -7363,7 +7983,7 @@ func testMakeBucketErrorV2() { // c.TraceOn(os.Stderr) // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -7410,8 +8030,9 @@ func testGetObjectClosedTwiceV2() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) @@ -7422,7 +8043,7 @@ func testGetObjectClosedTwiceV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -7500,8 +8121,9 @@ func testFPutObjectV2() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) @@ -7512,7 +8134,7 @@ func testFPutObjectV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -7660,8 +8282,9 @@ func testMakeBucketRegionsV2() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) @@ -7672,7 +8295,7 @@ func testMakeBucketRegionsV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -7722,8 +8345,9 @@ func testGetObjectReadSeekFunctionalV2() { // Instantiate new minio client object. 
c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) @@ -7734,7 +8358,7 @@ func testGetObjectReadSeekFunctionalV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -7876,8 +8500,9 @@ func testGetObjectReadAtFunctionalV2() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) @@ -7888,7 +8513,7 @@ func testGetObjectReadAtFunctionalV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -8037,8 +8662,9 @@ func testCopyObjectV2() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) @@ -8049,7 +8675,7 @@ func testCopyObjectV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -8252,8 +8878,9 @@ func testComposeObjectErrorCasesV2() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) @@ -8349,8 +8976,9 @@ func testCompose10KSourcesV2() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) @@ -8370,8 +8998,9 @@ func testEncryptedEmptyObject() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) @@ -8630,8 +9259,9 @@ func testUnencryptedToSSECCopyObject() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) @@ -8656,8 +9286,9 @@ func testUnencryptedToSSES3CopyObject() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) @@ -8683,8 +9314,9 @@ func testUnencryptedToUnencryptedCopyObject() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) @@ 
-8709,8 +9341,9 @@ func testEncryptedSSECToSSECCopyObject() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) @@ -8736,8 +9369,9 @@ func testEncryptedSSECToSSES3CopyObject() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) @@ -8763,8 +9397,9 @@ func testEncryptedSSECToUnencryptedCopyObject() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) @@ -8790,8 +9425,9 @@ func testEncryptedSSES3ToSSECCopyObject() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) @@ -8817,8 +9453,9 @@ func testEncryptedSSES3ToSSES3CopyObject() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) @@ -8844,8 +9481,9 @@ func testEncryptedSSES3ToUnencryptedCopyObject() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) @@ -8871,8 +9509,9 @@ func 
testEncryptedCopyObjectV2() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) @@ -8897,8 +9536,9 @@ func testDecryptedCopyObject() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) @@ -8956,8 +9596,9 @@ func testSSECMultipartEncryptedToSSECCopyObjectPart() { // Instantiate new minio client object client, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) @@ -8971,7 +9612,7 @@ func testSSECMultipartEncryptedToSSECCopyObjectPart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -9153,8 +9794,9 @@ func testSSECEncryptedToSSECCopyObjectPart() { // Instantiate new minio client object client, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) @@ -9168,7 +9810,7 @@ func testSSECEncryptedToSSECCopyObjectPart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -9330,8 +9972,9 @@ func testSSECEncryptedToUnencryptedCopyPart() { // Instantiate new minio client object client, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) @@ -9345,7 +9988,7 @@ func testSSECEncryptedToUnencryptedCopyPart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -9506,8 +10149,9 @@ func testSSECEncryptedToSSES3CopyObjectPart() { // Instantiate new minio client object client, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) @@ -9521,7 +10165,7 @@ func testSSECEncryptedToSSES3CopyObjectPart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -9685,8 +10329,9 @@ func testUnencryptedToSSECCopyObjectPart() { // Instantiate new minio client object client, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) @@ -9700,7 +10345,7 @@ func testUnencryptedToSSECCopyObjectPart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -9859,8 +10504,9 @@ func testUnencryptedToUnencryptedCopyPart() { // Instantiate new minio client object client, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) @@ -9874,7 +10520,7 @@ func testUnencryptedToUnencryptedCopyPart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10029,8 +10675,9 @@ func testUnencryptedToSSES3CopyObjectPart() { // Instantiate new minio client object client, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) @@ -10044,7 +10691,7 @@ func testUnencryptedToSSES3CopyObjectPart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10201,8 +10848,9 @@ func testSSES3EncryptedToSSECCopyObjectPart() { // Instantiate new minio client object client, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) @@ -10216,7 +10864,7 @@ func testSSES3EncryptedToSSECCopyObjectPart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10376,8 +11024,9 @@ func testSSES3EncryptedToUnencryptedCopyPart() { // Instantiate new minio client object client, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) @@ -10391,7 +11040,7 @@ func testSSES3EncryptedToUnencryptedCopyPart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10547,8 +11196,9 @@ func testSSES3EncryptedToSSES3CopyObjectPart() { // Instantiate new minio client object client, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) @@ -10562,7 +11212,7 @@ func testSSES3EncryptedToSSES3CopyObjectPart() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10720,8 +11370,9 @@ func testUserMetadataCopying() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -10896,8 +11547,9 @@ func testUserMetadataCopyingV2() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) @@ -10918,8 +11570,9 @@ func testStorageClassMetadataPutObject() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) @@ -11005,8 +11658,9 @@ func testStorageClassInvalidMetadataPutObject() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) @@ -11047,8 +11701,9 @@ func testStorageClassMetadataCopyObject() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + Transport: createHTTPTransport(), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) @@ -11176,8 +11831,9 @@ func testPutObjectNoLengthV2() { // Instantiate new minio client object. 
c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) @@ -11188,7 +11844,7 @@ func testPutObjectNoLengthV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -11251,8 +11907,9 @@ func testPutObjectsUnknownV2() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) @@ -11263,7 +11920,7 @@ func testPutObjectsUnknownV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -11341,8 +11998,9 @@ func testPutObject0ByteV2() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) @@ -11353,7 +12011,7 @@ func testPutObject0ByteV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -11402,8 +12060,9 @@ func testComposeObjectErrorCases() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -11424,8 +12083,9 @@ func testCompose10KSources() { // Instantiate new minio client object c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -11449,8 +12109,9 @@ func testFunctionalV2() { c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + Transport: createHTTPTransport(), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) @@ -11461,7 +12122,7 @@ func testFunctionalV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -11909,8 +12570,9 @@ func testGetObjectContext() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) @@ -11921,7 +12583,7 @@ func testGetObjectContext() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -12011,8 +12673,9 @@ func testFGetObjectContext() { // Instantiate new minio client object. 
c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) @@ -12023,7 +12686,7 @@ func testFGetObjectContext() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -12099,8 +12762,9 @@ func testGetObjectRanges() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) @@ -12111,7 +12775,7 @@ func testGetObjectRanges() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rng, "minio-go-test-") @@ -12208,8 +12872,9 @@ func testGetObjectACLContext() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) @@ -12220,7 +12885,7 @@ func testGetObjectACLContext() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -12383,8 +13048,9 @@ func testPutObjectContextV2() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) @@ -12395,7 +13061,7 @@ func testPutObjectContextV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Make a new bucket. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -12457,8 +13123,9 @@ func testGetObjectContextV2() { // Instantiate new minio client object. 
c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) @@ -12469,7 +13136,7 @@ func testGetObjectContextV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -12557,8 +13224,9 @@ func testFGetObjectContextV2() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) @@ -12569,7 +13237,7 @@ func testFGetObjectContextV2() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -12645,8 +13313,9 @@ func testListObjects() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) @@ -12657,7 +13326,7 @@ func testListObjects() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -12745,8 +13414,9 @@ func testRemoveObjects() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) @@ -12757,7 +13427,7 @@ func testRemoveObjects() { // c.TraceOn(os.Stderr) // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -12891,6 +13561,8 @@ func main() { // execute tests if isFullMode() { + testGetObjectAttributes() + testGetObjectAttributesErrorCases() testMakeBucketErrorV2() testGetObjectClosedTwiceV2() testFPutObjectV2() @@ -12962,6 +13634,7 @@ func main() { // SSE-C tests will only work over TLS connection. if tls { + testGetObjectAttributesSSECEncryption() testSSECEncryptionPutGet() testSSECEncryptionFPut() testSSECEncryptedGetObjectReadAtFunctional() diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go index c52f78c3..10c95ffe 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go @@ -402,7 +402,7 @@ func (e Expiration) IsDeleteMarkerExpirationEnabled() bool { // IsNull returns true if both date and days fields are null func (e Expiration) IsNull() bool { - return e.IsDaysNull() && e.IsDateNull() && !e.IsDeleteMarkerExpirationEnabled() + return e.IsDaysNull() && e.IsDateNull() && !e.IsDeleteMarkerExpirationEnabled() && !e.DeleteAll.IsEnabled() } // MarshalXML is expiration is non null diff --git a/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go index c35e58e1..2566a3df 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go @@ -149,22 +149,19 @@ func (set StringSet) MarshalJSON() ([]byte, error) { } // UnmarshalJSON - parses JSON data and creates new set with it. -// If 'data' contains JSON string array, the set contains each string. -// If 'data' contains JSON string, the set contains the string as one element. -// If 'data' contains Other JSON types, JSON parse error is returned. func (set *StringSet) UnmarshalJSON(data []byte) error { - sl := []string{} + sl := []interface{}{} var err error if err = json.Unmarshal(data, &sl); err == nil { *set = make(StringSet) for _, s := range sl { - set.Add(s) + set.Add(fmt.Sprintf("%v", s)) } } else { - var s string + var s interface{} if err = json.Unmarshal(data, &s); err == nil { *set = make(StringSet) - set.Add(s) + set.Add(fmt.Sprintf("%v", s)) } } diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go index e39eba02..94c19b2a 100644 --- a/vendor/github.com/minio/minio-go/v7/utils.go +++ b/vendor/github.com/minio/minio-go/v7/utils.go @@ -21,6 +21,7 @@ import ( "context" "crypto/md5" fipssha256 "crypto/sha256" + "crypto/tls" "encoding/base64" "encoding/hex" "encoding/xml" @@ -513,6 +514,7 @@ func isAmzHeader(headerKey string) bool { // supportedQueryValues is a list of query strings that can be passed in when using GetObject. 
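Away from the functional tests, the stringset.go hunk above loosens StringSet.UnmarshalJSON: rather than requiring a JSON string or array of strings, it now decodes into interface{} values and stores each element's fmt.Sprintf("%v", ...) rendering. A stand-in type (not the library's StringSet) following the same pattern:

package main

import (
	"encoding/json"
	"fmt"
)

// looseSet shows the decoding pattern the UnmarshalJSON hunk switches to:
// unmarshal into interface{} values first, then coerce each element to its
// string form, so payloads like ["a", 1, true] or a bare 42 no longer fail.
type looseSet map[string]struct{}

func (s *looseSet) UnmarshalJSON(data []byte) error {
	*s = make(looseSet)
	var list []interface{}
	if err := json.Unmarshal(data, &list); err == nil {
		for _, v := range list {
			(*s)[fmt.Sprintf("%v", v)] = struct{}{}
		}
		return nil
	}
	var single interface{}
	if err := json.Unmarshal(data, &single); err != nil {
		return err
	}
	(*s)[fmt.Sprintf("%v", single)] = struct{}{}
	return nil
}

func main() {
	var s looseSet
	_ = json.Unmarshal([]byte(`["read", 1, true]`), &s)
	fmt.Println(s) // map[1:{} read:{} true:{}]
}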
var supportedQueryValues = map[string]bool{ + "attributes": true, "partNumber": true, "versionId": true, "response-cache-control": true, @@ -622,7 +624,7 @@ func IsNetworkOrHostDown(err error, expectTimeouts bool) bool { urlErr := &url.Error{} if errors.As(err, &urlErr) { switch urlErr.Err.(type) { - case *net.DNSError, *net.OpError, net.UnknownNetworkError: + case *net.DNSError, *net.OpError, net.UnknownNetworkError, *tls.CertificateVerificationError: return true } } @@ -649,7 +651,12 @@ func IsNetworkOrHostDown(err error, expectTimeouts bool) bool { case strings.Contains(err.Error(), "connection refused"): // If err is connection refused return true - + case strings.Contains(err.Error(), "server gave HTTP response to HTTPS client"): + // If err is TLS client is used with HTTP server + return true + case strings.Contains(err.Error(), "Client sent an HTTP request to an HTTPS server"): + // If err is plain-text Client is used with a HTTPS server + return true case strings.Contains(strings.ToLower(err.Error()), "503 service unavailable"): // Denial errors return true diff --git a/vendor/github.com/power-devops/perfstat/cpustat.go b/vendor/github.com/power-devops/perfstat/cpustat.go index d456e68e..10f543fa 100644 --- a/vendor/github.com/power-devops/perfstat/cpustat.go +++ b/vendor/github.com/power-devops/perfstat/cpustat.go @@ -136,4 +136,3 @@ func CpuUtilTotalStat() (*CPUUtil, error) { u := perfstatcpuutil2cpuutil(cpuutil) return &u, nil } - diff --git a/vendor/github.com/power-devops/perfstat/doc.go b/vendor/github.com/power-devops/perfstat/doc.go index a0439c5a..9730a61c 100644 --- a/vendor/github.com/power-devops/perfstat/doc.go +++ b/vendor/github.com/power-devops/perfstat/doc.go @@ -37,24 +37,24 @@ func DisableLVMStat() {} // CpuStat() returns array of CPU structures with information about // logical CPUs on the system. // IBM documentation: -// * https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_int_cpu.html -// * https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cpu.html +// - https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_int_cpu.html +// - https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cpu.html func CpuStat() ([]CPU, error) { return nil, fmt.Errorf("not implemented") } // CpuTotalStat() returns general information about CPUs on the system. // IBM documentation: -// * https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_glob_cpu.html -// * https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cputot.html +// - https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_glob_cpu.html +// - https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cputot.html func CpuTotalStat() (*CPUTotal, error) { return nil, fmt.Errorf("not implemented") } // CpuUtilStat() calculates CPU utilization. 
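The IsNetworkOrHostDown hunk above also widens what counts as a connectivity failure: TLS certificate-verification errors and the two HTTP/HTTPS scheme-mismatch messages are now treated like refused connections. A reduced sketch of that classification, not the library's full function:

package main

import (
	"crypto/tls"
	"errors"
	"fmt"
	"net"
	"net/url"
	"strings"
)

// isHostUnreachable trims IsNetworkOrHostDown down to the cases touched by
// this diff: url.Error wrappers around common network errors (now including
// certificate-verification failures) plus a few well-known message strings.
func isHostUnreachable(err error) bool {
	if err == nil {
		return false
	}
	urlErr := &url.Error{}
	if errors.As(err, &urlErr) {
		switch urlErr.Err.(type) {
		case *net.DNSError, *net.OpError, net.UnknownNetworkError, *tls.CertificateVerificationError:
			return true
		}
	}
	msg := err.Error()
	switch {
	case strings.Contains(msg, "connection refused"),
		strings.Contains(msg, "server gave HTTP response to HTTPS client"),
		strings.Contains(msg, "Client sent an HTTP request to an HTTPS server"):
		return true
	}
	return false
}

func main() {
	err := &url.Error{Op: "Get", URL: "https://minio.local", Err: &tls.CertificateVerificationError{}}
	fmt.Println(isHostUnreachable(err)) // true
}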
// IBM documentation: -// * https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_cpu_util.html -// * https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cpu_util.html +// - https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_cpu_util.html +// - https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cpu_util.html func CpuUtilStat(intvl time.Duration) (*CPUUtil, error) { return nil, fmt.Errorf("not implemented") } diff --git a/vendor/github.com/power-devops/perfstat/helpers.go b/vendor/github.com/power-devops/perfstat/helpers.go index 654cdcf3..d5268ab5 100644 --- a/vendor/github.com/power-devops/perfstat/helpers.go +++ b/vendor/github.com/power-devops/perfstat/helpers.go @@ -8,6 +8,7 @@ package perfstat #include #include +#include #include "c_helpers.h" */ @@ -763,3 +764,56 @@ func fsinfo2filesystem(n *C.struct_fsinfo) FileSystem { return i } + +func lparinfo2partinfo(n C.lpar_info_format2_t) PartitionInfo { + var i PartitionInfo + + i.Version = int(n.version) + i.OnlineMemory = uint64(n.online_memory) + i.TotalDispatchTime = uint64(n.tot_dispatch_time) + i.PoolIdleTime = uint64(n.pool_idle_time) + i.DispatchLatency = uint64(n.dispatch_latency) + i.LparFlags = uint(n.lpar_flags) + i.PCpusInSys = uint(n.pcpus_in_sys) + i.OnlineVCpus = uint(n.online_vcpus) + i.OnlineLCpus = uint(n.online_lcpus) + i.PCpusInPool = uint(n.pcpus_in_pool) + i.UnallocCapacity = uint(n.unalloc_capacity) + i.EntitledCapacity = uint(n.entitled_capacity) + i.VariableWeight = uint(n.variable_weight) + i.UnallocWeight = uint(n.unalloc_weight) + i.MinReqVCpuCapacity = uint(n.min_req_vcpu_capacity) + i.GroupId = uint8(n.group_id) + i.PoolId = uint8(n.pool_id) + i.ShCpusInSys = uint(n.shcpus_in_sys) + i.MaxPoolCapacity = uint(n.max_pool_capacity) + i.EntitledPoolCapacity = uint(n.entitled_pool_capacity) + i.PoolMaxTime = uint64(n.pool_max_time) + i.PoolBusyTime = uint64(n.pool_busy_time) + i.PoolScaledBusyTime = uint64(n.pool_scaled_busy_time) + i.ShCpuTotalTime = uint64(n.shcpu_tot_time) + i.ShCpuBusyTime = uint64(n.shcpu_busy_time) + i.ShCpuScaledBusyTime = uint64(n.shcpu_scaled_busy_time) + i.EntMemCapacity = uint64(n.ent_mem_capacity) + i.PhysMem = uint64(n.phys_mem) + i.VrmPoolPhysMem = uint64(n.vrm_pool_physmem) + i.HypPageSize = uint(n.hyp_pagesize) + i.VrmPoolId = int(n.vrm_pool_id) + i.VrmGroupId = int(n.vrm_group_id) + i.VarMemWeight = int(n.var_mem_weight) + i.UnallocVarMemWeight = int(n.unalloc_var_mem_weight) + i.UnallocEntMemCapacity = uint64(n.unalloc_ent_mem_capacity) + i.TrueOnlineMemory = uint64(n.true_online_memory) + i.AmeOnlineMemory = uint64(n.ame_online_memory) + i.AmeType = uint8(n.ame_type) + i.SpecExecMode = uint8(n.spec_exec_mode) + i.AmeFactor = uint(n.ame_factor) + i.EmPartMajorCode = uint(n.em_part_major_code) + i.EmPartMinorCode = uint(n.em_part_minor_code) + i.BytesCoalesced = uint64(n.bytes_coalesced) + i.BytesCoalescedMemPool = uint64(n.bytes_coalesced_mempool) + i.PurrCoalescing = uint64(n.purr_coalescing) + i.SpurrCoalescing = uint64(n.spurr_coalescing) + + return i +} diff --git a/vendor/github.com/power-devops/perfstat/lparstat.go b/vendor/github.com/power-devops/perfstat/lparstat.go index 06f79fd5..470a1af2 100644 --- a/vendor/github.com/power-devops/perfstat/lparstat.go +++ b/vendor/github.com/power-devops/perfstat/lparstat.go @@ -7,11 +7,13 @@ package perfstat #cgo LDFLAGS: -lperfstat #include +#include */ import "C" import ( "fmt" + "unsafe" ) func 
PartitionStat() (*PartitionConfig, error) { @@ -25,3 +27,14 @@ func PartitionStat() (*PartitionConfig, error) { return &p, nil } + +func LparInfo() (*PartitionInfo, error) { + var pinfo C.lpar_info_format2_t + + rc := C.lpar_get_info(C.LPAR_INFO_FORMAT2, unsafe.Pointer(&pinfo), C.sizeof_lpar_info_format2_t) + if rc != 0 { + return nil, fmt.Errorf("lpar_get_info() error") + } + p := lparinfo2partinfo(pinfo) + return &p, nil +} diff --git a/vendor/github.com/power-devops/perfstat/systemcfg.go b/vendor/github.com/power-devops/perfstat/systemcfg.go index 7f9277bc..b7c7b725 100644 --- a/vendor/github.com/power-devops/perfstat/systemcfg.go +++ b/vendor/github.com/power-devops/perfstat/systemcfg.go @@ -71,6 +71,7 @@ const ( SC_TM_VER = 59 /* Transaction Memory version, 0 - not capable */ SC_NX_CAP = 60 /* NX GZIP capable */ SC_PKS_STATE = 61 /* Platform KeyStore */ + SC_MMA_VER = 62 ) /* kernel attributes */ @@ -120,6 +121,7 @@ const ( IMPL_POWER7 = 0x8000 /* 7 class CPU */ IMPL_POWER8 = 0x10000 /* 8 class CPU */ IMPL_POWER9 = 0x20000 /* 9 class CPU */ + IMPL_POWER10 = 0x20000 /* 10 class CPU */ ) // Values for implementation field for IA64 Architectures @@ -152,11 +154,13 @@ const ( PV_7 = 0x200000 /* Power PC 7 */ PV_8 = 0x300000 /* Power PC 8 */ PV_9 = 0x400000 /* Power PC 9 */ + PV_10 = 0x500000 /* Power PC 10 */ PV_5_Compat = 0x0F8000 /* Power PC 5 */ PV_6_Compat = 0x108000 /* Power PC 6 */ PV_7_Compat = 0x208000 /* Power PC 7 */ PV_8_Compat = 0x308000 /* Power PC 8 */ PV_9_Compat = 0x408000 /* Power PC 9 */ + PV_10_Compat = 0x508000 /* Power PC 10 */ PV_RESERVED_2 = 0x0A0000 /* source compatability */ PV_RESERVED_3 = 0x0B0000 /* source compatability */ PV_RS2 = 0x040000 /* Power RS2 */ @@ -182,19 +186,21 @@ const ( // Macros for identifying physical processor const ( - PPI4_1 = 0x35 - PPI4_2 = 0x38 - PPI4_3 = 0x39 - PPI4_4 = 0x3C - PPI4_5 = 0x44 - PPI5_1 = 0x3A - PPI5_2 = 0x3B - PPI6_1 = 0x3E - PPI7_1 = 0x3F - PPI7_2 = 0x4A - PPI8_1 = 0x4B - PPI8_2 = 0x4D - PPI9 = 0x4E + PPI4_1 = 0x35 + PPI4_2 = 0x38 + PPI4_3 = 0x39 + PPI4_4 = 0x3C + PPI4_5 = 0x44 + PPI5_1 = 0x3A + PPI5_2 = 0x3B + PPI6_1 = 0x3E + PPI7_1 = 0x3F + PPI7_2 = 0x4A + PPI8_1 = 0x4B + PPI8_2 = 0x4D + PPI9 = 0x4E + PPI9_1 = 0x4E + PPI10_1 = 0x80 ) // Macros for kernel attributes @@ -292,14 +298,32 @@ func GetCPUImplementation() string { return "POWER8" case impl&IMPL_POWER9 != 0: return "POWER9" + case impl&IMPL_POWER10 != 0: + return "Power10" default: return "Unknown" } } +func POWER10OrNewer() bool { + impl := unix.Getsystemcfg(SC_IMPL) + if impl&IMPL_POWER10 != 0 { + return true + } + return false +} + +func POWER10() bool { + impl := unix.Getsystemcfg(SC_IMPL) + if impl&IMPL_POWER10 != 0 { + return true + } + return false +} + func POWER9OrNewer() bool { impl := unix.Getsystemcfg(SC_IMPL) - if impl&IMPL_POWER9 != 0 { + if impl&IMPL_POWER10 != 0 || impl&IMPL_POWER9 != 0 { return true } return false @@ -315,7 +339,7 @@ func POWER9() bool { func POWER8OrNewer() bool { impl := unix.Getsystemcfg(SC_IMPL) - if impl&IMPL_POWER9 != 0 || impl&IMPL_POWER8 != 0 { + if impl&IMPL_POWER10 != 0 || impl&IMPL_POWER9 != 0 || impl&IMPL_POWER8 != 0 { return true } return false @@ -331,7 +355,7 @@ func POWER8() bool { func POWER7OrNewer() bool { impl := unix.Getsystemcfg(SC_IMPL) - if impl&IMPL_POWER9 != 0 || impl&IMPL_POWER8 != 0 || impl&IMPL_POWER7 != 0 { + if impl&IMPL_POWER10 != 0 || impl&IMPL_POWER9 != 0 || impl&IMPL_POWER8 != 0 || impl&IMPL_POWER7 != 0 { return true } return false @@ -420,6 +444,8 @@ func PksEnabled() bool { func CPUMode() 
string { impl := unix.Getsystemcfg(SC_VERS) switch impl { + case PV_10, PV_10_Compat: + return "Power10" case PV_9, PV_9_Compat: return "POWER9" case PV_8, PV_8_Compat: diff --git a/vendor/github.com/power-devops/perfstat/types_disk.go b/vendor/github.com/power-devops/perfstat/types_disk.go index ca1493d8..50e323db 100644 --- a/vendor/github.com/power-devops/perfstat/types_disk.go +++ b/vendor/github.com/power-devops/perfstat/types_disk.go @@ -29,8 +29,8 @@ type DiskTotal struct { // Disk Adapter Types const ( DA_SCSI = 0 /* 0 ==> SCSI, SAS, other legacy adapter types */ - DA_VSCSI /* 1 ==> Virtual SCSI/SAS Adapter */ - DA_FCA /* 2 ==> Fiber Channel Adapter */ + DA_VSCSI = 1 /* 1 ==> Virtual SCSI/SAS Adapter */ + DA_FCA = 2 /* 2 ==> Fiber Channel Adapter */ ) type DiskAdapter struct { diff --git a/vendor/github.com/power-devops/perfstat/types_lpar.go b/vendor/github.com/power-devops/perfstat/types_lpar.go index 2d3c32fa..f95f8c30 100644 --- a/vendor/github.com/power-devops/perfstat/types_lpar.go +++ b/vendor/github.com/power-devops/perfstat/types_lpar.go @@ -66,3 +66,64 @@ type PartitionConfig struct { TargetMemExpSize int64 /* Expanded Memory Size in MB */ SubProcessorMode int32 /* Split core mode, its value can be 0,1,2 or 4. 0 for unsupported, 1 for capable but not enabled, 2 or 4 for enabled*/ } + +const ( + AME_TYPE_V1 = 0x1 + AME_TYPE_V2 = 0x2 + LPAR_INFO_CAPPED = 0x01 /* Parition Capped */ + LPAR_INFO_AUTH_PIC = 0x02 /* Authority granted for poolidle*/ + LPAR_INFO_SMT_ENABLED = 0x04 /* SMT Enabled */ + LPAR_INFO_WPAR_ACTIVE = 0x08 /* Process Running Within a WPAR */ + LPAR_INFO_EXTENDED = 0x10 /* Extended shared processor pool information */ + LPAR_INFO_AME_ENABLED = 0x20 /* Active Mem. Expansion (AME) enabled*/ + LPAR_INFO_SEM_ENABLED = 0x40 /* Speculative Execution Mode enabled */ +) + +type PartitionInfo struct { + Version int /* version for this structure */ + OnlineMemory uint64 /* MB of currently online memory */ + TotalDispatchTime uint64 /* Total lpar dispatch time in nsecs */ + PoolIdleTime uint64 /* Idle time of shared CPU pool nsecs*/ + DispatchLatency uint64 /* Max latency inbetween dispatches of this LPAR on physCPUS in nsecs */ + LparFlags uint /* LPAR flags */ + PCpusInSys uint /* # of active licensed physical CPUs in system */ + OnlineVCpus uint /* # of current online virtual CPUs */ + OnlineLCpus uint /* # of current online logical CPUs */ + PCpusInPool uint /* # physical CPUs in shared pool */ + UnallocCapacity uint /* Unallocated Capacity available in shared pool */ + EntitledCapacity uint /* Entitled Processor Capacity for this partition */ + VariableWeight uint /* Variable Processor Capacity Weight */ + UnallocWeight uint /* Unallocated Variable Weight available for this partition */ + MinReqVCpuCapacity uint /* OS minimum required virtual processor capacity. 
*/ + GroupId uint8 /* ID of a LPAR group/aggregation */ + PoolId uint8 /* ID of a shared pool */ + ShCpusInSys uint /* # of physical processors allocated for shared processor use */ + MaxPoolCapacity uint /* Maximum processor capacity of partition's pool */ + EntitledPoolCapacity uint /* Entitled processor capacity of partition's pool */ + PoolMaxTime uint64 /* Summation of maximum time that could be consumed by the pool, in nanoseconds */ + PoolBusyTime uint64 /* Summation of busy time accumulated across all partitions in the pool, in nanoseconds */ + PoolScaledBusyTime uint64 /* Scaled summation of busy time accumulated across all partitions in the pool, in nanoseconds */ + ShCpuTotalTime uint64 /* Summation of total time across all physical processors allocated for shared processor use, in nanoseconds */ + ShCpuBusyTime uint64 /* Summation of busy time accumulated across all shared processor partitions, in nanoseconds */ + ShCpuScaledBusyTime uint64 /* Scaled summation of busy time accumulated across all shared processor partitions, in nanoseconds */ + EntMemCapacity uint64 /* Partition's current entitlement memory capacity setting */ + PhysMem uint64 /* Amount of physical memory, in bytes, currently backing the partition's logical memory */ + VrmPoolPhysMem uint64 /* Total amount of physical memory in the VRM pool */ + HypPageSize uint /* Page size hypervisor is using to virtualize partition's memory */ + VrmPoolId int /* ID of VRM pool */ + VrmGroupId int /* eWLM VRM group to which partition belongs */ + VarMemWeight int /* Partition's current variable memory capacity weighting setting */ + UnallocVarMemWeight int /* Amount of unallocated variable memory capacity weight available to LPAR's group */ + UnallocEntMemCapacity uint64 /* Amount of unallocated I/O memory entitlement available to LPAR's group */ + TrueOnlineMemory uint64 /* true MB of currently online memory */ + AmeOnlineMemory uint64 /* AME MB of currently online memory */ + AmeType uint8 + SpecExecMode uint8 /* Speculative Execution Mode */ + AmeFactor uint /* memory expansion factor for LPAR */ + EmPartMajorCode uint /* Major and minor codes for our */ + EmPartMinorCode uint /* current energy management mode */ + BytesCoalesced uint64 /* The number of bytes of the calling partition.s logical real memory coalesced because they contained duplicated data */ + BytesCoalescedMemPool uint64 /* If the calling partition is authorized to see pool wide statistics then the number of bytes of logical real memory coalesced because they contained duplicated data in the calling partition.s memory pool else set to zero.*/ + PurrCoalescing uint64 /* If the calling partition is authorized to see pool wide statistics then PURR cycles consumed to coalesce data else set to zero.*/ + SpurrCoalescing uint64 /* If the calling partition is authorized to see pool wide statistics then SPURR cycles consumed to coalesce data else set to zero.*/ +} diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index cee360db..2f154907 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -483,6 +483,8 @@ type Histogram struct { // histograms. PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). 
PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket. + // Only used for native histograms. These exemplars MUST have a timestamp. + Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"` } func (x *Histogram) Reset() { @@ -622,6 +624,13 @@ func (x *Histogram) GetPositiveCount() []float64 { return nil } +func (x *Histogram) GetExemplars() []*Exemplar { + if x != nil { + return x.Exemplars + } + return nil +} + // A Bucket of a conventional histogram, each of which is treated as // an individual counter-like time series by Prometheus. type Bucket struct { @@ -923,6 +932,7 @@ type MetricFamily struct { Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + Unit *string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"` } func (x *MetricFamily) Reset() { @@ -985,6 +995,13 @@ func (x *MetricFamily) GetMetric() []*Metric { return nil } +func (x *MetricFamily) GetUnit() string { + if x != nil && x.Unit != nil { + return *x.Unit + } + return "" +} + var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ @@ -1028,7 +1045,7 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xac, 0x05, 0x0a, 0x09, 0x48, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, @@ -1071,79 +1088,84 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, - 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, - 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, - 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, - 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 
- 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, - 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, - 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, - 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, - 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, - 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, - 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, + 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, + 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, + 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, + 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, + 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, + 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, + 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, + 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, + 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, + 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, + 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, + 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 
0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, - 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, - 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, - 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, - 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, - 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, + 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, + 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, + 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, + 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 
0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, - 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, - 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, - 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, - 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, - 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, - 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, - 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, - 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, - 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, - 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, - 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, 0x62, 0x0a, 0x0a, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x55, - 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10, - 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0b, - 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x48, - 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x41, - 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x05, 0x42, - 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, - 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, 0x3b, 0x69, - 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, + 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 
0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, + 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, + 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, + 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, + 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, + 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a, + 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, + 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, + 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, + 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, + 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, + 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, + 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, + 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, + 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, } var ( @@ -1185,22 +1207,23 @@ var file_io_prometheus_client_metrics_proto_depIdxs = []int32{ 13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp 9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan 9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan - 10, // 8: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar - 1, // 9: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair - 13, // 10: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp - 1, // 11: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair - 2, // 12: io.prometheus.client.Metric.gauge:type_name -> 
io.prometheus.client.Gauge - 3, // 13: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter - 5, // 14: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary - 6, // 15: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped - 7, // 16: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram - 0, // 17: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType - 11, // 18: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric - 19, // [19:19] is the sub-list for method output_type - 19, // [19:19] is the sub-list for method input_type - 19, // [19:19] is the sub-list for extension type_name - 19, // [19:19] is the sub-list for extension extendee - 0, // [0:19] is the sub-list for field type_name + 10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar + 10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar + 1, // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair + 13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp + 1, // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair + 2, // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge + 3, // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter + 5, // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary + 6, // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped + 7, // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram + 0, // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType + 11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric + 20, // [20:20] is the sub-list for method output_type + 20, // [20:20] is the sub-list for method input_type + 20, // [20:20] is the sub-list for extension type_name + 20, // [20:20] is the sub-list for extension extendee + 0, // [0:20] is the sub-list for field type_name } func init() { file_io_prometheus_client_metrics_proto_init() } diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 0ca86a3d..b2b89b01 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -14,6 +14,7 @@ package expfmt import ( + "bufio" "fmt" "io" "math" @@ -21,8 +22,8 @@ import ( "net/http" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/encoding/protodelim" - "github.com/matttproud/golang_protobuf_extensions/v2/pbutil" "github.com/prometheus/common/model" ) @@ -44,7 +45,7 @@ func ResponseFormat(h http.Header) Format { mediatype, params, err := mime.ParseMediaType(ct) if err != nil { - return FmtUnknown + return fmtUnknown } const textType = "text/plain" @@ -52,28 +53,28 @@ func ResponseFormat(h http.Header) Format { switch mediatype { case ProtoType: if p, ok := params["proto"]; ok && p != ProtoProtocol { - return FmtUnknown + return fmtUnknown } if e, ok := params["encoding"]; ok && e != "delimited" { - return FmtUnknown + return fmtUnknown } - return FmtProtoDelim + return fmtProtoDelim case textType: if v, ok := params["version"]; ok && v != TextVersion { - return FmtUnknown + return fmtUnknown } - return FmtText + 
return fmtText } - return FmtUnknown + return fmtUnknown } // NewDecoder returns a new decoder based on the given input format. // If the input format does not imply otherwise, a text format decoder is returned. func NewDecoder(r io.Reader, format Format) Decoder { - switch format { - case FmtProtoDelim: + switch format.FormatType() { + case TypeProtoDelim: return &protoDecoder{r: r} } return &textDecoder{r: r} @@ -86,8 +87,10 @@ type protoDecoder struct { // Decode implements the Decoder interface. func (d *protoDecoder) Decode(v *dto.MetricFamily) error { - _, err := pbutil.ReadDelimited(d.r, v) - if err != nil { + opts := protodelim.UnmarshalOptions{ + MaxSize: -1, + } + if err := opts.UnmarshalFrom(bufio.NewReader(d.r), v); err != nil { return err } if !model.IsValidMetricName(model.LabelValue(v.GetName())) { diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index ca214060..8fd80618 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,10 +18,12 @@ import ( "io" "net/http" - "github.com/matttproud/golang_protobuf_extensions/v2/pbutil" - "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + "google.golang.org/protobuf/encoding/protodelim" "google.golang.org/protobuf/encoding/prototext" + "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + "github.com/prometheus/common/model" + dto "github.com/prometheus/client_model/go" ) @@ -60,23 +62,32 @@ func (ec encoderCloser) Close() error { // as the support is still experimental. To include the option to negotiate // FmtOpenMetrics, use NegotiateOpenMetrics. func Negotiate(h http.Header) Format { + escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { + switch Format(escapeParam) { + case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: + escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + default: + // If the escaping parameter is unknown, ignore it. + } + } ver := ac.Params["version"] if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return FmtProtoDelim + return fmtProtoDelim + escapingScheme case "text": - return FmtProtoText + return fmtProtoText + escapingScheme case "compact-text": - return FmtProtoCompact + return fmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText + return fmtText + escapingScheme } } - return FmtText + return fmtText + escapingScheme } // NegotiateIncludingOpenMetrics works like Negotiate but includes @@ -84,29 +95,40 @@ func Negotiate(h http.Header) Format { // temporary and will disappear once FmtOpenMetrics is fully supported and as // such may be negotiated by the normal Negotiate function. 
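Both negotiation helpers in encode.go now read an optional escaping parameter from the Accept header and append the chosen scheme to the Format they return. A hypothetical caller exercising that path — only symbols visible in this diff are used, and exact output strings are deliberately not asserted:

package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/common/expfmt"
)

// A client can steer name escaping through the Accept header; the chosen
// scheme travels back as an "; escaping=..." suffix on the negotiated Format.
func main() {
	h := http.Header{}
	h.Set("Accept", "text/plain; version=0.0.4; escaping=allow-utf-8")

	format := expfmt.Negotiate(h)
	fmt.Printf("format: %q\n", format)        // text/plain content type plus an escaping suffix
	fmt.Println("type:", format.FormatType()) // resolves to TypeTextPlain
}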
func NegotiateIncludingOpenMetrics(h http.Header) Format { + escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { + switch Format(escapeParam) { + case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: + escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + default: + // If the escaping parameter is unknown, ignore it. + } + } ver := ac.Params["version"] if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return FmtProtoDelim + return fmtProtoDelim + escapingScheme case "text": - return FmtProtoText + return fmtProtoText + escapingScheme case "compact-text": - return FmtProtoCompact + return fmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText + return fmtText + escapingScheme } if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { - if ver == OpenMetricsVersion_1_0_0 { - return FmtOpenMetrics_1_0_0 + switch ver { + case OpenMetricsVersion_1_0_0: + return fmtOpenMetrics_1_0_0 + escapingScheme + default: + return fmtOpenMetrics_0_0_1 + escapingScheme } - return FmtOpenMetrics_0_0_1 } } - return FmtText + return fmtText + escapingScheme } // NewEncoder returns a new encoder based on content type negotiation. All @@ -115,44 +137,48 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { // for FmtOpenMetrics, but a future (breaking) release will add the Close method // to the Encoder interface directly. The current version of the Encoder // interface is kept for backwards compatibility. +// In cases where the Format does not allow for UTF-8 names, the global +// NameEscapingScheme will be applied. 
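NewEncoder, rewritten just below, switches on FormatType and passes every metric family through model.EscapeMetricFamily before writing. A hypothetical /metrics handler wiring negotiation and encoding together; the metric family is a made-up fixture and error handling is elided:

package main

import (
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func metricsHandler(w http.ResponseWriter, r *http.Request) {
	format := expfmt.Negotiate(r.Header) // carries any escaping suffix negotiated above
	w.Header().Set("Content-Type", string(format))

	enc := expfmt.NewEncoder(w, format) // escaping is applied inside the encoder
	mf := &dto.MetricFamily{
		Name: proto.String("demo_requests_total"),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{
			{Counter: &dto.Counter{Value: proto.Float64(1)}},
		},
	}
	_ = enc.Encode(mf)
}

func main() {
	http.HandleFunc("/metrics", metricsHandler)
	_ = http.ListenAndServe(":9100", nil)
}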
func NewEncoder(w io.Writer, format Format) Encoder { - switch format { - case FmtProtoDelim: + escapingScheme := format.ToEscapingScheme() + + switch format.FormatType() { + case TypeProtoDelim: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := pbutil.WriteDelimited(w, v) + _, err := protodelim.MarshalTo(w, v) return err }, close: func() error { return nil }, } - case FmtProtoCompact: + case TypeProtoCompact: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, v.String()) + _, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String()) return err }, close: func() error { return nil }, } - case FmtProtoText: + case TypeProtoText: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, prototext.Format(v)) + _, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme))) return err }, close: func() error { return nil }, } - case FmtText: + case TypeTextPlain: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, v) + _, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme)) return err }, close: func() error { return nil }, } - case FmtOpenMetrics_0_0_1, FmtOpenMetrics_1_0_0: + case TypeOpenMetrics: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToOpenMetrics(w, v) + _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme)) return err }, close: func() error { diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index c4cb20f0..6fc9555e 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -14,30 +14,154 @@ // Package expfmt contains tools for reading and writing Prometheus metrics. package expfmt +import ( + "strings" + + "github.com/prometheus/common/model" +) + // Format specifies the HTTP content type of the different wire protocols. type Format string -// Constants to assemble the Content-Type values for the different wire protocols. +// Constants to assemble the Content-Type values for the different wire +// protocols. The Content-Type strings here are all for the legacy exposition +// formats, where valid characters for metric names and label names are limited. +// Support for arbitrary UTF-8 characters in those names is already partially +// implemented in this module (see model.ValidationScheme), but to actually use +// it on the wire, new content-type strings will have to be agreed upon and +// added here. const ( TextVersion = "0.0.4" ProtoType = `application/vnd.google.protobuf` ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" OpenMetricsType = `application/openmetrics-text` OpenMetricsVersion_0_0_1 = "0.0.1" OpenMetricsVersion_1_0_0 = "1.0.0" - // The Content-Type values for the different wire protocols. 
- FmtUnknown Format = `` - FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` - FmtProtoText Format = ProtoFmt + ` encoding=text` - FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` - FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` - FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` + // The Content-Type values for the different wire protocols. Note that these + // values are now unexported. If code was relying on comparisons to these + // constants, instead use FormatType(). + fmtUnknown Format = `` + fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + fmtProtoDelim Format = protoFmt + ` encoding=delimited` + fmtProtoText Format = protoFmt + ` encoding=text` + fmtProtoCompact Format = protoFmt + ` encoding=compact-text` + fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) const ( hdrContentType = "Content-Type" hdrAccept = "Accept" ) + +// FormatType is a Go enum representing the overall category for the given +// Format. As the number of Format permutations increases, doing basic string +// comparisons are not feasible, so this enum captures the most useful +// high-level attribute of the Format string. +type FormatType int + +const ( + TypeUnknown = iota + TypeProtoCompact + TypeProtoDelim + TypeProtoText + TypeTextPlain + TypeOpenMetrics +) + +// NewFormat generates a new Format from the type provided. Mostly used for +// tests, most Formats should be generated as part of content negotiation in +// encode.go. +func NewFormat(t FormatType) Format { + switch t { + case TypeProtoCompact: + return fmtProtoCompact + case TypeProtoDelim: + return fmtProtoDelim + case TypeProtoText: + return fmtProtoText + case TypeTextPlain: + return fmtText + case TypeOpenMetrics: + return fmtOpenMetrics_1_0_0 + default: + return fmtUnknown + } +} + +// FormatType deduces an overall FormatType for the given format. +func (f Format) FormatType() FormatType { + toks := strings.Split(string(f), ";") + if len(toks) < 2 { + return TypeUnknown + } + + params := make(map[string]string) + for i, t := range toks { + if i == 0 { + continue + } + args := strings.Split(t, "=") + if len(args) != 2 { + continue + } + params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1]) + } + + switch strings.TrimSpace(toks[0]) { + case ProtoType: + if params["proto"] != ProtoProtocol { + return TypeUnknown + } + switch params["encoding"] { + case "delimited": + return TypeProtoDelim + case "text": + return TypeProtoText + case "compact-text": + return TypeProtoCompact + default: + return TypeUnknown + } + case OpenMetricsType: + if params["charset"] != "utf-8" { + return TypeUnknown + } + return TypeOpenMetrics + case "text/plain": + v, ok := params["version"] + if !ok { + return TypeTextPlain + } + if v == TextVersion { + return TypeTextPlain + } + return TypeUnknown + default: + return TypeUnknown + } +} + +// ToEscapingScheme returns an EscapingScheme depending on the Format. Iff the +// Format contains a escaping=allow-utf-8 term, it will select NoEscaping. If a valid +// "escaping" term exists, that will be used. Otherwise, the global default will +// be returned. 
+func (format Format) ToEscapingScheme() model.EscapingScheme { + for _, p := range strings.Split(string(format), ";") { + toks := strings.Split(p, "=") + if len(toks) != 2 { + continue + } + key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1]) + if key == model.EscapingKey { + scheme, err := model.ToEscapingScheme(value) + if err != nil { + return model.NameEscapingScheme + } + return scheme + } + } + return model.NameEscapingScheme +} diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 21cdddcf..5622578e 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -35,6 +35,18 @@ import ( // sanity checks. If the input contains duplicate metrics or invalid metric or // label names, the conversion will result in invalid text format output. // +// If metric names conform to the legacy validation pattern, they will be placed +// outside the brackets in the traditional way, like `foo{}`. If the metric name +// fails the legacy validation check, it will be placed quoted inside the +// brackets: `{"foo"}`. As stated above, the input is assumed to be santized and +// no error will be thrown in this case. +// +// Similar to metric names, if label names conform to the legacy validation +// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label +// name fails the legacy validation check, it will be quoted: +// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and +// no error will be thrown in this case. +// // This function fulfills the type 'expfmt.encoder'. // // Note that OpenMetrics requires a final `# EOF` line. Since this function acts @@ -98,7 +110,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } - n, err = w.WriteString(shortName) + n, err = writeName(w, shortName) written += n if err != nil { return @@ -124,7 +136,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } - n, err = w.WriteString(shortName) + n, err = writeName(w, shortName) written += n if err != nil { return @@ -303,21 +315,9 @@ func writeOpenMetricsSample( floatValue float64, intValue uint64, useIntValue bool, exemplar *dto.Exemplar, ) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeOpenMetricsLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, + written := 0 + n, err := writeOpenMetricsNameAndLabelPairs( + w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue, ) written += n if err != nil { @@ -365,27 +365,58 @@ func writeOpenMetricsSample( return written, nil } -// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float -// in OpenMetrics style. -func writeOpenMetricsLabelPairs( +// writeOpenMetricsNameAndLabelPairs works like writeOpenMetricsSample but +// formats the float in OpenMetrics style. 
+func writeOpenMetricsNameAndLabelPairs( w enhancedWriter, + name string, in []*dto.LabelPair, additionalLabelName string, additionalLabelValue float64, ) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } var ( - written int - separator byte = '{' + written int + separator byte = '{' + metricInsideBraces = false ) + + if name != "" { + // If the name does not pass the legacy validity check, we must put the + // metric name inside the braces, quoted. + if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + metricInsideBraces = true + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + separator = ',' + } + + n, err := writeName(w, name) + written += n + if err != nil { + return written, err + } + } + + if len(in) == 0 && additionalLabelName == "" { + if metricInsideBraces { + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + } + return written, nil + } + for _, lp := range in { err := w.WriteByte(separator) written++ if err != nil { return written, err } - n, err := w.WriteString(lp.GetName()) + n, err := writeName(w, lp.GetName()) written += n if err != nil { return written, err @@ -451,7 +482,7 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { if err != nil { return written, err } - n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0) + n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0) written += n if err != nil { return written, err diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 2946b8f1..f9b8265a 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -62,6 +62,18 @@ var ( // contains duplicate metrics or invalid metric or label names, the conversion // will result in invalid text format output. // +// If metric names conform to the legacy validation pattern, they will be placed +// outside the brackets in the traditional way, like `foo{}`. If the metric name +// fails the legacy validation check, it will be placed quoted inside the +// brackets: `{"foo"}`. As stated above, the input is assumed to be santized and +// no error will be thrown in this case. +// +// Similar to metric names, if label names conform to the legacy validation +// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label +// name fails the legacy validation check, it will be quoted: +// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and +// no error will be thrown in this case. +// // This method fulfills the type 'prometheus.encoder'. func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { // Fail-fast checks. 
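The text and OpenMetrics encoders above now route every metric and label name through the new `writeName` helper, which quotes any name that fails the legacy validity check. Below is a minimal sketch of what that looks like from the caller's side, assuming the vendored `expfmt` and `client_model` packages from this update; the dotted metric name and the expected output shape are illustrative, inferred from the doc comments in this diff rather than from a compiled run:

```go
package main

import (
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Hypothetical metric family whose name contains dots. Dots are not
	// legacy-valid, so writeName should emit the name quoted inside braces.
	mf := &dto.MetricFamily{
		Name: proto.String("http.requests.total"),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{
				{Name: proto.String("code"), Value: proto.String("200")},
			},
			Counter: &dto.Counter{Value: proto.Float64(42)},
		}},
	}

	// Per the doc comments above, the sample line should come out roughly as:
	//   {"http.requests.total",code="200"} 42
	// with the name also quoted in the "# TYPE" line.
	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
		panic(err)
	}
}
```

Encoders obtained via `NewEncoder` behave the same way, except that they first pass the family through `model.EscapeMetricFamily` using the scheme derived from the negotiated `Format` via `ToEscapingScheme`.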
@@ -98,7 +110,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e if err != nil { return } - n, err = w.WriteString(name) + n, err = writeName(w, name) written += n if err != nil { return @@ -124,7 +136,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e if err != nil { return } - n, err = w.WriteString(name) + n, err = writeName(w, name) written += n if err != nil { return @@ -280,21 +292,9 @@ func writeSample( additionalLabelName string, additionalLabelValue float64, value float64, ) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, + written := 0 + n, err := writeNameAndLabelPairs( + w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue, ) written += n if err != nil { @@ -330,32 +330,64 @@ func writeSample( return written, nil } -// writeLabelPairs converts a slice of LabelPair proto messages plus the -// explicitly given additional label pair into text formatted as required by the -// text format and writes it to 'w'. An empty slice in combination with an empty -// string 'additionalLabelName' results in nothing being written. Otherwise, the -// label pairs are written, escaped as required by the text format, and enclosed -// in '{...}'. The function returns the number of bytes written and any error -// encountered. -func writeLabelPairs( +// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the +// explicitly given metric name and additional label pair into text formatted as +// required by the text format and writes it to 'w'. An empty slice in +// combination with an empty string 'additionalLabelName' results in nothing +// being written. Otherwise, the label pairs are written, escaped as required by +// the text format, and enclosed in '{...}'. The function returns the number of +// bytes written and any error encountered. If the metric name is not +// legacy-valid, it will be put inside the brackets as well. Legacy-invalid +// label names will also be quoted. +func writeNameAndLabelPairs( w enhancedWriter, + name string, in []*dto.LabelPair, additionalLabelName string, additionalLabelValue float64, ) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } var ( - written int - separator byte = '{' + written int + separator byte = '{' + metricInsideBraces = false ) + + if name != "" { + // If the name does not pass the legacy validity check, we must put the + // metric name inside the braces. 
+ if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + metricInsideBraces = true + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + separator = ',' + } + n, err := writeName(w, name) + written += n + if err != nil { + return written, err + } + } + + if len(in) == 0 && additionalLabelName == "" { + if metricInsideBraces { + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + } + return written, nil + } + for _, lp := range in { err := w.WriteByte(separator) written++ if err != nil { return written, err } - n, err := w.WriteString(lp.GetName()) + n, err := writeName(w, lp.GetName()) written += n if err != nil { return written, err @@ -462,3 +494,27 @@ func writeInt(w enhancedWriter, i int64) (int, error) { numBufPool.Put(bp) return written, err } + +// writeName writes a string as-is if it complies with the legacy naming +// scheme, or escapes it in double quotes if not. +func writeName(w enhancedWriter, name string) (int, error) { + if model.IsValidLegacyMetricName(model.LabelValue(name)) { + return w.WriteString(name) + } + var written int + var err error + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + var n int + n, err = writeEscapedString(w, name, true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + return written, err +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 35db1cc9..26490211 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -16,6 +16,7 @@ package expfmt import ( "bufio" "bytes" + "errors" "fmt" "io" "math" @@ -24,8 +25,9 @@ import ( dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" "google.golang.org/protobuf/proto" + + "github.com/prometheus/common/model" ) // A stateFn is a function that represents a state in a state machine. By @@ -112,7 +114,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF // stream. Turn this error into something nicer and more // meaningful. (io.EOF is often used as a signal for the legitimate end // of an input stream.) - if p.err == io.EOF { + if p.err != nil && errors.Is(p.err, io.EOF) { p.parseError("unexpected end of input stream") } return p.metricFamiliesByName, p.err @@ -146,7 +148,7 @@ func (p *TextParser) startOfLine() stateFn { // which is not an error but the signal that we are done. // Any other error that happens to align with the start of // a line is still an error. 
- if p.err == io.EOF { + if errors.Is(p.err, io.EOF) { p.err = nil } return nil diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index 35e739c7..178fdbaf 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -90,13 +90,13 @@ func (a *Alert) Validate() error { return fmt.Errorf("start time must be before end time") } if err := a.Labels.Validate(); err != nil { - return fmt.Errorf("invalid label set: %s", err) + return fmt.Errorf("invalid label set: %w", err) } if len(a.Labels) == 0 { return fmt.Errorf("at least one label pair required") } if err := a.Annotations.Validate(); err != nil { - return fmt.Errorf("invalid annotations: %s", err) + return fmt.Errorf("invalid annotations: %w", err) } return nil } diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index ef895633..3317ce22 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -97,17 +97,25 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string -// IsValid is true iff the label name matches the pattern of LabelNameRE. This -// method, however, does not use LabelNameRE for the check but a much faster -// hardcoded implementation. +// IsValid returns true iff name matches the pattern of LabelNameRE for legacy +// names, and iff it's valid UTF-8 if NameValidationScheme is set to +// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the +// check but a much faster hardcoded implementation. func (ln LabelName) IsValid() bool { if len(ln) == 0 { return false } - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false + switch NameValidationScheme { + case LegacyValidation: + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } } + case UTF8Validation: + return utf8.ValidString(string(ln)) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) } return true } @@ -164,7 +172,7 @@ func (l LabelNames) String() string { // A LabelValue is an associated value for a LabelName. type LabelValue string -// IsValid returns true iff the string is a valid UTF8. +// IsValid returns true iff the string is a valid UTF-8. func (lv LabelValue) IsValid() bool { return utf8.ValidString(string(lv)) } diff --git a/vendor/github.com/prometheus/common/model/metadata.go b/vendor/github.com/prometheus/common/model/metadata.go new file mode 100644 index 00000000..447ab8ad --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metadata.go @@ -0,0 +1,28 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +// MetricType represents metric type values. +type MetricType string + +const ( + MetricTypeCounter = MetricType("counter") + MetricTypeGauge = MetricType("gauge") + MetricTypeHistogram = MetricType("histogram") + MetricTypeGaugeHistogram = MetricType("gaugehistogram") + MetricTypeSummary = MetricType("summary") + MetricTypeInfo = MetricType("info") + MetricTypeStateset = MetricType("stateset") + MetricTypeUnknown = MetricType("unknown") +) diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index 00804b7f..0bd29b3a 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -18,15 +18,84 @@ import ( "regexp" "sort" "strings" + "unicode/utf8" + + dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" ) var ( - // MetricNameRE is a regular expression matching valid metric - // names. Note that the IsValidMetricName function performs the same - // check but faster than a match with this regular expression. - MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) + // NameValidationScheme determines the method of name validation to be used by + // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode + // in isolation from other components that don't support UTF-8 may result in + // bugs or other undefined behavior. This value is intended to be set by + // UTF-8-aware binaries as part of their startup. To avoid need for locking, + // this value should be set once, ideally in an init(), before multiple + // goroutines are started. + NameValidationScheme = LegacyValidation + + // NameEscapingScheme defines the default way that names will be + // escaped when presented to systems that do not support UTF-8 names. If the + // Content-Type "escaping" term is specified, that will override this value. + NameEscapingScheme = ValueEncodingEscaping ) +// ValidationScheme is a Go enum for determining how metric and label names will +// be validated by this library. +type ValidationScheme int + +const ( + // LegacyValidation is a setting that requirets that metric and label names + // conform to the original Prometheus character requirements described by + // MetricNameRE and LabelNameRE. + LegacyValidation ValidationScheme = iota + + // UTF8Validation only requires that metric and label names be valid UTF-8 + // strings. + UTF8Validation +) + +type EscapingScheme int + +const ( + // NoEscaping indicates that a name will not be escaped. Unescaped names that + // do not conform to the legacy validity check will use a new exposition + // format syntax that will be officially standardized in future versions. + NoEscaping EscapingScheme = iota + + // UnderscoreEscaping replaces all legacy-invalid characters with underscores. + UnderscoreEscaping + + // DotsEscaping is similar to UnderscoreEscaping, except that dots are + // converted to `_dot_` and pre-existing underscores are converted to `__`. + DotsEscaping + + // ValueEncodingEscaping prepends the name with `U__` and replaces all invalid + // characters with the unicode value, surrounded by underscores. Single + // underscores are replaced with double underscores. + ValueEncodingEscaping +) + +const ( + // EscapingKey is the key in an Accept or Content-Type header that defines how + // metric and label names that do not conform to the legacy character + // requirements should be escaped when being scraped by a legacy prometheus + // system. 
If a system does not explicitly pass an escaping parameter in the + // Accept header, the default NameEscapingScheme will be used. + EscapingKey = "escaping" + + // Possible values for Escaping Key: + AllowUTF8 = "allow-utf-8" // No escaping required. + EscapeUnderscores = "underscores" + EscapeDots = "dots" + EscapeValues = "values" +) + +// MetricNameRE is a regular expression matching valid metric +// names. Note that the IsValidMetricName function performs the same +// check but faster than a match with this regular expression. +var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) + // A Metric is similar to a LabelSet, but the key difference is that a Metric is // a singleton and refers to one and only one stream of samples. type Metric LabelSet @@ -86,17 +155,302 @@ func (m Metric) FastFingerprint() Fingerprint { return LabelSet(m).FastFingerprint() } -// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE +// for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is +// selected. +func IsValidMetricName(n LabelValue) bool { + switch NameValidationScheme { + case LegacyValidation: + return IsValidLegacyMetricName(n) + case UTF8Validation: + if len(n) == 0 { + return false + } + return utf8.ValidString(string(n)) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) + } +} + +// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the +// legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. -func IsValidMetricName(n LabelValue) bool { +func IsValidLegacyMetricName(n LabelValue) bool { if len(n) == 0 { return false } for i, b := range n { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + if !isValidLegacyRune(b, i) { return false } } return true } + +// EscapeMetricFamily escapes the given metric names and labels with the given +// escaping scheme. Returns a new object that uses the same pointers to fields +// when possible and creates new escaped versions so as not to mutate the +// input. +func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily { + if v == nil { + return nil + } + + if scheme == NoEscaping { + return v + } + + out := &dto.MetricFamily{ + Help: v.Help, + Type: v.Type, + } + + // If the name is nil, copy as-is, don't try to escape. 
+ if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) { + out.Name = v.Name + } else { + out.Name = proto.String(EscapeName(v.GetName(), scheme)) + } + for _, m := range v.Metric { + if !metricNeedsEscaping(m) { + out.Metric = append(out.Metric, m) + continue + } + + escaped := &dto.Metric{ + Gauge: m.Gauge, + Counter: m.Counter, + Summary: m.Summary, + Untyped: m.Untyped, + Histogram: m.Histogram, + TimestampMs: m.TimestampMs, + } + + for _, l := range m.Label { + if l.GetName() == MetricNameLabel { + if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) { + escaped.Label = append(escaped.Label, l) + continue + } + escaped.Label = append(escaped.Label, &dto.LabelPair{ + Name: proto.String(MetricNameLabel), + Value: proto.String(EscapeName(l.GetValue(), scheme)), + }) + continue + } + if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) { + escaped.Label = append(escaped.Label, l) + continue + } + escaped.Label = append(escaped.Label, &dto.LabelPair{ + Name: proto.String(EscapeName(l.GetName(), scheme)), + Value: l.Value, + }) + } + out.Metric = append(out.Metric, escaped) + } + return out +} + +func metricNeedsEscaping(m *dto.Metric) bool { + for _, l := range m.Label { + if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) { + return true + } + if !IsValidLegacyMetricName(LabelValue(l.GetName())) { + return true + } + } + return false +} + +const ( + lowerhex = "0123456789abcdef" +) + +// EscapeName escapes the incoming name according to the provided escaping +// scheme. Depending on the rules of escaping, this may cause no change in the +// string that is returned. (Especially NoEscaping, which by definition is a +// noop). This function does not do any validation of the name. +func EscapeName(name string, scheme EscapingScheme) string { + if len(name) == 0 { + return name + } + var escaped strings.Builder + switch scheme { + case NoEscaping: + return name + case UnderscoreEscaping: + if IsValidLegacyMetricName(LabelValue(name)) { + return name + } + for i, b := range name { + if isValidLegacyRune(b, i) { + escaped.WriteRune(b) + } else { + escaped.WriteRune('_') + } + } + return escaped.String() + case DotsEscaping: + // Do not early return for legacy valid names, we still escape underscores. + for i, b := range name { + if b == '_' { + escaped.WriteString("__") + } else if b == '.' { + escaped.WriteString("_dot_") + } else if isValidLegacyRune(b, i) { + escaped.WriteRune(b) + } else { + escaped.WriteRune('_') + } + } + return escaped.String() + case ValueEncodingEscaping: + if IsValidLegacyMetricName(LabelValue(name)) { + return name + } + escaped.WriteString("U__") + for i, b := range name { + if isValidLegacyRune(b, i) { + escaped.WriteRune(b) + } else if !utf8.ValidRune(b) { + escaped.WriteString("_FFFD_") + } else if b < 0x100 { + escaped.WriteRune('_') + for s := 4; s >= 0; s -= 4 { + escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) + } + escaped.WriteRune('_') + } else if b < 0x10000 { + escaped.WriteRune('_') + for s := 12; s >= 0; s -= 4 { + escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) + } + escaped.WriteRune('_') + } + } + return escaped.String() + default: + panic(fmt.Sprintf("invalid escaping scheme %d", scheme)) + } +} + +// lower function taken from strconv.atoi +func lower(c byte) byte { + return c | ('x' - 'X') +} + +// UnescapeName unescapes the incoming name according to the provided escaping +// scheme if possible. Some schemes are partially or totally non-roundtripable. 
+// If any error is enountered, returns the original input. +func UnescapeName(name string, scheme EscapingScheme) string { + if len(name) == 0 { + return name + } + switch scheme { + case NoEscaping: + return name + case UnderscoreEscaping: + // It is not possible to unescape from underscore replacement. + return name + case DotsEscaping: + name = strings.ReplaceAll(name, "_dot_", ".") + name = strings.ReplaceAll(name, "__", "_") + return name + case ValueEncodingEscaping: + escapedName, found := strings.CutPrefix(name, "U__") + if !found { + return name + } + + var unescaped strings.Builder + TOP: + for i := 0; i < len(escapedName); i++ { + // All non-underscores are treated normally. + if escapedName[i] != '_' { + unescaped.WriteByte(escapedName[i]) + continue + } + i++ + if i >= len(escapedName) { + return name + } + // A double underscore is a single underscore. + if escapedName[i] == '_' { + unescaped.WriteByte('_') + continue + } + // We think we are in a UTF-8 code, process it. + var utf8Val uint + for j := 0; i < len(escapedName); j++ { + // This is too many characters for a utf8 value. + if j > 4 { + return name + } + // Found a closing underscore, convert to a rune, check validity, and append. + if escapedName[i] == '_' { + utf8Rune := rune(utf8Val) + if !utf8.ValidRune(utf8Rune) { + return name + } + unescaped.WriteRune(utf8Rune) + continue TOP + } + r := lower(escapedName[i]) + utf8Val *= 16 + if r >= '0' && r <= '9' { + utf8Val += uint(r) - '0' + } else if r >= 'a' && r <= 'f' { + utf8Val += uint(r) - 'a' + 10 + } else { + return name + } + i++ + } + // Didn't find closing underscore, invalid. + return name + } + return unescaped.String() + default: + panic(fmt.Sprintf("invalid escaping scheme %d", scheme)) + } +} + +func isValidLegacyRune(b rune, i int) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0) +} + +func (e EscapingScheme) String() string { + switch e { + case NoEscaping: + return AllowUTF8 + case UnderscoreEscaping: + return EscapeUnderscores + case DotsEscaping: + return EscapeDots + case ValueEncodingEscaping: + return EscapeValues + default: + panic(fmt.Sprintf("unknown format scheme %d", e)) + } +} + +func ToEscapingScheme(s string) (EscapingScheme, error) { + if s == "" { + return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme") + } + switch s { + case AllowUTF8: + return NoEscaping, nil + case EscapeUnderscores: + return UnderscoreEscaping, nil + case EscapeDots: + return DotsEscaping, nil + case EscapeValues: + return ValueEncodingEscaping, nil + default: + return NoEscaping, fmt.Errorf("unknown format scheme " + s) + } +} diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go index 8762b13c..dc8a0026 100644 --- a/vendor/github.com/prometheus/common/model/signature.go +++ b/vendor/github.com/prometheus/common/model/signature.go @@ -22,10 +22,8 @@ import ( // when calculating their combined hash value (aka signature aka fingerprint). const SeparatorByte byte = 255 -var ( - // cache the signature of an empty label set. - emptyLabelSignature = hashNew() -) +// cache the signature of an empty label set. +var emptyLabelSignature = hashNew() // LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a // given label set. 
(Collisions are possible but unlikely if the number of label diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go index bb99889d..910b0b71 100644 --- a/vendor/github.com/prometheus/common/model/silence.go +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -81,7 +81,7 @@ func (s *Silence) Validate() error { } for _, m := range s.Matchers { if err := m.Validate(); err != nil { - return fmt.Errorf("invalid matcher: %s", err) + return fmt.Errorf("invalid matcher: %w", err) } } if s.StartsAt.IsZero() { diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index 9eb44041..8050637d 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -21,14 +21,12 @@ import ( "strings" ) -var ( - // ZeroSample is the pseudo zero-value of Sample used to signal a - // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, - // and metric nil. Note that the natural zero value of Sample has a timestamp - // of 0, which is possible to appear in a real Sample and thus not suitable - // to signal a non-existing Sample. - ZeroSample = Sample{Timestamp: Earliest} -) +// ZeroSample is the pseudo zero-value of Sample used to signal a +// non-existing sample. It is a Sample with timestamp Earliest, value 0.0, +// and metric nil. Note that the natural zero value of Sample has a timestamp +// of 0, which is possible to appear in a real Sample and thus not suitable +// to signal a non-existing Sample. +var ZeroSample = Sample{Timestamp: Earliest} // Sample is a sample pair associated with a metric. A single sample must either // define Value or Histogram but not both. Histogram == nil implies the Value @@ -274,7 +272,7 @@ func (s *Scalar) UnmarshalJSON(b []byte) error { value, err := strconv.ParseFloat(f, 64) if err != nil { - return fmt.Errorf("error parsing sample value: %s", err) + return fmt.Errorf("error parsing sample value: %w", err) } s.Value = SampleValue(value) return nil diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go index 0f615a70..ae35cc2a 100644 --- a/vendor/github.com/prometheus/common/model/value_float.go +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -20,14 +20,12 @@ import ( "strconv" ) -var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. - ZeroSamplePair = SamplePair{Timestamp: Earliest} -) +// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a +// non-existing sample pair. It is a SamplePair with timestamp Earliest and +// value 0.0. Note that the natural zero value of SamplePair has a timestamp +// of 0, which is possible to appear in a real SamplePair and thus not +// suitable to signal a non-existing SamplePair. +var ZeroSamplePair = SamplePair{Timestamp: Earliest} // A SampleValue is a representation of a value for a given sample at a given // time. 
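Rounding out the model changes above, the new `EscapeName`/`UnescapeName` pair (together with `NameEscapingScheme` and `ToEscapingScheme`) is what the encoders lean on when a downstream system cannot accept UTF-8 names. A small sketch of the round trip, assuming `ValueEncodingEscaping`; the sample name and the expected escaped form are illustrative, derived by reading the implementation in this diff:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	name := "http.requests.total" // dots are not legacy-valid

	// Value-encoding escaping prefixes the name with "U__" and replaces each
	// invalid rune with its hex code point wrapped in underscores, so this
	// should yield something like "U__http_2e_requests_2e_total".
	escaped := model.EscapeName(name, model.ValueEncodingEscaping)
	fmt.Println(escaped)

	// Unlike UnderscoreEscaping, this scheme is intended to be reversible.
	fmt.Println(model.UnescapeName(escaped, model.ValueEncodingEscaping) == name)
}
```

On the wire, the chosen scheme is communicated through the `escaping` parameter of the Accept/Content-Type header that `Negotiate` now parses, falling back to the global `model.NameEscapingScheme` when the client does not specify one.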
diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_linux.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_linux.go index d96109fa..ada9f9e7 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_linux.go @@ -507,8 +507,8 @@ func SerialNumberWithContext(ctx context.Context, name string) (string, error) { if err := unix.Stat(name, &stat); err != nil { return "", err } - major := unix.Major(stat.Rdev) - minor := unix.Minor(stat.Rdev) + major := unix.Major(uint64(stat.Rdev)) + minor := unix.Minor(uint64(stat.Rdev)) sserial, _ := udevData(ctx, major, minor, "E:ID_SERIAL") if sserial != "" { @@ -541,8 +541,8 @@ func LabelWithContext(ctx context.Context, name string) (string, error) { if err := unix.Stat(common.HostDevWithContext(ctx, name), &stat); err != nil { return "", err } - major := unix.Major(stat.Rdev) - minor := unix.Minor(stat.Rdev) + major := unix.Major(uint64(stat.Rdev)) + minor := unix.Minor(uint64(stat.Rdev)) label, err := udevData(ctx, major, minor, "E:ID_FS_LABEL") if err != nil { diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go index 99ed6a58..5e25e507 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go @@ -343,7 +343,7 @@ func PathExistsWithContents(filename string) bool { if err != nil { return false } - return info.Size() > 4 // at least 4 bytes + return info.Size() > 4 && !info.IsDir() // at least 4 bytes } // GetEnvWithContext retrieves the environment variable key. If it does not exist it returns the default. diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go index a644687b..a429e16a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go @@ -12,10 +12,14 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "syscall" "time" ) +// cachedBootTime must be accessed via atomic.Load/StoreUint64 +var cachedBootTime uint64 + func DoSysctrl(mib string) ([]string, error) { cmd := exec.Command("sysctl", "-n", mib) cmd.Env = getSysctrlEnv(os.Environ()) @@ -56,7 +60,14 @@ func NumProcsWithContext(ctx context.Context) (uint64, error) { return cnt, nil } -func BootTimeWithContext(ctx context.Context) (uint64, error) { +func BootTimeWithContext(ctx context.Context, enableCache bool) (uint64, error) { + if enableCache { + t := atomic.LoadUint64(&cachedBootTime) + if t != 0 { + return t, nil + } + } + system, role, err := VirtualizationWithContext(ctx) if err != nil { return 0, err @@ -72,7 +83,13 @@ func BootTimeWithContext(ctx context.Context) (uint64, error) { } if useStatFile { - return readBootTimeStat(ctx) + t, err := readBootTimeStat(ctx) + if err != nil { + return 0, err + } + if enableCache { + atomic.StoreUint64(&cachedBootTime, t) + } } filename := HostProcWithContext(ctx, "uptime") @@ -90,6 +107,11 @@ func BootTimeWithContext(ctx context.Context) (uint64, error) { } currentTime := float64(time.Now().UnixNano()) / float64(time.Second) t := currentTime - b + + if enableCache { + atomic.StoreUint64(&cachedBootTime, uint64(t)) + } + return uint64(t), nil } @@ -307,7 +329,7 @@ func GetOSReleaseWithContext(ctx context.Context) (platform string, version stri switch field[0] { case "ID": // use ID for lowercase platform = 
trimQuotes(field[1]) - case "VERSION": + case "VERSION_ID": version = trimQuotes(field[1]) } } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process.go b/vendor/github.com/shirou/gopsutil/v3/process/process.go index 1a7fe1b8..1bb27abf 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process.go @@ -171,6 +171,13 @@ func (p NumCtxSwitchesStat) String() string { return string(s) } +var enableBootTimeCache bool + +// EnableBootTimeCache change cache behavior of BootTime. If true, cache BootTime value. Default is false. +func EnableBootTimeCache(enable bool) { + enableBootTimeCache = enable +} + // Pids returns a slice of process ID list which are running now. func Pids() ([]int32, error) { return PidsWithContext(context.Background()) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go b/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go index f7989cd2..557435b3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go @@ -1071,7 +1071,7 @@ func (p *Process) fillFromTIDStatWithContext(ctx context.Context, tid int32) (ui Iowait: iotime / float64(clockTicks), } - bootTime, _ := common.BootTimeWithContext(ctx) + bootTime, _ := common.BootTimeWithContext(ctx, enableBootTimeCache) t, err := strconv.ParseUint(fields[22], 10, 64) if err != nil { return 0, 0, nil, 0, 0, 0, nil, err diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go b/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go index 14ed0309..f2053d98 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go @@ -253,7 +253,7 @@ func pidsWithContext(ctx context.Context) ([]int32, error) { if err := windows.EnumProcesses(ps, &read); err != nil { return nil, err } - if uint32(len(ps)) == read { // ps buffer was too small to host every results, retry with a bigger one + if uint32(len(ps)) == read/dwordSize { // ps buffer was too small to host every results, retry with a bigger one psSize += 1024 continue } diff --git a/vendor/github.com/swaggo/swag/Dockerfile b/vendor/github.com/swaggo/swag/Dockerfile index 170d0c69..410fe31e 100644 --- a/vendor/github.com/swaggo/swag/Dockerfile +++ b/vendor/github.com/swaggo/swag/Dockerfile @@ -1,7 +1,7 @@ # Dockerfile References: https://docs.docker.com/engine/reference/builder/ # Start from the latest golang base image -FROM golang:1.18.3-alpine as builder +FROM golang:1.20-alpine as builder # Set the Current Working Directory inside the container WORKDIR /app @@ -22,7 +22,9 @@ RUN CGO_ENABLED=0 GOOS=linux go build -v -a -installsuffix cgo -o swag cmd/swag/ ######## Start a new stage from scratch ####### FROM scratch -WORKDIR /root/ +WORKDIR /code/ # Copy the Pre-built binary file from the previous stage -COPY --from=builder /app/swag . 
+COPY --from=builder /app/swag /bin/swag + +ENTRYPOINT ["/bin/swag"] diff --git a/vendor/github.com/swaggo/swag/Makefile b/vendor/github.com/swaggo/swag/Makefile index 04e6514e..0d8175da 100644 --- a/vendor/github.com/swaggo/swag/Makefile +++ b/vendor/github.com/swaggo/swag/Makefile @@ -6,6 +6,7 @@ GOBUILD:=$(GOCMD) build GOINSTALL:=$(GOCMD) install GOCLEAN:=$(GOCMD) clean GOTEST:=$(GOCMD) test +GOMODTIDY:=$(GOCMD) mod tidy GOGET:=$(GOCMD) get GOLIST:=$(GOCMD) list GOVET:=$(GOCMD) vet @@ -54,13 +55,7 @@ clean: .PHONY: deps deps: - $(GOGET) github.com/swaggo/cli - $(GOGET) sigs.k8s.io/yaml - $(GOGET) github.com/KyleBanks/depth - $(GOGET) github.com/go-openapi/jsonreference - $(GOGET) github.com/go-openapi/spec - $(GOGET) github.com/stretchr/testify/assert - $(GOGET) golang.org/x/tools/go/loader + $(GOMODTIDY) .PHONY: devel-deps devel-deps: diff --git a/vendor/github.com/swaggo/swag/README.md b/vendor/github.com/swaggo/swag/README.md index 98932c87..054e36ec 100644 --- a/vendor/github.com/swaggo/swag/README.md +++ b/vendor/github.com/swaggo/swag/README.md @@ -44,17 +44,23 @@ Swag converts Go annotations to Swagger Documentation 2.0. We've created a varie - [How to use security annotations](#how-to-use-security-annotations) - [Add a description for enum items](#add-a-description-for-enum-items) - [Generate only specific docs file types](#generate-only-specific-docs-file-types) + - [How to use Go generic types](#how-to-use-generics) - [About the Project](#about-the-project) ## Getting started 1. Add comments to your API source code, See [Declarative Comments Format](#declarative-comments-format). -2. Download swag by using: +2. Install swag by using: ```sh go install github.com/swaggo/swag/cmd/swag@latest ``` -To build from source you need [Go](https://golang.org/dl/) (1.17 or newer). +To build from source you need [Go](https://golang.org/dl/) (1.18 or newer). + +Alternatively you can run the docker image: +```sh +docker run --rm -v $(pwd):/code ghcr.io/swaggo/swag:latest +``` Or download a pre-compiled binary from the [release page](https://github.com/swaggo/swag/releases). @@ -64,6 +70,9 @@ swag init ``` Make sure to import the generated `docs/docs.go` so that your specific configuration gets `init`'ed. If your General API annotations do not live in `main.go`, you can let swag know with `-g` flag. + ```go + import _ "example-module-name/docs" + ``` ```sh swag init -g http/api.go ``` @@ -106,6 +115,7 @@ OPTIONS: --tags value, -t value A comma-separated list of tags to filter the APIs for which the documentation is generated.Special case if the tag is prefixed with the '!' character then the APIs with that tag will be excluded --templateDelims value, --td value Provide custom delimeters for Go template generation. The format is leftDelim,rightDelim. For example: "[[,]]" --collectionFormat value, --cf value Set default collection format (default: "csv") + --state value Initial state for the state machine (default: ""), @HostState in root file, @State in other files --help, -h show help (default: false) ``` @@ -142,6 +152,7 @@ OPTIONS: Find the example source code [here](https://github.com/swaggo/swag/tree/master/example/celler). +Finish the steps in [Getting started](#getting-started) 1. 
After using `swag init` to generate Swagger 2.0 docs, import the following packages: ```go import "github.com/swaggo/gin-swagger" // gin-swagger middleware @@ -414,25 +425,26 @@ When a short string in your documentation is insufficient, or you need images, c [celler/controller](https://github.com/swaggo/swag/tree/master/example/celler/controller) -| annotation | description | -|-------------|----------------------------------------------------------------------------------------------------------------------------| -| description | A verbose explanation of the operation behavior. | -| description.markdown | A short description of the application. The description will be read from a file. E.g. `@description.markdown details` will load `details.md`| // @description.file endpoint.description.markdown | -| id | A unique string used to identify the operation. Must be unique among all API operations. | -| tags | A list of tags to each API operation that separated by commas. | -| summary | A short summary of what the operation does. | -| accept | A list of MIME types the APIs can consume. Note that Accept only affects operations with a request body, such as POST, PUT and PATCH. Value MUST be as described under [Mime Types](#mime-types). | -| produce | A list of MIME types the APIs can produce. Value MUST be as described under [Mime Types](#mime-types). | -| param | Parameters that separated by spaces. `param name`,`param type`,`data type`,`is mandatory?`,`comment` `attribute(optional)` | -| security | [Security](#security) to each API operation. | -| success | Success response that separated by spaces. `return code or default`,`{param type}`,`data type`,`comment` | -| failure | Failure response that separated by spaces. `return code or default`,`{param type}`,`data type`,`comment` | -| response | As same as `success` and `failure` | -| header | Header in response that separated by spaces. `return code`,`{param type}`,`data type`,`comment` | -| router | Path definition that separated by spaces. `path`,`[httpMethod]` | -| x-name | The extension key, must be start by x- and take only json value. | -| x-codeSample | Optional Markdown usage. take `file` as parameter. This will then search for a file named like the summary in the given folder. | -| deprecated | Mark endpoint as deprecated. | +| annotation | description | +|----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| description | A verbose explanation of the operation behavior. | +| description.markdown | A short description of the application. The description will be read from a file. E.g. `@description.markdown details` will load `details.md` | // @description.file endpoint.description.markdown | +| id | A unique string used to identify the operation. Must be unique among all API operations. | +| tags | A list of tags to each API operation that separated by commas. | +| summary | A short summary of what the operation does. | +| accept | A list of MIME types the APIs can consume. Note that Accept only affects operations with a request body, such as POST, PUT and PATCH. Value MUST be as described under [Mime Types](#mime-types). | +| produce | A list of MIME types the APIs can produce. Value MUST be as described under [Mime Types](#mime-types). | +| param | Parameters that separated by spaces. 
`param name`,`param type`,`data type`,`is mandatory?`,`comment` `attribute(optional)` | +| security | [Security](#security) to each API operation. | +| success | Success response that separated by spaces. `return code or default`,`{param type}`,`data type`,`comment` | +| failure | Failure response that separated by spaces. `return code or default`,`{param type}`,`data type`,`comment` | +| response | As same as `success` and `failure` | +| header | Header in response that separated by spaces. `return code`,`{param type}`,`data type`,`comment` | +| router | Path definition that separated by spaces. `path`,`[httpMethod]` | +| deprecatedrouter | As same as router, but deprecated. | +| x-name | The extension key, must be start by x- and take only json value. | +| x-codeSample | Optional Markdown usage. take `file` as parameter. This will then search for a file named like the summary in the given folder. | +| deprecated | Mark endpoint as deprecated. | @@ -909,6 +921,19 @@ By default `swag` command generates Swagger specification in three different fil If you would like to limit a set of file types which should be generated you can use `--outputTypes` (short `-ot`) flag. Default value is `go,json,yaml` - output types separated with comma. To limit output only to `go` and `yaml` files, you would write `go,yaml`. With complete command that would be `swag init --outputTypes go,yaml`. +### How to use Generics + +```go +// @Success 200 {object} web.GenericNestedResponse[types.Post] +// @Success 204 {object} web.GenericNestedResponse[types.Post, Types.AnotherOne] +// @Success 201 {object} web.GenericNestedResponse[web.GenericInnerType[types.Post]] +func GetPosts(w http.ResponseWriter, r *http.Request) { + _ = web.GenericNestedResponse[types.Post]{} +} +``` +See [this file](https://github.com/swaggo/swag/blob/master/testdata/generics_nested/api/api.go) for more details +and other examples. + ### Change the default Go Template action delimiters [#980](https://github.com/swaggo/swag/issues/980) [#1177](https://github.com/swaggo/swag/issues/1177) diff --git a/vendor/github.com/swaggo/swag/README_pt.md b/vendor/github.com/swaggo/swag/README_pt.md index 1415e9c8..7f95066b 100644 --- a/vendor/github.com/swaggo/swag/README_pt.md +++ b/vendor/github.com/swaggo/swag/README_pt.md @@ -43,6 +43,7 @@ Swag converte anotações Go para Documentação Swagger 2.0. Criámos uma varie - [Como utilizar as anotações de segurança](#como-utilizar-as-anotações-de-segurança) - [Adicionar uma descrição para enumerar artigos](#add-a-description-for-enum-items) - [Gerar apenas tipos de ficheiros de documentos específicos](#generate-only-specific-docs-file-file-types) + - [Como usar tipos genéricos](#como-usar-tipos-genéricos) - [Sobre o projecto](#sobre-o-projecto) ## Começando @@ -53,7 +54,7 @@ Swag converte anotações Go para Documentação Swagger 2.0. Criámos uma varie ```sh go install github.com/swaggo/swag/cmd/swag@latest ``` -Para construir a partir da fonte é necessário [Go](https://golang.org/dl/) (1.17 ou mais recente). +Para construir a partir da fonte é necessário [Go](https://golang.org/dl/) (1.18 ou mais recente). Ou descarregar um binário pré-compilado a partir da [página de lançamento](https://github.com/swaggo/swag/releases). @@ -905,6 +906,18 @@ Por defeito, o comando `swag` gera especificação Swagger em três tipos difere Se desejar limitar um conjunto de tipos de ficheiros que devem ser gerados pode utilizar a bandeira `--outputTypes` (short `-ot`). 
O valor por defeito é `go,json,yaml` - tipos de saída separados por vírgula. Para limitar a saída apenas a ficheiros `go` e `yaml`, escrever-se-ia `go,yaml'. Com comando completo que seria `swag init --outputTypes go,yaml`. +### Como usar tipos genéricos + +```go +// @Success 200 {object} web.GenericNestedResponse[types.Post] +// @Success 204 {object} web.GenericNestedResponse[types.Post, Types.AnotherOne] +// @Success 201 {object} web.GenericNestedResponse[web.GenericInnerType[types.Post]] +func GetPosts(w http.ResponseWriter, r *http.Request) { + _ = web.GenericNestedResponse[types.Post]{} +} +``` +Para mais detalhes e outros exemplos, veja [esse arquivo](https://github.com/swaggo/swag/blob/master/testdata/generics_nested/api/api.go) + ### Alterar os delimitadores de acção padrão Go Template [#980](https://github.com/swaggo/swag/issues/980) [#1177](https://github.com/swaggo/swag/issues/1177) diff --git a/vendor/github.com/swaggo/swag/README_zh-CN.md b/vendor/github.com/swaggo/swag/README_zh-CN.md index 755a7f0b..87d600b5 100644 --- a/vendor/github.com/swaggo/swag/README_zh-CN.md +++ b/vendor/github.com/swaggo/swag/README_zh-CN.md @@ -50,7 +50,7 @@ Swag将Go的注释转换为Swagger2.0文档。我们为流行的 [Go Web Framewo go install github.com/swaggo/swag/cmd/swag@latest ``` -从源码开始构建的话,需要有Go环境(1.17及以上版本)。 +从源码开始构建的话,需要有Go环境(1.18及以上版本)。 或者从github的release页面下载预编译好的二进制文件。 @@ -378,23 +378,25 @@ swag fmt -d ./ --exclude ./internal Example [celler/controller](https://github.com/swaggo/swag/tree/master/example/celler/controller) -| 注释 | 描述 | -| -------------------- | ------------------------------------------------------------------------------------------------------- | -| description | 操作行为的详细说明。 | -| description.markdown | 应用程序的简短描述。该描述将从名为`endpointname.md`的文件中读取。 | -| id | 用于标识操作的唯一字符串。在所有API操作中必须唯一。 | -| tags | 每个API操作的标签列表,以逗号分隔。 | +| 注释 | 描述 | +|----------------------|------------------------------------------------------------------------------------------------| +| description | 操作行为的详细说明。 | +| description.markdown | 应用程序的简短描述。该描述将从名为`endpointname.md`的文件中读取。 | +| id | 用于标识操作的唯一字符串。在所有API操作中必须唯一。 | +| tags | 每个API操作的标签列表,以逗号分隔。 | | summary | 该操作的简短摘要。 | -| accept | API 可以使用的 MIME 类型列表。 请注意,Accept 仅影响具有请求正文的操作,例如 POST、PUT 和 PATCH。 值必须如“[Mime类型](#mime类型)”中所述。 | -| produce | API可以生成的MIME类型的列表。值必须如“[Mime类型](#mime类型)”中所述。 | +| accept | API 可以使用的 MIME 类型列表。 请注意,Accept 仅影响具有请求正文的操作,例如 POST、PUT 和 PATCH。 值必须如“[Mime类型](#mime类型)”中所述。 | +| produce | API可以生成的MIME类型的列表。值必须如“[Mime类型](#mime类型)”中所述。 | | param | 用空格分隔的参数。`param name`,`param type`,`data type`,`is mandatory?`,`comment` `attribute(optional)` | -| security | 每个API操作的[安全性](#安全性)。 | -| success | 以空格分隔的成功响应。`return code`,`{param type}`,`data type`,`comment` | -| failure | 以空格分隔的故障响应。`return code`,`{param type}`,`data type`,`comment` | -| response | 与success、failure作用相同 | -| header | 以空格分隔的头字段。 `return code`,`{param type}`,`data type`,`comment` | -| router | 以空格分隔的路径定义。 `path`,`[httpMethod]` | -| x-name | 扩展字段必须以`x-`开头,并且只能使用json值。 | +| security | 每个API操作的[安全性](#安全性)。 | +| success | 以空格分隔的成功响应。`return code`,`{param type}`,`data type`,`comment` | +| failure | 以空格分隔的故障响应。`return code`,`{param type}`,`data type`,`comment` | +| response | 与success、failure作用相同 | +| header | 以空格分隔的头字段。 `return code`,`{param type}`,`data type`,`comment` | +| router | 以空格分隔的路径定义。 `path`,`[httpMethod]` | +| deprecatedrouter | 与router相同,但是是deprecated的。 | +| x-name | 扩展字段必须以`x-`开头,并且只能使用json值。 | +| deprecated | 将当前API操作的所有路径设置为deprecated | ## Mime类型 diff --git a/vendor/github.com/swaggo/swag/const.go 
b/vendor/github.com/swaggo/swag/const.go index 1353b6d1..83755103 100644 --- a/vendor/github.com/swaggo/swag/const.go +++ b/vendor/github.com/swaggo/swag/const.go @@ -6,6 +6,7 @@ import ( "reflect" "strconv" "strings" + "unicode/utf8" ) // ConstVariable a model to record a const variable @@ -60,7 +61,7 @@ func EvaluateEscapedString(text string) string { i++ char, err := strconv.ParseInt(text[i:i+4], 16, 32) if err == nil { - result = AppendUtf8Rune(result, rune(char)) + result = utf8.AppendRune(result, rune(char)) } i += 3 } else if c, ok := escapedChars[text[i]]; ok { @@ -404,7 +405,7 @@ func EvaluateUnary(x interface{}, operator token.Token, xtype ast.Expr) (interfa func EvaluateBinary(x, y interface{}, operator token.Token, xtype, ytype ast.Expr) (interface{}, ast.Expr) { if operator == token.SHR || operator == token.SHL { var rightOperand uint64 - yValue := CanIntegerValue{reflect.ValueOf(y)} + yValue := reflect.ValueOf(y) if yValue.CanUint() { rightOperand = yValue.Uint() } else if yValue.CanInt() { @@ -467,8 +468,8 @@ func EvaluateBinary(x, y interface{}, operator token.Token, xtype, ytype ast.Exp evalType = ytype } - xValue := CanIntegerValue{reflect.ValueOf(x)} - yValue := CanIntegerValue{reflect.ValueOf(y)} + xValue := reflect.ValueOf(x) + yValue := reflect.ValueOf(y) if xValue.Kind() == reflect.String && yValue.Kind() == reflect.String { return xValue.String() + yValue.String(), evalType } diff --git a/vendor/github.com/swaggo/swag/field_parser.go b/vendor/github.com/swaggo/swag/field_parser.go index 98ad0ddc..9b24e787 100644 --- a/vendor/github.com/swaggo/swag/field_parser.go +++ b/vendor/github.com/swaggo/swag/field_parser.go @@ -96,13 +96,25 @@ func (ps *tagBaseFieldParser) FieldName() (string, error) { } } -func (ps *tagBaseFieldParser) FormName() string { +func (ps *tagBaseFieldParser) firstTagValue(tag string) string { if ps.field.Tag != nil { - return strings.TrimSpace(strings.Split(ps.tag.Get(formTag), ",")[0]) + return strings.TrimRight(strings.TrimSpace(strings.Split(ps.tag.Get(tag), ",")[0]), "[]") } return "" } +func (ps *tagBaseFieldParser) FormName() string { + return ps.firstTagValue(formTag) +} + +func (ps *tagBaseFieldParser) HeaderName() string { + return ps.firstTagValue(headerTag) +} + +func (ps *tagBaseFieldParser) PathName() string { + return ps.firstTagValue(uriTag) +} + func toSnakeCase(in string) string { var ( runes = []rune(in) diff --git a/vendor/github.com/swaggo/swag/generics.go b/vendor/github.com/swaggo/swag/generics.go index bb6a0996..07344bba 100644 --- a/vendor/github.com/swaggo/swag/generics.go +++ b/vendor/github.com/swaggo/swag/generics.go @@ -14,9 +14,8 @@ import ( ) type genericTypeSpec struct { - ArrayDepth int - TypeSpec *TypeSpecDef - Name string + TypeSpec *TypeSpecDef + Name string } type formalParamType struct { @@ -31,6 +30,87 @@ func (t *genericTypeSpec) TypeName() string { return t.Name } +func normalizeGenericTypeName(name string) string { + return strings.Replace(name, ".", "_", -1) +} + +func (pkgDefs *PackagesDefinitions) getTypeFromGenericParam(genericParam string, file *ast.File) (typeSpecDef *TypeSpecDef) { + if strings.HasPrefix(genericParam, "[]") { + typeSpecDef = pkgDefs.getTypeFromGenericParam(genericParam[2:], file) + if typeSpecDef == nil { + return nil + } + var expr ast.Expr + switch typeSpecDef.TypeSpec.Type.(type) { + case *ast.ArrayType, *ast.MapType: + expr = typeSpecDef.TypeSpec.Type + default: + name := typeSpecDef.TypeName() + expr = ast.NewIdent(name) + if _, ok := pkgDefs.uniqueDefinitions[name]; !ok { + 
pkgDefs.uniqueDefinitions[name] = typeSpecDef + } + } + return &TypeSpecDef{ + TypeSpec: &ast.TypeSpec{ + Name: ast.NewIdent(string(IgnoreNameOverridePrefix) + "array_" + typeSpecDef.TypeName()), + Type: &ast.ArrayType{ + Elt: expr, + }, + }, + Enums: typeSpecDef.Enums, + PkgPath: typeSpecDef.PkgPath, + ParentSpec: typeSpecDef.ParentSpec, + NotUnique: false, + } + } + + if strings.HasPrefix(genericParam, "map[") { + parts := strings.SplitN(genericParam[4:], "]", 2) + if len(parts) != 2 { + return nil + } + typeSpecDef = pkgDefs.getTypeFromGenericParam(parts[1], file) + if typeSpecDef == nil { + return nil + } + var expr ast.Expr + switch typeSpecDef.TypeSpec.Type.(type) { + case *ast.ArrayType, *ast.MapType: + expr = typeSpecDef.TypeSpec.Type + default: + name := typeSpecDef.TypeName() + expr = ast.NewIdent(name) + if _, ok := pkgDefs.uniqueDefinitions[name]; !ok { + pkgDefs.uniqueDefinitions[name] = typeSpecDef + } + } + return &TypeSpecDef{ + TypeSpec: &ast.TypeSpec{ + Name: ast.NewIdent(string(IgnoreNameOverridePrefix) + "map_" + parts[0] + "_" + typeSpecDef.TypeName()), + Type: &ast.MapType{ + Key: ast.NewIdent(parts[0]), //assume key is string or integer + Value: expr, + }, + }, + Enums: typeSpecDef.Enums, + PkgPath: typeSpecDef.PkgPath, + ParentSpec: typeSpecDef.ParentSpec, + NotUnique: false, + } + + } + if IsGolangPrimitiveType(genericParam) { + return &TypeSpecDef{ + TypeSpec: &ast.TypeSpec{ + Name: ast.NewIdent(genericParam), + Type: ast.NewIdent(genericParam), + }, + } + } + return pkgDefs.FindTypeSpec(genericParam, file) +} + func (pkgDefs *PackagesDefinitions) parametrizeGenericType(file *ast.File, original *TypeSpecDef, fullGenericForm string) *TypeSpecDef { if original == nil || original.TypeSpec.TypeParams == nil || len(original.TypeSpec.TypeParams.List) == 0 { return original @@ -58,27 +138,19 @@ func (pkgDefs *PackagesDefinitions) parametrizeGenericType(file *ast.File, origi genericParamTypeDefs := map[string]*genericTypeSpec{} for i, genericParam := range genericParams { - arrayDepth := 0 - for { - if len(genericParam) <= 2 || genericParam[:2] != "[]" { - break - } - genericParam = genericParam[2:] - arrayDepth++ - } - - typeDef := pkgDefs.FindTypeSpec(genericParam, file) - if typeDef != nil { - genericParam = typeDef.TypeName() - if _, ok := pkgDefs.uniqueDefinitions[genericParam]; !ok { - pkgDefs.uniqueDefinitions[genericParam] = typeDef + var typeDef *TypeSpecDef + if !IsGolangPrimitiveType(genericParam) { + typeDef = pkgDefs.getTypeFromGenericParam(genericParam, file) + if typeDef != nil { + genericParam = typeDef.TypeName() + if _, ok := pkgDefs.uniqueDefinitions[genericParam]; !ok { + pkgDefs.uniqueDefinitions[genericParam] = typeDef + } } } - genericParamTypeDefs[formals[i].Name] = &genericTypeSpec{ - ArrayDepth: arrayDepth, - TypeSpec: typeDef, - Name: genericParam, + TypeSpec: typeDef, + Name: genericParam, } } @@ -86,17 +158,11 @@ func (pkgDefs *PackagesDefinitions) parametrizeGenericType(file *ast.File, origi var nameParts []string for _, def := range formals { if specDef, ok := genericParamTypeDefs[def.Name]; ok { - var prefix = "" - if specDef.ArrayDepth == 1 { - prefix = "array_" - } else if specDef.ArrayDepth > 1 { - prefix = fmt.Sprintf("array%d_", specDef.ArrayDepth) - } - nameParts = append(nameParts, prefix+specDef.TypeName()) + nameParts = append(nameParts, specDef.TypeName()) } } - name += strings.Replace(strings.Join(nameParts, "-"), ".", "_", -1) + name += normalizeGenericTypeName(strings.Join(nameParts, "-")) if typeSpec, ok := 
pkgDefs.uniqueDefinitions[name]; ok { return typeSpec @@ -180,11 +246,7 @@ func (pkgDefs *PackagesDefinitions) resolveGenericType(file *ast.File, expr ast. switch astExpr := expr.(type) { case *ast.Ident: if genTypeSpec, ok := genericParamTypeDefs[astExpr.Name]; ok { - retType := pkgDefs.getParametrizedType(genTypeSpec) - for i := 0; i < genTypeSpec.ArrayDepth; i++ { - retType = &ast.ArrayType{Elt: retType} - } - return retType + return pkgDefs.getParametrizedType(genTypeSpec) } case *ast.ArrayType: return &ast.ArrayType{ diff --git a/vendor/github.com/swaggo/swag/generics_other.go b/vendor/github.com/swaggo/swag/generics_other.go deleted file mode 100644 index 5fd9e823..00000000 --- a/vendor/github.com/swaggo/swag/generics_other.go +++ /dev/null @@ -1,42 +0,0 @@ -//go:build !go1.18 -// +build !go1.18 - -package swag - -import ( - "fmt" - "github.com/go-openapi/spec" - "go/ast" -) - -type genericTypeSpec struct { - ArrayDepth int - TypeSpec *TypeSpecDef - Name string -} - -func (pkgDefs *PackagesDefinitions) parametrizeGenericType(file *ast.File, original *TypeSpecDef, fullGenericForm string) *TypeSpecDef { - return original -} - -func getGenericFieldType(file *ast.File, field ast.Expr, genericParamTypeDefs map[string]*genericTypeSpec) (string, error) { - return "", fmt.Errorf("unknown field type %#v", field) -} - -func (parser *Parser) parseGenericTypeExpr(file *ast.File, typeExpr ast.Expr) (*spec.Schema, error) { - switch typeExpr.(type) { - // suppress debug messages for these types - case *ast.InterfaceType: - case *ast.StructType: - case *ast.Ident: - case *ast.StarExpr: - case *ast.SelectorExpr: - case *ast.ArrayType: - case *ast.MapType: - case *ast.FuncType: - default: - parser.debug.Printf("Type definition of type '%T' is not supported yet. Using 'object' instead.\n", typeExpr) - } - - return PrimitiveSchema(OBJECT), nil -} diff --git a/vendor/github.com/swaggo/swag/operation.go b/vendor/github.com/swaggo/swag/operation.go index edf25100..169510ff 100644 --- a/vendor/github.com/swaggo/swag/operation.go +++ b/vendor/github.com/swaggo/swag/operation.go @@ -21,6 +21,7 @@ import ( type RouteProperties struct { HTTPMethod string Path string + Deprecated bool } // Operation describes a single API operation on a path. @@ -30,6 +31,7 @@ type Operation struct { codeExampleFilesDir string spec.Operation RouterProperties []RouteProperties + State string } var mimeTypeAliases = map[string]string{ @@ -118,6 +120,8 @@ func (operation *Operation) ParseComment(comment string, astFile *ast.File) erro lineRemainder = fields[1] } switch lowerAttribute { + case stateAttr: + operation.ParseStateComment(lineRemainder) case descriptionAttr: operation.ParseDescriptionComment(lineRemainder) case descriptionMarkdownAttr: @@ -144,7 +148,9 @@ func (operation *Operation) ParseComment(comment string, astFile *ast.File) erro case headerAttr: return operation.ParseResponseHeaderComment(lineRemainder, astFile) case routerAttr: - return operation.ParseRouterComment(lineRemainder) + return operation.ParseRouterComment(lineRemainder, false) + case deprecatedRouterAttr: + return operation.ParseRouterComment(lineRemainder, true) case securityAttr: return operation.ParseSecurityComment(lineRemainder) case deprecatedAttr: @@ -158,7 +164,7 @@ func (operation *Operation) ParseComment(comment string, astFile *ast.File) erro return nil } -// ParseCodeSample godoc. +// ParseCodeSample parse code sample. 
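For context on the two new operation attributes wired into `ParseComment` above: `@deprecatedrouter` registers a route exactly like `@router` but marks it deprecated in the generated spec, and an operation whose `@state` value does not match the parser's `HostState` is skipped entirely. A sketch of a handler comment using both (handler name, paths and the `beta` state are illustrative, not taken from this repository):

```go
package handlers

import "net/http"

// GetUsers godoc
//
// @Summary           List users
// @State             beta
// @Success           200 {array} string
// @Router            /v2/users [get]
// @DeprecatedRouter  /v1/users [get]
func GetUsers(w http.ResponseWriter, r *http.Request) {
	// /v1/users is emitted with deprecated: true; the whole operation is
	// dropped from the spec unless the parser's HostState is "beta".
	w.WriteHeader(http.StatusOK)
}
```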
func (operation *Operation) ParseCodeSample(attribute, _, lineRemainder string) error { if lineRemainder == "file" { data, err := getCodeExampleForSummary(operation.Summary, operation.codeExampleFilesDir) @@ -183,7 +189,12 @@ func (operation *Operation) ParseCodeSample(attribute, _, lineRemainder string) return operation.ParseMetadata(attribute, strings.ToLower(attribute), lineRemainder) } -// ParseDescriptionComment godoc. +// ParseStateComment parse state comment. +func (operation *Operation) ParseStateComment(lineRemainder string) { + operation.State = lineRemainder +} + +// ParseDescriptionComment parse description comment. func (operation *Operation) ParseDescriptionComment(lineRemainder string) { if operation.Description == "" { operation.Description = lineRemainder @@ -194,7 +205,7 @@ func (operation *Operation) ParseDescriptionComment(lineRemainder string) { operation.Description += "\n" + lineRemainder } -// ParseMetadata godoc. +// ParseMetadata parse metadata. func (operation *Operation) ParseMetadata(attribute, lowerAttribute, lineRemainder string) error { // parsing specific meta data extensions if strings.HasPrefix(lowerAttribute, "@x-") { @@ -275,16 +286,7 @@ func (operation *Operation) ParseParamComment(commentLine string, astFile *ast.F param := createParameter(paramType, description, name, objectType, refType, required, enums, operation.parser.collectionFormatInQuery) switch paramType { - case "path", "header": - switch objectType { - case ARRAY: - if !IsPrimitiveType(refType) { - return fmt.Errorf("%s is not supported array type for %s", refType, paramType) - } - case OBJECT: - return fmt.Errorf("%s is not supported type for %s", refType, paramType) - } - case "query", "formData": + case "path", "header", "query", "formData": switch objectType { case ARRAY: if !IsPrimitiveType(refType) && !(refType == "file" && paramType == "formData") { @@ -313,11 +315,14 @@ func (operation *Operation) ParseParamComment(commentLine string, astFile *ast.F } } - var formName = name - if item.Schema.Extensions != nil { - if nameVal, ok := item.Schema.Extensions[formTag]; ok { - formName = nameVal.(string) - } + nameOverrideType := paramType + // query also uses formData tags + if paramType == "query" { + nameOverrideType = "formData" + } + // load overridden type specific name from extensions if exists + if nameVal, ok := item.Schema.Extensions[nameOverrideType]; ok { + name = nameVal.(string) } switch { @@ -335,10 +340,10 @@ func (operation *Operation) ParseParamComment(commentLine string, astFile *ast.F if !IsSimplePrimitiveType(itemSchema.Type[0]) { continue } - param = createParameter(paramType, prop.Description, formName, prop.Type[0], itemSchema.Type[0], findInSlice(schema.Required, name), itemSchema.Enum, operation.parser.collectionFormatInQuery) + param = createParameter(paramType, prop.Description, name, prop.Type[0], itemSchema.Type[0], findInSlice(schema.Required, item.Name), itemSchema.Enum, operation.parser.collectionFormatInQuery) case IsSimplePrimitiveType(prop.Type[0]): - param = createParameter(paramType, prop.Description, formName, PRIMITIVE, prop.Type[0], findInSlice(schema.Required, name), nil, operation.parser.collectionFormatInQuery) + param = createParameter(paramType, prop.Description, name, PRIMITIVE, prop.Type[0], findInSlice(schema.Required, item.Name), nil, operation.parser.collectionFormatInQuery) default: operation.parser.debug.Printf("skip field [%s] in %s is not supported type for %s", name, refType, paramType) continue @@ -395,6 +400,8 @@ func (operation 
*Operation) ParseParamComment(commentLine string, astFile *ast.F const ( formTag = "form" jsonTag = "json" + uriTag = "uri" + headerTag = "header" bindingTag = "binding" defaultTag = "default" enumsTag = "enums" @@ -699,7 +706,7 @@ func parseMimeTypeList(mimeTypeList string, typeList *[]string, format string) e var routerPattern = regexp.MustCompile(`^(/[\w./\-{}+:$]*)[[:blank:]]+\[(\w+)]`) // ParseRouterComment parses comment for given `router` comment string. -func (operation *Operation) ParseRouterComment(commentLine string) error { +func (operation *Operation) ParseRouterComment(commentLine string, deprecated bool) error { matches := routerPattern.FindStringSubmatch(commentLine) if len(matches) != 3 { return fmt.Errorf("can not parse router comment \"%s\"", commentLine) @@ -708,6 +715,7 @@ func (operation *Operation) ParseRouterComment(commentLine string) error { signature := RouteProperties{ Path: matches[1], HTTPMethod: strings.ToUpper(matches[2]), + Deprecated: deprecated, } if _, ok := allMethod[signature.HTTPMethod]; !ok { @@ -918,6 +926,10 @@ func parseCombinedObjectSchema(parser *Parser, refType string, astFile *ast.File return nil, err } + if schema == nil { + schema = PrimitiveSchema(OBJECT) + } + props[keyVal[0]] = *schema } } diff --git a/vendor/github.com/swaggo/swag/packages.go b/vendor/github.com/swaggo/swag/packages.go index b897ea97..69a1b052 100644 --- a/vendor/github.com/swaggo/swag/packages.go +++ b/vendor/github.com/swaggo/swag/packages.go @@ -354,7 +354,7 @@ func (pkgDefs *PackagesDefinitions) collectConstEnums(parsedSchemas map[*TypeSpe } //delete it from parsed schemas, and will parse it again - if _, ok := parsedSchemas[typeDef]; ok { + if _, ok = parsedSchemas[typeDef]; ok { delete(parsedSchemas, typeDef) } @@ -363,7 +363,7 @@ func (pkgDefs *PackagesDefinitions) collectConstEnums(parsedSchemas map[*TypeSpe } name := constVar.Name.Name - if _, ok := constVar.Value.(ast.Expr); ok { + if _, ok = constVar.Value.(ast.Expr); ok { continue } @@ -506,13 +506,6 @@ func (pkgDefs *PackagesDefinitions) findPackagePathFromImports(pkg string, file } func (pkgDefs *PackagesDefinitions) findTypeSpecFromPackagePaths(matchedPkgPaths, externalPkgPaths []string, name string) (typeDef *TypeSpecDef) { - for _, pkgPath := range matchedPkgPaths { - typeDef = pkgDefs.findTypeSpec(pkgPath, name) - if typeDef != nil { - return typeDef - } - } - if pkgDefs.parseDependency > 0 { for _, pkgPath := range externalPkgPaths { if err := pkgDefs.loadExternalPackage(pkgPath); err == nil { @@ -524,6 +517,13 @@ func (pkgDefs *PackagesDefinitions) findTypeSpecFromPackagePaths(matchedPkgPaths } } + for _, pkgPath := range matchedPkgPaths { + typeDef = pkgDefs.findTypeSpec(pkgPath, name) + if typeDef != nil { + return typeDef + } + } + return typeDef } @@ -542,13 +542,14 @@ func (pkgDefs *PackagesDefinitions) FindTypeSpec(typeName string, file *ast.File parts := strings.Split(strings.Split(typeName, "[")[0], ".") if len(parts) > 1 { - typeDef, ok := pkgDefs.uniqueDefinitions[typeName] - if ok { - return typeDef - } - pkgPaths, externalPkgPaths := pkgDefs.findPackagePathFromImports(parts[0], file) - typeDef = pkgDefs.findTypeSpecFromPackagePaths(pkgPaths, externalPkgPaths, parts[1]) + if len(externalPkgPaths) == 0 || pkgDefs.parseDependency == ParseNone { + typeDef, ok := pkgDefs.uniqueDefinitions[typeName] + if ok { + return typeDef + } + } + typeDef := pkgDefs.findTypeSpecFromPackagePaths(pkgPaths, externalPkgPaths, parts[1]) return pkgDefs.parametrizeGenericType(file, typeDef, typeName) } diff --git 
a/vendor/github.com/swaggo/swag/parser.go b/vendor/github.com/swaggo/swag/parser.go index 6bf991e9..604f8278 100644 --- a/vendor/github.com/swaggo/swag/parser.go +++ b/vendor/github.com/swaggo/swag/parser.go @@ -43,6 +43,7 @@ const ( headerAttr = "@header" tagsAttr = "@tags" routerAttr = "@router" + deprecatedRouterAttr = "@deprecatedrouter" summaryAttr = "@summary" deprecatedAttr = "@deprecated" securityAttr = "@security" @@ -66,6 +67,7 @@ const ( extDocsURLAttr = "@externaldocs.url" xCodeSamplesAttr = "@x-codesamples" scopeAttrPrefix = "@scope." + stateAttr = "@state" ) // ParseFlag determine what to parse @@ -174,6 +176,9 @@ type Parser struct { // tags to filter the APIs after tags map[string]struct{} + + // HostState is the state of the host + HostState string } // FieldParserFactory create FieldParser. @@ -184,6 +189,8 @@ type FieldParser interface { ShouldSkip() bool FieldName() (string, error) FormName() string + HeaderName() string + PathName() string CustomSchema() (*spec.Schema, error) ComplementSchema(schema *spec.Schema) error IsRequired() (bool, error) @@ -541,6 +548,14 @@ func parseGeneralAPIInfo(parser *Parser, comments []string) error { case "@host": parser.swagger.Host = value + case "@hoststate": + fields = FieldsByAnySpace(commentLine, 3) + if len(fields) != 3 { + return fmt.Errorf("%s needs 3 arguments", attribute) + } + if parser.HostState == fields[1] { + parser.swagger.Host = fields[2] + } case "@basepath": parser.swagger.BasePath = value @@ -722,6 +737,7 @@ func parseSecAttributes(context string, lines []string, index *int) (*spec.Secur attrMap, scopes := make(map[string]string), make(map[string]string) extensions, description := make(map[string]interface{}), "" +loopline: for ; *index < len(lines); *index++ { v := strings.TrimSpace(lines[*index]) if len(v) == 0 { @@ -738,23 +754,21 @@ func parseSecAttributes(context string, lines []string, index *int) (*spec.Secur for _, findterm := range search { if securityAttr == findterm { attrMap[securityAttr] = value - - break + continue loopline } } - isExists, err := isExistsScope(securityAttr) - if err != nil { + if isExists, err := isExistsScope(securityAttr); err != nil { return nil, err - } - - if isExists { - scopes[securityAttr[len(scopeAttrPrefix):]] = v[len(securityAttr):] + } else if isExists { + scopes[securityAttr[len(scopeAttrPrefix):]] = value + continue } if strings.HasPrefix(securityAttr, "@x-") { // Add the custom attribute without the @ extensions[securityAttr[1:]] = value + continue } // Not mandatory field @@ -901,14 +915,14 @@ func getMarkdownForTag(tagName string, dirPath string) ([]byte, error) { func isExistsScope(scope string) (bool, error) { s := strings.Fields(scope) for _, v := range s { - if strings.Contains(v, scopeAttrPrefix) { + if strings.HasPrefix(v, scopeAttrPrefix) { if strings.Contains(v, ",") { return false, fmt.Errorf("@scope can't use comma(,) get=" + v) } } } - return strings.Contains(scope, scopeAttrPrefix), nil + return strings.HasPrefix(scope, scopeAttrPrefix), nil } func getTagsFromComment(comment string) (tags []string) { @@ -977,6 +991,7 @@ func matchExtension(extensionToMatch string, comments []*ast.Comment) (match boo // ParseRouterAPIInfo parses router api info for given astFile. 
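The new `@hoststate` general-info attribute parsed above takes two arguments, a state name and a host, and overrides `@host` only when the parser's `HostState` field matches that state. A minimal sketch of a general API comment using it (hosts and the `test` state are illustrative):

```go
package main

// @title      Example API
// @version    1.0
// @host       api.example.com
// @hoststate  test api.test.example.com
//
// When the generator runs with Parser.HostState set to "test", the emitted
// spec uses api.test.example.com as its host; otherwise @host applies unchanged.
func main() {}
```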
func (parser *Parser) ParseRouterAPIInfo(fileInfo *AstFileInfo) error { +DeclsLoop: for _, astDescription := range fileInfo.File.Decls { if (fileInfo.ParseFlag & ParseOperations) == ParseNone { continue @@ -992,6 +1007,9 @@ func (parser *Parser) ParseRouterAPIInfo(fileInfo *AstFileInfo) error { if err != nil { return fmt.Errorf("ParseComment error in file %s :%+v", fileInfo.Path, err) } + if operation.State != "" && operation.State != parser.HostState { + continue DeclsLoop + } } err := processRouterOperation(parser, operation) if err != nil { @@ -1065,6 +1083,10 @@ func processRouterOperation(parser *Parser, operation *Operation) error { *op = &operation.Operation } + if routeProperties.Deprecated { + (*op).Deprecated = routeProperties.Deprecated + } + parser.swagger.Paths.Paths[routeProperties.Path] = pathItem } @@ -1261,6 +1283,9 @@ func fullTypeName(parts ...string) string { // fillDefinitionDescription additionally fills fields in definition (spec.Schema) // TODO: If .go file contains many types, it may work for a long time func fillDefinitionDescription(definition *spec.Schema, file *ast.File, typeSpecDef *TypeSpecDef) { + if file == nil { + return + } for _, astDeclaration := range file.Decls { generalDeclaration, ok := astDeclaration.(*ast.GenDecl) if !ok || generalDeclaration.Tok != token.TYPE { @@ -1485,11 +1510,17 @@ func (parser *Parser) parseStructField(file *ast.File, field *ast.Field) (map[st tagRequired = append(tagRequired, fieldName) } + if schema.Extensions == nil { + schema.Extensions = make(spec.Extensions) + } if formName := ps.FormName(); len(formName) > 0 { - if schema.Extensions == nil { - schema.Extensions = make(spec.Extensions) - } - schema.Extensions[formTag] = formName + schema.Extensions["formData"] = formName + } + if headerName := ps.HeaderName(); len(headerName) > 0 { + schema.Extensions["header"] = headerName + } + if pathName := ps.PathName(); len(pathName) > 0 { + schema.Extensions["path"] = pathName } return map[string]spec.Schema{fieldName: *schema}, tagRequired, nil diff --git a/vendor/github.com/swaggo/swag/types.go b/vendor/github.com/swaggo/swag/types.go index ebb373d7..0076a6b4 100644 --- a/vendor/github.com/swaggo/swag/types.go +++ b/vendor/github.com/swaggo/swag/types.go @@ -35,7 +35,7 @@ type TypeSpecDef struct { // Name the name of the typeSpec. func (t *TypeSpecDef) Name() string { - if t.TypeSpec != nil { + if t.TypeSpec != nil && t.TypeSpec.Name != nil { return t.TypeSpec.Name.Name } @@ -71,7 +71,7 @@ func (t *TypeSpecDef) TypeName() string { return r }, t.PkgPath) names = append(names, pkgPath) - } else { + } else if t.File != nil { names = append(names, t.File.Name.Name) } if parentFun, ok := (t.ParentSpec).(*ast.FuncDecl); ok && parentFun != nil { diff --git a/vendor/github.com/swaggo/swag/utils_go18.go b/vendor/github.com/swaggo/swag/utils_go18.go deleted file mode 100644 index 814f9343..00000000 --- a/vendor/github.com/swaggo/swag/utils_go18.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -package swag - -import ( - "reflect" - "unicode/utf8" -) - -// AppendUtf8Rune appends the UTF-8 encoding of r to the end of p and -// returns the extended buffer. If the rune is out of range, -// it appends the encoding of RuneError. -func AppendUtf8Rune(p []byte, r rune) []byte { - return utf8.AppendRune(p, r) -} - -// CanIntegerValue a wrapper of reflect.Value -type CanIntegerValue struct { - reflect.Value -} - -// CanInt reports whether Uint can be used without panicking. 
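Combined with the `HeaderName`/`PathName` field-parser methods and the `uri`/`header` tag constants added earlier in this diff, the `header` and `path` extensions stored above let header and path parameter names be overridden through struct tags, the same way the `form` tag already worked, and object-typed `path`/`header` parameters are now accepted by `ParseParamComment`. A sketch of such a struct (field and tag names are illustrative):

```go
package handlers

// ListParams can be referenced from a @Param annotation; the tag values
// below become the parameter names in the generated spec. The uri and
// header overrides are new in swag v1.16.3; form worked this way before.
type ListParams struct {
	RequestID string `header:"X-Request-Id"`
	GroupID   int    `uri:"group_id"`
	Page      int    `form:"page"`
}
```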
-func (v CanIntegerValue) CanInt() bool { - return v.Value.CanInt() -} - -// CanUint reports whether Uint can be used without panicking. -func (v CanIntegerValue) CanUint() bool { - return v.Value.CanUint() -} diff --git a/vendor/github.com/swaggo/swag/utils_other.go b/vendor/github.com/swaggo/swag/utils_other.go deleted file mode 100644 index 531c0df1..00000000 --- a/vendor/github.com/swaggo/swag/utils_other.go +++ /dev/null @@ -1,47 +0,0 @@ -//go:build !go1.18 -// +build !go1.18 - -package swag - -import ( - "reflect" - "unicode/utf8" -) - -// AppendUtf8Rune appends the UTF-8 encoding of r to the end of p and -// returns the extended buffer. If the rune is out of range, -// it appends the encoding of RuneError. -func AppendUtf8Rune(p []byte, r rune) []byte { - length := utf8.RuneLen(rune(r)) - if length > 0 { - utf8Slice := make([]byte, length) - utf8.EncodeRune(utf8Slice, rune(r)) - p = append(p, utf8Slice...) - } - return p -} - -// CanIntegerValue a wrapper of reflect.Value -type CanIntegerValue struct { - reflect.Value -} - -// CanInt reports whether Uint can be used without panicking. -func (v CanIntegerValue) CanInt() bool { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return true - default: - return false - } -} - -// CanUint reports whether Uint can be used without panicking. -func (v CanIntegerValue) CanUint() bool { - switch v.Kind() { - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return true - default: - return false - } -} diff --git a/vendor/github.com/swaggo/swag/version.go b/vendor/github.com/swaggo/swag/version.go index e5cf8792..ff2810e6 100644 --- a/vendor/github.com/swaggo/swag/version.go +++ b/vendor/github.com/swaggo/swag/version.go @@ -1,4 +1,4 @@ package swag // Version of swag. -const Version = "v1.16.2" +const Version = "v1.16.3" diff --git a/vendor/github.com/urfave/cli/v2/.gitignore b/vendor/github.com/urfave/cli/v2/.gitignore index d7d26b1f..1ef91a60 100644 --- a/vendor/github.com/urfave/cli/v2/.gitignore +++ b/vendor/github.com/urfave/cli/v2/.gitignore @@ -4,6 +4,7 @@ .*envrc .envrc .idea +# goimports is installed here if not available /.local/ /site/ coverage.txt diff --git a/vendor/github.com/urfave/cli/v2/.golangci.yaml b/vendor/github.com/urfave/cli/v2/.golangci.yaml new file mode 100644 index 00000000..89b6e866 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/.golangci.yaml @@ -0,0 +1,4 @@ +# https://golangci-lint.run/usage/configuration/ +linters: + enable: + - misspell diff --git a/vendor/github.com/urfave/cli/v2/app.go b/vendor/github.com/urfave/cli/v2/app.go index 19b76c9b..6e6f6d0d 100644 --- a/vendor/github.com/urfave/cli/v2/app.go +++ b/vendor/github.com/urfave/cli/v2/app.go @@ -23,8 +23,8 @@ var ( fmt.Sprintf("See %s", appActionDeprecationURL), 2) ignoreFlagPrefix = "test." // this is to ignore test flags when adding flags from other packages - SuggestFlag SuggestFlagFunc = suggestFlag - SuggestCommand SuggestCommandFunc = suggestCommand + SuggestFlag SuggestFlagFunc = nil // initialized in suggestions.go unless built with urfave_cli_no_suggest + SuggestCommand SuggestCommandFunc = nil // initialized in suggestions.go unless built with urfave_cli_no_suggest SuggestDidYouMeanTemplate string = suggestDidYouMeanTemplate ) @@ -39,6 +39,8 @@ type App struct { Usage string // Text to override the USAGE section of help UsageText string + // Whether this command supports arguments + Args bool // Description of the program argument format. 
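One user-visible consequence of the duplicate-subcommand check introduced in the `RunContext` and `command.go` hunks just below: an app whose subcommands share a name or alias now fails with an error instead of silently shadowing one of the commands. A minimal sketch (command names are illustrative):

```go
package main

import (
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "tool",
		Commands: []*cli.Command{
			{Name: "sync", Aliases: []string{"s"}},
			{Name: "status", Aliases: []string{"s"}}, // alias collides with "sync"
		},
	}
	// With the updated cli, Run reports something like
	// "parent command [tool] has duplicated subcommand name or alias: s"
	// rather than resolving "s" to an arbitrary command.
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```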
ArgsUsage string // Version of the program @@ -329,6 +331,9 @@ func (a *App) RunContext(ctx context.Context, arguments []string) (err error) { a.rootCommand = a.newRootCommand() cCtx.Command = a.rootCommand + if err := checkDuplicatedCmds(a.rootCommand); err != nil { + return err + } return a.rootCommand.Run(cCtx, arguments...) } @@ -363,6 +368,9 @@ func (a *App) suggestFlagFromError(err error, command string) (string, error) { hideHelp = hideHelp || cmd.HideHelp } + if SuggestFlag == nil { + return "", err + } suggestion := SuggestFlag(flags, flag, hideHelp) if len(suggestion) == 0 { return "", err @@ -455,30 +463,6 @@ func (a *App) handleExitCoder(cCtx *Context, err error) { } } -func (a *App) commandNames() []string { - var cmdNames []string - - for _, cmd := range a.Commands { - cmdNames = append(cmdNames, cmd.Names()...) - } - - return cmdNames -} - -func (a *App) validCommandName(checkCmdName string) bool { - valid := false - allCommandNames := a.commandNames() - - for _, cmdName := range allCommandNames { - if checkCmdName == cmdName { - valid = true - break - } - } - - return valid -} - func (a *App) argsWithDefaultCommand(oldArgs Args) Args { if a.DefaultCommand != "" { rawArgs := append([]string{a.DefaultCommand}, oldArgs.Slice()...) diff --git a/vendor/github.com/urfave/cli/v2/category.go b/vendor/github.com/urfave/cli/v2/category.go index ccc043c2..b1847eca 100644 --- a/vendor/github.com/urfave/cli/v2/category.go +++ b/vendor/github.com/urfave/cli/v2/category.go @@ -111,7 +111,7 @@ func newFlagCategoriesFromFlags(fs []Flag) FlagCategories { } } - if categorized == true { + if categorized { for _, fl := range fs { if cf, ok := fl.(CategorizableFlag); ok { if cf.GetCategory() == "" { diff --git a/vendor/github.com/urfave/cli/v2/command.go b/vendor/github.com/urfave/cli/v2/command.go index 69a0fdf6..c20a571e 100644 --- a/vendor/github.com/urfave/cli/v2/command.go +++ b/vendor/github.com/urfave/cli/v2/command.go @@ -20,6 +20,8 @@ type Command struct { UsageText string // A longer explanation of how the command works Description string + // Whether this command supports arguments + Args bool // A short description of the arguments of this command ArgsUsage string // The category the command is part of @@ -149,6 +151,9 @@ func (c *Command) Run(cCtx *Context, arguments ...string) (err error) { if !c.isRoot { c.setup(cCtx) + if err := checkDuplicatedCmds(c); err != nil { + return err + } } a := args(arguments) @@ -404,3 +409,16 @@ func hasCommand(commands []*Command, command *Command) bool { return false } + +func checkDuplicatedCmds(parent *Command) error { + seen := make(map[string]struct{}) + for _, c := range parent.Subcommands { + for _, name := range c.Names() { + if _, exists := seen[name]; exists { + return fmt.Errorf("parent command [%s] has duplicated subcommand name or alias: %s", parent.Name, name) + } + seen[name] = struct{}{} + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/context.go b/vendor/github.com/urfave/cli/v2/context.go index a45c120b..8dd47652 100644 --- a/vendor/github.com/urfave/cli/v2/context.go +++ b/vendor/github.com/urfave/cli/v2/context.go @@ -144,7 +144,7 @@ func (cCtx *Context) Lineage() []*Context { return lineage } -// Count returns the num of occurences of this flag +// Count returns the num of occurrences of this flag func (cCtx *Context) Count(name string) int { if fs := cCtx.lookupFlagSet(name); fs != nil { if cf, ok := fs.Lookup(name).Value.(Countable); ok { diff --git a/vendor/github.com/urfave/cli/v2/flag_bool.go 
b/vendor/github.com/urfave/cli/v2/flag_bool.go index 369d18b7..01862ea7 100644 --- a/vendor/github.com/urfave/cli/v2/flag_bool.go +++ b/vendor/github.com/urfave/cli/v2/flag_bool.go @@ -84,7 +84,10 @@ func (f *BoolFlag) GetDefaultText() string { if f.DefaultText != "" { return f.DefaultText } - return fmt.Sprintf("%v", f.defaultValue) + if f.defaultValueSet { + return fmt.Sprintf("%v", f.defaultValue) + } + return fmt.Sprintf("%v", f.Value) } // GetEnvVars returns the env vars for this flag @@ -105,6 +108,7 @@ func (f *BoolFlag) RunAction(c *Context) error { func (f *BoolFlag) Apply(set *flag.FlagSet) error { // set default value so that environment wont be able to overwrite it f.defaultValue = f.Value + f.defaultValueSet = true if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { if val != "" { diff --git a/vendor/github.com/urfave/cli/v2/flag_duration.go b/vendor/github.com/urfave/cli/v2/flag_duration.go index eac4cb83..e600cc30 100644 --- a/vendor/github.com/urfave/cli/v2/flag_duration.go +++ b/vendor/github.com/urfave/cli/v2/flag_duration.go @@ -32,7 +32,10 @@ func (f *DurationFlag) GetDefaultText() string { if f.DefaultText != "" { return f.DefaultText } - return f.defaultValue.String() + if f.defaultValueSet { + return f.defaultValue.String() + } + return f.Value.String() } // GetEnvVars returns the env vars for this flag @@ -44,6 +47,7 @@ func (f *DurationFlag) GetEnvVars() []string { func (f *DurationFlag) Apply(set *flag.FlagSet) error { // set default value so that environment wont be able to overwrite it f.defaultValue = f.Value + f.defaultValueSet = true if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { if val != "" { diff --git a/vendor/github.com/urfave/cli/v2/flag_float64.go b/vendor/github.com/urfave/cli/v2/flag_float64.go index bce26c19..6a4de5c8 100644 --- a/vendor/github.com/urfave/cli/v2/flag_float64.go +++ b/vendor/github.com/urfave/cli/v2/flag_float64.go @@ -32,7 +32,10 @@ func (f *Float64Flag) GetDefaultText() string { if f.DefaultText != "" { return f.DefaultText } - return f.GetValue() + if f.defaultValueSet { + return fmt.Sprintf("%v", f.defaultValue) + } + return fmt.Sprintf("%v", f.Value) } // GetEnvVars returns the env vars for this flag @@ -42,6 +45,9 @@ func (f *Float64Flag) GetEnvVars() []string { // Apply populates the flag given the flag set and environment func (f *Float64Flag) Apply(set *flag.FlagSet) error { + f.defaultValue = f.Value + f.defaultValueSet = true + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { if val != "" { valFloat, err := strconv.ParseFloat(val, 64) diff --git a/vendor/github.com/urfave/cli/v2/flag_generic.go b/vendor/github.com/urfave/cli/v2/flag_generic.go index 039ffdfe..7528c934 100644 --- a/vendor/github.com/urfave/cli/v2/flag_generic.go +++ b/vendor/github.com/urfave/cli/v2/flag_generic.go @@ -53,9 +53,15 @@ func (f *GenericFlag) GetDefaultText() string { if f.DefaultText != "" { return f.DefaultText } - if f.defaultValue != nil { - return f.defaultValue.String() + val := f.Value + if f.defaultValueSet { + val = f.defaultValue } + + if val != nil { + return val.String() + } + return "" } @@ -70,6 +76,7 @@ func (f *GenericFlag) Apply(set *flag.FlagSet) error { // set default value so that environment wont be able to overwrite it if f.Value != nil { f.defaultValue = &stringGeneric{value: f.Value.String()} + f.defaultValueSet = true } if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { diff --git 
a/vendor/github.com/urfave/cli/v2/flag_int.go b/vendor/github.com/urfave/cli/v2/flag_int.go index d681270a..750e7ebf 100644 --- a/vendor/github.com/urfave/cli/v2/flag_int.go +++ b/vendor/github.com/urfave/cli/v2/flag_int.go @@ -32,7 +32,10 @@ func (f *IntFlag) GetDefaultText() string { if f.DefaultText != "" { return f.DefaultText } - return fmt.Sprintf("%d", f.defaultValue) + if f.defaultValueSet { + return fmt.Sprintf("%d", f.defaultValue) + } + return fmt.Sprintf("%d", f.Value) } // GetEnvVars returns the env vars for this flag @@ -44,6 +47,7 @@ func (f *IntFlag) GetEnvVars() []string { func (f *IntFlag) Apply(set *flag.FlagSet) error { // set default value so that environment wont be able to overwrite it f.defaultValue = f.Value + f.defaultValueSet = true if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { if val != "" { diff --git a/vendor/github.com/urfave/cli/v2/flag_int64.go b/vendor/github.com/urfave/cli/v2/flag_int64.go index 6f8c8d27..688c2671 100644 --- a/vendor/github.com/urfave/cli/v2/flag_int64.go +++ b/vendor/github.com/urfave/cli/v2/flag_int64.go @@ -32,7 +32,10 @@ func (f *Int64Flag) GetDefaultText() string { if f.DefaultText != "" { return f.DefaultText } - return fmt.Sprintf("%d", f.defaultValue) + if f.defaultValueSet { + return fmt.Sprintf("%d", f.defaultValue) + } + return fmt.Sprintf("%d", f.Value) } // GetEnvVars returns the env vars for this flag @@ -44,6 +47,7 @@ func (f *Int64Flag) GetEnvVars() []string { func (f *Int64Flag) Apply(set *flag.FlagSet) error { // set default value so that environment wont be able to overwrite it f.defaultValue = f.Value + f.defaultValueSet = true if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { if val != "" { diff --git a/vendor/github.com/urfave/cli/v2/flag_path.go b/vendor/github.com/urfave/cli/v2/flag_path.go index c4986779..76cb3524 100644 --- a/vendor/github.com/urfave/cli/v2/flag_path.go +++ b/vendor/github.com/urfave/cli/v2/flag_path.go @@ -33,10 +33,14 @@ func (f *PathFlag) GetDefaultText() string { if f.DefaultText != "" { return f.DefaultText } - if f.defaultValue == "" { - return f.defaultValue + val := f.Value + if f.defaultValueSet { + val = f.defaultValue } - return fmt.Sprintf("%q", f.defaultValue) + if val == "" { + return val + } + return fmt.Sprintf("%q", val) } // GetEnvVars returns the env vars for this flag @@ -48,6 +52,7 @@ func (f *PathFlag) GetEnvVars() []string { func (f *PathFlag) Apply(set *flag.FlagSet) error { // set default value so that environment wont be able to overwrite it f.defaultValue = f.Value + f.defaultValueSet = true if val, _, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { f.Value = val diff --git a/vendor/github.com/urfave/cli/v2/flag_string.go b/vendor/github.com/urfave/cli/v2/flag_string.go index 4e55a2ca..0f73e062 100644 --- a/vendor/github.com/urfave/cli/v2/flag_string.go +++ b/vendor/github.com/urfave/cli/v2/flag_string.go @@ -31,10 +31,15 @@ func (f *StringFlag) GetDefaultText() string { if f.DefaultText != "" { return f.DefaultText } - if f.defaultValue == "" { - return f.defaultValue + val := f.Value + if f.defaultValueSet { + val = f.defaultValue } - return fmt.Sprintf("%q", f.defaultValue) + + if val == "" { + return val + } + return fmt.Sprintf("%q", val) } // GetEnvVars returns the env vars for this flag @@ -46,6 +51,7 @@ func (f *StringFlag) GetEnvVars() []string { func (f *StringFlag) Apply(set *flag.FlagSet) error { // set default value so that environment wont be able to overwrite it f.defaultValue = f.Value + 
f.defaultValueSet = true if val, _, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { f.Value = val diff --git a/vendor/github.com/urfave/cli/v2/flag_timestamp.go b/vendor/github.com/urfave/cli/v2/flag_timestamp.go index fa0671fe..b9012308 100644 --- a/vendor/github.com/urfave/cli/v2/flag_timestamp.go +++ b/vendor/github.com/urfave/cli/v2/flag_timestamp.go @@ -120,8 +120,13 @@ func (f *TimestampFlag) GetDefaultText() string { if f.DefaultText != "" { return f.DefaultText } - if f.defaultValue != nil && f.defaultValue.timestamp != nil { - return f.defaultValue.timestamp.String() + val := f.Value + if f.defaultValueSet { + val = f.defaultValue + } + + if val != nil && val.timestamp != nil { + return val.timestamp.String() } return "" @@ -144,11 +149,7 @@ func (f *TimestampFlag) Apply(set *flag.FlagSet) error { f.Value.SetLocation(f.Timezone) f.defaultValue = f.Value.clone() - - if f.Destination != nil { - f.Destination.SetLayout(f.Layout) - f.Destination.SetLocation(f.Timezone) - } + f.defaultValueSet = true if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { if err := f.Value.Set(val); err != nil { @@ -157,6 +158,10 @@ func (f *TimestampFlag) Apply(set *flag.FlagSet) error { f.HasBeenSet = true } + if f.Destination != nil { + *f.Destination = *f.Value + } + for _, name := range f.Names() { if f.Destination != nil { set.Var(f.Destination, name, f.Usage) diff --git a/vendor/github.com/urfave/cli/v2/flag_uint.go b/vendor/github.com/urfave/cli/v2/flag_uint.go index d11aa0a8..8d5b8545 100644 --- a/vendor/github.com/urfave/cli/v2/flag_uint.go +++ b/vendor/github.com/urfave/cli/v2/flag_uint.go @@ -25,6 +25,7 @@ func (f *UintFlag) GetCategory() string { func (f *UintFlag) Apply(set *flag.FlagSet) error { // set default value so that environment wont be able to overwrite it f.defaultValue = f.Value + f.defaultValueSet = true if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { if val != "" { @@ -69,7 +70,10 @@ func (f *UintFlag) GetDefaultText() string { if f.DefaultText != "" { return f.DefaultText } - return fmt.Sprintf("%d", f.defaultValue) + if f.defaultValueSet { + return fmt.Sprintf("%d", f.defaultValue) + } + return fmt.Sprintf("%d", f.Value) } // GetEnvVars returns the env vars for this flag diff --git a/vendor/github.com/urfave/cli/v2/flag_uint64.go b/vendor/github.com/urfave/cli/v2/flag_uint64.go index ea73100a..c356e533 100644 --- a/vendor/github.com/urfave/cli/v2/flag_uint64.go +++ b/vendor/github.com/urfave/cli/v2/flag_uint64.go @@ -25,6 +25,7 @@ func (f *Uint64Flag) GetCategory() string { func (f *Uint64Flag) Apply(set *flag.FlagSet) error { // set default value so that environment wont be able to overwrite it f.defaultValue = f.Value + f.defaultValueSet = true if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { if val != "" { @@ -69,7 +70,10 @@ func (f *Uint64Flag) GetDefaultText() string { if f.DefaultText != "" { return f.DefaultText } - return fmt.Sprintf("%d", f.defaultValue) + if f.defaultValueSet { + return fmt.Sprintf("%d", f.defaultValue) + } + return fmt.Sprintf("%d", f.Value) } // GetEnvVars returns the env vars for this flag diff --git a/vendor/github.com/urfave/cli/v2/flag_uint64_slice.go b/vendor/github.com/urfave/cli/v2/flag_uint64_slice.go index e845dd52..d3420186 100644 --- a/vendor/github.com/urfave/cli/v2/flag_uint64_slice.go +++ b/vendor/github.com/urfave/cli/v2/flag_uint64_slice.go @@ -190,6 +190,15 @@ func (f *Uint64SliceFlag) Get(ctx *Context) []uint64 { return ctx.Uint64Slice(f.Name) } 
+// RunAction executes flag action if set +func (f *Uint64SliceFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Uint64Slice(f.Name)) + } + + return nil +} + // Uint64Slice looks up the value of a local Uint64SliceFlag, returns // nil if not found func (cCtx *Context) Uint64Slice(name string) []uint64 { diff --git a/vendor/github.com/urfave/cli/v2/flag_uint_slice.go b/vendor/github.com/urfave/cli/v2/flag_uint_slice.go index d2aed480..4dc13e12 100644 --- a/vendor/github.com/urfave/cli/v2/flag_uint_slice.go +++ b/vendor/github.com/urfave/cli/v2/flag_uint_slice.go @@ -201,6 +201,15 @@ func (f *UintSliceFlag) Get(ctx *Context) []uint { return ctx.UintSlice(f.Name) } +// RunAction executes flag action if set +func (f *UintSliceFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.UintSlice(f.Name)) + } + + return nil +} + // UintSlice looks up the value of a local UintSliceFlag, returns // nil if not found func (cCtx *Context) UintSlice(name string) []uint { diff --git a/vendor/github.com/urfave/cli/v2/godoc-current.txt b/vendor/github.com/urfave/cli/v2/godoc-current.txt index 6016bd82..4b620fee 100644 --- a/vendor/github.com/urfave/cli/v2/godoc-current.txt +++ b/vendor/github.com/urfave/cli/v2/godoc-current.txt @@ -27,15 +27,15 @@ application: VARIABLES var ( - SuggestFlag SuggestFlagFunc = suggestFlag - SuggestCommand SuggestCommandFunc = suggestCommand + SuggestFlag SuggestFlagFunc = nil // initialized in suggestions.go unless built with urfave_cli_no_suggest + SuggestCommand SuggestCommandFunc = nil // initialized in suggestions.go unless built with urfave_cli_no_suggest SuggestDidYouMeanTemplate string = suggestDidYouMeanTemplate ) var AppHelpTemplate = `NAME: {{template "helpNameTemplate" .}} USAGE: - {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} + {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} VERSION: {{.Version}}{{end}}{{end}}{{if .Description}} @@ -136,7 +136,7 @@ var SubcommandHelpTemplate = `NAME: {{template "helpNameTemplate" .}} USAGE: - {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Description}} + {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Description}} DESCRIPTION: {{template "descriptionTemplate" .}}{{end}}{{if .VisibleCommands}} @@ -253,6 +253,8 @@ type App struct { Usage string // Text to override the USAGE section of help UsageText string + // Whether this command supports arguments + Args bool // Description of the program argument format. 
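The `RunAction` methods added above for `Uint64SliceFlag` and `UintSliceFlag` mean their `Action` callbacks are now invoked after parsing, like those of the other flag types; in the previously vendored v2.25.5 they were silently ignored. A sketch (flag name and validation are illustrative):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			&cli.UintSliceFlag{
				Name: "ports",
				// Runs once the flag has been parsed, e.g. --ports 80 --ports 99999.
				Action: func(ctx *cli.Context, ports []uint) error {
					for _, p := range ports {
						if p > 65535 {
							return fmt.Errorf("invalid port %d", p)
						}
					}
					return nil
				},
			},
		},
		Action: func(ctx *cli.Context) error {
			fmt.Println("ports:", ctx.UintSlice("ports"))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```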
ArgsUsage string // Version of the program @@ -523,6 +525,8 @@ type Command struct { UsageText string // A longer explanation of how the command works Description string + // Whether this command supports arguments + Args bool // A short description of the arguments of this command ArgsUsage string // The category the command is part of @@ -649,7 +653,7 @@ func (cCtx *Context) Bool(name string) bool Bool looks up the value of a local BoolFlag, returns false if not found func (cCtx *Context) Count(name string) int - Count returns the num of occurences of this flag + Count returns the num of occurrences of this flag func (cCtx *Context) Duration(name string) time.Duration Duration looks up the value of a local DurationFlag, returns 0 if not found @@ -2142,6 +2146,9 @@ func (f *Uint64SliceFlag) IsVisible() bool func (f *Uint64SliceFlag) Names() []string Names returns the names of the flag +func (f *Uint64SliceFlag) RunAction(c *Context) error + RunAction executes flag action if set + func (f *Uint64SliceFlag) String() string String returns a readable representation of this value (for usage defaults) @@ -2307,6 +2314,9 @@ func (f *UintSliceFlag) IsVisible() bool func (f *UintSliceFlag) Names() []string Names returns the names of the flag +func (f *UintSliceFlag) RunAction(c *Context) error + RunAction executes flag action if set + func (f *UintSliceFlag) String() string String returns a readable representation of this value (for usage defaults) diff --git a/vendor/github.com/urfave/cli/v2/help.go b/vendor/github.com/urfave/cli/v2/help.go index a71fceb7..640e2904 100644 --- a/vendor/github.com/urfave/cli/v2/help.go +++ b/vendor/github.com/urfave/cli/v2/help.go @@ -69,7 +69,7 @@ var helpCommand = &Command{ } // Case 3, 5 - if (len(cCtx.Command.Subcommands) == 1 && !cCtx.Command.HideHelp) || + if (len(cCtx.Command.Subcommands) == 1 && !cCtx.Command.HideHelp && !cCtx.Command.HideHelpCommand) || (len(cCtx.Command.Subcommands) == 0 && cCtx.Command.HideHelp) { templ := cCtx.Command.CustomHelpTemplate if templ == "" { @@ -209,9 +209,15 @@ func printFlagSuggestions(lastArg string, flags []Flag, writer io.Writer) { func DefaultCompleteWithFlags(cmd *Command) func(cCtx *Context) { return func(cCtx *Context) { - if len(os.Args) > 2 { - lastArg := os.Args[len(os.Args)-2] + var lastArg string + // TODO: This shouldnt depend on os.Args rather it should + // depend on root arguments passed to App + if len(os.Args) > 2 { + lastArg = os.Args[len(os.Args)-2] + } + + if lastArg != "" { if strings.HasPrefix(lastArg, "-") { if cmd != nil { printFlagSuggestions(lastArg, cmd.Flags, cCtx.App.Writer) @@ -272,7 +278,7 @@ func ShowCommandHelp(ctx *Context, command string) error { if ctx.App.CommandNotFound == nil { errMsg := fmt.Sprintf("No help topic for '%v'", command) - if ctx.App.Suggest { + if ctx.App.Suggest && SuggestCommand != nil { if suggestion := SuggestCommand(ctx.Command.Subcommands, command); suggestion != "" { errMsg += ". 
" + suggestion } @@ -370,17 +376,26 @@ func printHelpCustom(out io.Writer, templ string, data interface{}, customFuncs w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0) t := template.Must(template.New("help").Funcs(funcMap).Parse(templ)) - t.New("helpNameTemplate").Parse(helpNameTemplate) - t.New("usageTemplate").Parse(usageTemplate) - t.New("descriptionTemplate").Parse(descriptionTemplate) - t.New("visibleCommandTemplate").Parse(visibleCommandTemplate) - t.New("copyrightTemplate").Parse(copyrightTemplate) - t.New("versionTemplate").Parse(versionTemplate) - t.New("visibleFlagCategoryTemplate").Parse(visibleFlagCategoryTemplate) - t.New("visibleFlagTemplate").Parse(visibleFlagTemplate) - t.New("visibleGlobalFlagCategoryTemplate").Parse(strings.Replace(visibleFlagCategoryTemplate, "OPTIONS", "GLOBAL OPTIONS", -1)) - t.New("authorsTemplate").Parse(authorsTemplate) - t.New("visibleCommandCategoryTemplate").Parse(visibleCommandCategoryTemplate) + templates := map[string]string{ + "helpNameTemplate": helpNameTemplate, + "usageTemplate": usageTemplate, + "descriptionTemplate": descriptionTemplate, + "visibleCommandTemplate": visibleCommandTemplate, + "copyrightTemplate": copyrightTemplate, + "versionTemplate": versionTemplate, + "visibleFlagCategoryTemplate": visibleFlagCategoryTemplate, + "visibleFlagTemplate": visibleFlagTemplate, + "visibleGlobalFlagCategoryTemplate": strings.Replace(visibleFlagCategoryTemplate, "OPTIONS", "GLOBAL OPTIONS", -1), + "authorsTemplate": authorsTemplate, + "visibleCommandCategoryTemplate": visibleCommandCategoryTemplate, + } + for name, value := range templates { + if _, err := t.New(name).Parse(value); err != nil { + if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" { + _, _ = fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err) + } + } + } err := t.Execute(w, data) if err != nil { @@ -409,6 +424,9 @@ func checkVersion(cCtx *Context) bool { } func checkHelp(cCtx *Context) bool { + if HelpFlag == nil { + return false + } found := false for _, name := range HelpFlag.Names() { if cCtx.Bool(name) { @@ -420,24 +438,6 @@ func checkHelp(cCtx *Context) bool { return found } -func checkCommandHelp(c *Context, name string) bool { - if c.Bool("h") || c.Bool("help") { - _ = ShowCommandHelp(c, name) - return true - } - - return false -} - -func checkSubcommandHelp(cCtx *Context) bool { - if cCtx.Bool("h") || cCtx.Bool("help") { - _ = ShowSubcommandHelp(cCtx) - return true - } - - return false -} - func checkShellCompleteFlag(a *App, arguments []string) (bool, []string) { if !a.EnableBashCompletion { return false, arguments diff --git a/vendor/github.com/urfave/cli/v2/mkdocs-requirements.txt b/vendor/github.com/urfave/cli/v2/mkdocs-reqs.txt similarity index 100% rename from vendor/github.com/urfave/cli/v2/mkdocs-requirements.txt rename to vendor/github.com/urfave/cli/v2/mkdocs-reqs.txt diff --git a/vendor/github.com/urfave/cli/v2/mkdocs.yml b/vendor/github.com/urfave/cli/v2/mkdocs.yml index 02657b9e..f7bfd419 100644 --- a/vendor/github.com/urfave/cli/v2/mkdocs.yml +++ b/vendor/github.com/urfave/cli/v2/mkdocs.yml @@ -1,7 +1,7 @@ # NOTE: the mkdocs dependencies will need to be installed out of # band until this whole thing gets more automated: # -# pip install -r mkdocs-requirements.txt +# pip install -r mkdocs-reqs.txt # site_name: urfave/cli diff --git a/vendor/github.com/urfave/cli/v2/suggestions.go b/vendor/github.com/urfave/cli/v2/suggestions.go index 87fa905d..9d2b7a81 100644 --- a/vendor/github.com/urfave/cli/v2/suggestions.go +++ 
b/vendor/github.com/urfave/cli/v2/suggestions.go @@ -1,3 +1,6 @@ +//go:build !urfave_cli_no_suggest +// +build !urfave_cli_no_suggest + package cli import ( @@ -6,6 +9,11 @@ import ( "github.com/xrash/smetrics" ) +func init() { + SuggestFlag = suggestFlag + SuggestCommand = suggestCommand +} + func jaroWinkler(a, b string) float64 { // magic values are from https://github.com/xrash/smetrics/blob/039620a656736e6ad994090895784a7af15e0b80/jaro-winkler.go#L8 const ( @@ -21,7 +29,7 @@ func suggestFlag(flags []Flag, provided string, hideHelp bool) string { for _, flag := range flags { flagNames := flag.Names() - if !hideHelp { + if !hideHelp && HelpFlag != nil { flagNames = append(flagNames, HelpFlag.Names()...) } for _, name := range flagNames { diff --git a/vendor/github.com/urfave/cli/v2/template.go b/vendor/github.com/urfave/cli/v2/template.go index da98890e..daad4700 100644 --- a/vendor/github.com/urfave/cli/v2/template.go +++ b/vendor/github.com/urfave/cli/v2/template.go @@ -1,7 +1,7 @@ package cli var helpNameTemplate = `{{$v := offset .HelpName 6}}{{wrap .HelpName 3}}{{if .Usage}} - {{wrap .Usage $v}}{{end}}` -var usageTemplate = `{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}` +var usageTemplate = `{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}[arguments...]{{end}}{{end}}` var descriptionTemplate = `{{wrap .Description 3}}` var authorsTemplate = `{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}: {{range $index, $author := .Authors}}{{if $index}} @@ -35,7 +35,7 @@ var AppHelpTemplate = `NAME: {{template "helpNameTemplate" .}} USAGE: - {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} + {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} VERSION: {{.Version}}{{end}}{{end}}{{if .Description}} @@ -83,7 +83,7 @@ var SubcommandHelpTemplate = `NAME: {{template "helpNameTemplate" .}} USAGE: - {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Description}} + {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Description}} DESCRIPTION: {{template "descriptionTemplate" .}}{{end}}{{if .VisibleCommands}} diff --git a/vendor/github.com/urfave/cli/v2/zz_generated.flags.go b/vendor/github.com/urfave/cli/v2/zz_generated.flags.go index 73e77145..f2fc8c88 100644 --- a/vendor/github.com/urfave/cli/v2/zz_generated.flags.go +++ b/vendor/github.com/urfave/cli/v2/zz_generated.flags.go @@ -23,7 +23,8 @@ type Float64SliceFlag struct { Aliases []string EnvVars []string - defaultValue *Float64Slice + defaultValue *Float64Slice + defaultValueSet bool separator separatorSpec @@ -69,7 +70,8 @@ type GenericFlag struct { Aliases []string 
EnvVars []string - defaultValue Generic + defaultValue Generic + defaultValueSet bool TakesFile bool @@ -120,7 +122,8 @@ type Int64SliceFlag struct { Aliases []string EnvVars []string - defaultValue *Int64Slice + defaultValue *Int64Slice + defaultValueSet bool separator separatorSpec @@ -166,7 +169,8 @@ type IntSliceFlag struct { Aliases []string EnvVars []string - defaultValue *IntSlice + defaultValue *IntSlice + defaultValueSet bool separator separatorSpec @@ -212,7 +216,8 @@ type PathFlag struct { Aliases []string EnvVars []string - defaultValue Path + defaultValue Path + defaultValueSet bool TakesFile bool @@ -263,7 +268,8 @@ type StringSliceFlag struct { Aliases []string EnvVars []string - defaultValue *StringSlice + defaultValue *StringSlice + defaultValueSet bool separator separatorSpec @@ -313,7 +319,8 @@ type TimestampFlag struct { Aliases []string EnvVars []string - defaultValue *Timestamp + defaultValue *Timestamp + defaultValueSet bool Layout string @@ -366,7 +373,8 @@ type Uint64SliceFlag struct { Aliases []string EnvVars []string - defaultValue *Uint64Slice + defaultValue *Uint64Slice + defaultValueSet bool separator separatorSpec @@ -412,7 +420,8 @@ type UintSliceFlag struct { Aliases []string EnvVars []string - defaultValue *UintSlice + defaultValue *UintSlice + defaultValueSet bool separator separatorSpec @@ -458,7 +467,8 @@ type BoolFlag struct { Aliases []string EnvVars []string - defaultValue bool + defaultValue bool + defaultValueSet bool Count *int @@ -511,7 +521,8 @@ type Float64Flag struct { Aliases []string EnvVars []string - defaultValue float64 + defaultValue float64 + defaultValueSet bool Action func(*Context, float64) error } @@ -560,7 +571,8 @@ type IntFlag struct { Aliases []string EnvVars []string - defaultValue int + defaultValue int + defaultValueSet bool Base int @@ -611,7 +623,8 @@ type Int64Flag struct { Aliases []string EnvVars []string - defaultValue int64 + defaultValue int64 + defaultValueSet bool Base int @@ -662,7 +675,8 @@ type StringFlag struct { Aliases []string EnvVars []string - defaultValue string + defaultValue string + defaultValueSet bool TakesFile bool @@ -713,7 +727,8 @@ type DurationFlag struct { Aliases []string EnvVars []string - defaultValue time.Duration + defaultValue time.Duration + defaultValueSet bool Action func(*Context, time.Duration) error } @@ -762,7 +777,8 @@ type UintFlag struct { Aliases []string EnvVars []string - defaultValue uint + defaultValue uint + defaultValueSet bool Base int @@ -813,7 +829,8 @@ type Uint64Flag struct { Aliases []string EnvVars []string - defaultValue uint64 + defaultValue uint64 + defaultValueSet bool Base int diff --git a/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go b/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go index ba624fbc..483a086d 100644 --- a/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go +++ b/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go @@ -106,6 +106,14 @@ func (errs List) As(target interface{}) bool { return false } +func (errs List) Unwrap() []error { + l := make([]error, len(errs)) + for i, err := range errs { + l[i] = err + } + return l +} + func WrapPath(path ast.Path, err error) *Error { if err == nil { return nil diff --git a/vendor/github.com/yusufpapurcu/wmi/wmi.go b/vendor/github.com/yusufpapurcu/wmi/wmi.go index 26c3581c..03f386ed 100644 --- a/vendor/github.com/yusufpapurcu/wmi/wmi.go +++ b/vendor/github.com/yusufpapurcu/wmi/wmi.go @@ -456,6 +456,18 @@ func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismat 
Reason: "not a Float32", } } + case float64: + switch f.Kind() { + case reflect.Float32, reflect.Float64: + f.SetFloat(val) + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "not a Float64", + } + } + default: if f.Kind() == reflect.Slice { switch f.Type().Elem().Kind() { diff --git a/vendor/go.uber.org/zap/.golangci.yml b/vendor/go.uber.org/zap/.golangci.yml index fbc6df79..2346df13 100644 --- a/vendor/go.uber.org/zap/.golangci.yml +++ b/vendor/go.uber.org/zap/.golangci.yml @@ -17,7 +17,7 @@ linters: - unused # Our own extras: - - gofmt + - gofumpt - nolintlint # lints nolint directives - revive diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl index 92aa65d6..4fea3027 100644 --- a/vendor/go.uber.org/zap/.readme.tmpl +++ b/vendor/go.uber.org/zap/.readme.tmpl @@ -1,7 +1,15 @@ # :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] +
+ Blazing fast, structured, leveled logging in Go. +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +
+ ## Installation `go get -u go.uber.org/zap` @@ -92,7 +100,7 @@ standard.
-Released under the [MIT License](LICENSE.txt). +Released under the [MIT License](LICENSE). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index 11b46597..6d6cd5f4 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -3,14 +3,30 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## 1.27.0 (20 Feb 2024) +Enhancements: +* [#1378][]: Add `WithLazy` method for `SugaredLogger`. +* [#1399][]: zaptest: Add `NewTestingWriter` for customizing TestingWriter with more flexibility than `NewLogger`. +* [#1406][]: Add `Log`, `Logw`, `Logln` methods for `SugaredLogger`. +* [#1416][]: Add `WithPanicHook` option for testing panic logs. + +Thanks to @defval, @dimmo, @arxeiss, and @MKrupauskas for their contributions to this release. + +[#1378]: https://github.com/uber-go/zap/pull/1378 +[#1399]: https://github.com/uber-go/zap/pull/1399 +[#1406]: https://github.com/uber-go/zap/pull/1406 +[#1416]: https://github.com/uber-go/zap/pull/1416 + ## 1.26.0 (14 Sep 2023) Enhancements: +* [#1297][]: Add Dict as a Field. * [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured context. * [#1350][]: String encoding is much (~50%) faster now. -Thanks to @jquirke, @cdvr1993 for their contributions to this release. +Thanks to @hhk7734, @jquirke, and @cdvr1993 for their contributions to this release. +[#1297]: https://github.com/uber-go/zap/pull/1297 [#1319]: https://github.com/uber-go/zap/pull/1319 [#1350]: https://github.com/uber-go/zap/pull/1350 @@ -25,7 +41,7 @@ Enhancements: * [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set. * [#1281][]: Add `zap/exp/expfield` package which contains helper methods `Str` and `Strs` for constructing String-like zap.Fields. -* [#1310][]: Reduce stack size on `Any`. +* [#1310][]: Reduce stack size on `Any`. Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions to this release. @@ -352,7 +368,7 @@ to this release. [#675]: https://github.com/uber-go/zap/pull/675 [#704]: https://github.com/uber-go/zap/pull/704 -## v1.9.1 (06 Aug 2018) +## 1.9.1 (06 Aug 2018) Bugfixes: @@ -360,7 +376,7 @@ Bugfixes: [#614]: https://github.com/uber-go/zap/pull/614 -## v1.9.0 (19 Jul 2018) +## 1.9.0 (19 Jul 2018) Enhancements: * [#602][]: Reduce number of allocations when logging with reflection. @@ -373,7 +389,7 @@ Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and [#572]: https://github.com/uber-go/zap/pull/572 [#606]: https://github.com/uber-go/zap/pull/606 -## v1.8.0 (13 Apr 2018) +## 1.8.0 (13 Apr 2018) Enhancements: * [#508][]: Make log level configurable when redirecting the standard @@ -391,14 +407,14 @@ Thanks to @DiSiqueira and @djui for their contributions to this release. [#577]: https://github.com/uber-go/zap/pull/577 [#574]: https://github.com/uber-go/zap/pull/574 -## v1.7.1 (25 Sep 2017) +## 1.7.1 (25 Sep 2017) Bugfixes: * [#504][]: Store strings when using AddByteString with the map encoder. 
[#504]: https://github.com/uber-go/zap/pull/504 -## v1.7.0 (21 Sep 2017) +## 1.7.0 (21 Sep 2017) Enhancements: @@ -407,7 +423,7 @@ Enhancements: [#487]: https://github.com/uber-go/zap/pull/487 -## v1.6.0 (30 Aug 2017) +## 1.6.0 (30 Aug 2017) Enhancements: @@ -418,7 +434,7 @@ Enhancements: [#490]: https://github.com/uber-go/zap/pull/490 [#491]: https://github.com/uber-go/zap/pull/491 -## v1.5.0 (22 Jul 2017) +## 1.5.0 (22 Jul 2017) Enhancements: @@ -436,7 +452,7 @@ Thanks to @richard-tunein and @pavius for their contributions to this release. [#460]: https://github.com/uber-go/zap/pull/460 [#470]: https://github.com/uber-go/zap/pull/470 -## v1.4.1 (08 Jun 2017) +## 1.4.1 (08 Jun 2017) This release fixes two bugs. @@ -448,7 +464,7 @@ Bugfixes: [#435]: https://github.com/uber-go/zap/pull/435 [#444]: https://github.com/uber-go/zap/pull/444 -## v1.4.0 (12 May 2017) +## 1.4.0 (12 May 2017) This release adds a few small features and is fully backward-compatible. @@ -464,7 +480,7 @@ Enhancements: [#425]: https://github.com/uber-go/zap/pull/425 [#431]: https://github.com/uber-go/zap/pull/431 -## v1.3.0 (25 Apr 2017) +## 1.3.0 (25 Apr 2017) This release adds an enhancement to zap's testing helpers as well as the ability to marshal an AtomicLevel. It is fully backward-compatible. @@ -478,7 +494,7 @@ Enhancements: [#415]: https://github.com/uber-go/zap/pull/415 [#416]: https://github.com/uber-go/zap/pull/416 -## v1.2.0 (13 Apr 2017) +## 1.2.0 (13 Apr 2017) This release adds a gRPC compatibility wrapper. It is fully backward-compatible. @@ -489,7 +505,7 @@ Enhancements: [#402]: https://github.com/uber-go/zap/pull/402 -## v1.1.0 (31 Mar 2017) +## 1.1.0 (31 Mar 2017) This release fixes two bugs and adds some enhancements to zap's testing helpers. It is fully backward-compatible. @@ -510,7 +526,7 @@ Thanks to @moitias for contributing to this release. [#396]: https://github.com/uber-go/zap/pull/396 [#386]: https://github.com/uber-go/zap/pull/386 -## v1.0.0 (14 Mar 2017) +## 1.0.0 (14 Mar 2017) This is zap's first stable release. All exported APIs are now final, and no further breaking changes will be made in the 1.x release series. Anyone using a @@ -569,7 +585,7 @@ contributions to this release. [#365]: https://github.com/uber-go/zap/pull/365 [#372]: https://github.com/uber-go/zap/pull/372 -## v1.0.0-rc.3 (7 Mar 2017) +## 1.0.0-rc.3 (7 Mar 2017) This is the third release candidate for zap's stable release. There are no breaking changes. @@ -595,7 +611,7 @@ Thanks to @ansel1 and @suyash for their contributions to this release. [#353]: https://github.com/uber-go/zap/pull/353 [#311]: https://github.com/uber-go/zap/pull/311 -## v1.0.0-rc.2 (21 Feb 2017) +## 1.0.0-rc.2 (21 Feb 2017) This is the second release candidate for zap's stable release. It includes two breaking changes. @@ -641,7 +657,7 @@ Thanks to @skipor and @chapsuk for their contributions to this release. [#326]: https://github.com/uber-go/zap/pull/326 [#300]: https://github.com/uber-go/zap/pull/300 -## v1.0.0-rc.1 (14 Feb 2017) +## 1.0.0-rc.1 (14 Feb 2017) This is the first release candidate for zap's stable release. There are multiple breaking changes and improvements from the pre-release version. Most notably: @@ -661,7 +677,7 @@ breaking changes and improvements from the pre-release version. Most notably: * Sampling is more accurate, and doesn't depend on the standard library's shared timer heap. 
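The 1.27.0 changelog entry above lists WithPanicHook, SugaredLogger.WithLazy, and the level-parameterized Log/Logw/Logln methods. A minimal sketch of how these additions could be combined, assuming the zap v1.27.0 API vendored by this update; the observer test core and the field names are illustrative only:

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"go.uber.org/zap/zaptest/observer"
)

func main() {
	// Capture output in memory so the sketch has no side effects.
	core, observed := observer.New(zapcore.DebugLevel)

	// WithPanicHook swaps the terminal panic behaviour, e.g. for testing
	// Panic/DPanic output without crashing the process.
	logger := zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit))
	sugar := logger.Sugar()

	// Logw takes the level as a value instead of encoding it in the method name.
	sugar.Logw(zapcore.InfoLevel, "dependencies bumped",
		"module", "go.uber.org/zap", "version", "v1.27.0")

	// WithLazy adds context whose fields are only materialized if the
	// child logger is actually used.
	lazy := sugar.WithLazy("entries_so_far", observed.Len())
	lazy.Logln(zapcore.DebugLevel, "lazy context attached")
}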
-## v0.1.0-beta.1 (6 Feb 2017) +## 0.1.0-beta.1 (6 Feb 2017) This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and upgrade at their leisure. Since this is the first tagged release, there are no diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE similarity index 100% rename from vendor/go.uber.org/zap/LICENSE.txt rename to vendor/go.uber.org/zap/LICENSE diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md index 9de08927..a17035cb 100644 --- a/vendor/go.uber.org/zap/README.md +++ b/vendor/go.uber.org/zap/README.md @@ -1,7 +1,16 @@ -# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] +# :zap: zap + + +
Blazing fast, structured, leveled logging in Go. +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +
+ ## Installation `go get -u go.uber.org/zap` @@ -66,41 +75,44 @@ Log a message and 10 fields: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 1744 ns/op | +0% | 5 allocs/op -| :zap: zap (sugared) | 2483 ns/op | +42% | 10 allocs/op -| zerolog | 918 ns/op | -47% | 1 allocs/op -| go-kit | 5590 ns/op | +221% | 57 allocs/op -| slog | 5640 ns/op | +223% | 40 allocs/op -| apex/log | 21184 ns/op | +1115% | 63 allocs/op -| logrus | 24338 ns/op | +1296% | 79 allocs/op -| log15 | 26054 ns/op | +1394% | 74 allocs/op +| :zap: zap | 656 ns/op | +0% | 5 allocs/op +| :zap: zap (sugared) | 935 ns/op | +43% | 10 allocs/op +| zerolog | 380 ns/op | -42% | 1 allocs/op +| go-kit | 2249 ns/op | +243% | 57 allocs/op +| slog (LogAttrs) | 2479 ns/op | +278% | 40 allocs/op +| slog | 2481 ns/op | +278% | 42 allocs/op +| apex/log | 9591 ns/op | +1362% | 63 allocs/op +| log15 | 11393 ns/op | +1637% | 75 allocs/op +| logrus | 11654 ns/op | +1677% | 79 allocs/op Log a message with a logger that already has 10 fields of context: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 193 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 227 ns/op | +18% | 1 allocs/op -| zerolog | 81 ns/op | -58% | 0 allocs/op -| slog | 322 ns/op | +67% | 0 allocs/op -| go-kit | 5377 ns/op | +2686% | 56 allocs/op -| apex/log | 19518 ns/op | +10013% | 53 allocs/op -| log15 | 19812 ns/op | +10165% | 70 allocs/op -| logrus | 21997 ns/op | +11297% | 68 allocs/op +| :zap: zap | 67 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 84 ns/op | +25% | 1 allocs/op +| zerolog | 35 ns/op | -48% | 0 allocs/op +| slog | 193 ns/op | +188% | 0 allocs/op +| slog (LogAttrs) | 200 ns/op | +199% | 0 allocs/op +| go-kit | 2460 ns/op | +3572% | 56 allocs/op +| log15 | 9038 ns/op | +13390% | 70 allocs/op +| apex/log | 9068 ns/op | +13434% | 53 allocs/op +| logrus | 10521 ns/op | +15603% | 68 allocs/op Log a static string, without any context or `printf`-style templating: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 165 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 212 ns/op | +28% | 1 allocs/op -| zerolog | 95 ns/op | -42% | 0 allocs/op -| slog | 296 ns/op | +79% | 0 allocs/op -| go-kit | 415 ns/op | +152% | 9 allocs/op -| standard library | 422 ns/op | +156% | 2 allocs/op -| apex/log | 1601 ns/op | +870% | 5 allocs/op -| logrus | 3017 ns/op | +1728% | 23 allocs/op -| log15 | 3469 ns/op | +2002% | 20 allocs/op +| :zap: zap | 63 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 81 ns/op | +29% | 1 allocs/op +| zerolog | 32 ns/op | -49% | 0 allocs/op +| standard library | 124 ns/op | +97% | 1 allocs/op +| slog | 196 ns/op | +211% | 0 allocs/op +| slog (LogAttrs) | 200 ns/op | +217% | 0 allocs/op +| go-kit | 213 ns/op | +238% | 9 allocs/op +| apex/log | 771 ns/op | +1124% | 5 allocs/op +| logrus | 1439 ns/op | +2184% | 23 allocs/op +| log15 | 2069 ns/op | +3184% | 20 allocs/op ## Development Status: Stable @@ -120,7 +132,7 @@ standard.
-Released under the [MIT License](LICENSE.txt). +Released under the [MIT License](LICENSE). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go index 27fb5cd5..0b8540c2 100644 --- a/vendor/go.uber.org/zap/buffer/buffer.go +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -42,7 +42,7 @@ func (b *Buffer) AppendByte(v byte) { b.bs = append(b.bs, v) } -// AppendBytes writes a single byte to the Buffer. +// AppendBytes writes the given slice of bytes to the Buffer. func (b *Buffer) AppendBytes(v []byte) { b.bs = append(b.bs, v...) } diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go index c8dd3358..6743930b 100644 --- a/vendor/go.uber.org/zap/field.go +++ b/vendor/go.uber.org/zap/field.go @@ -460,6 +460,8 @@ func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error { // - https://github.com/uber-go/zap/pull/1304 // - https://github.com/uber-go/zap/pull/1305 // - https://github.com/uber-go/zap/pull/1308 +// +// See https://github.com/golang/go/issues/62077 for upstream issue. type anyFieldC[T any] func(string, T) Field func (f anyFieldC[T]) Any(key string, val any) Field { diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index 6205fe48..c4d30032 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -43,6 +43,7 @@ type Logger struct { development bool addCaller bool + onPanic zapcore.CheckWriteHook // default is WriteThenPanic onFatal zapcore.CheckWriteHook // default is WriteThenFatal name string @@ -345,27 +346,12 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Set up any required terminal behavior. switch ent.Level { case zapcore.PanicLevel: - ce = ce.After(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) case zapcore.FatalLevel: - onFatal := log.onFatal - // nil or WriteThenNoop will lead to continued execution after - // a Fatal log entry, which is unexpected. For example, - // - // f, err := os.Open(..) - // if err != nil { - // log.Fatal("cannot open", zap.Error(err)) - // } - // fmt.Println(f.Name()) - // - // The f.Name() will panic if we continue execution after the - // log.Fatal. - if onFatal == nil || onFatal == zapcore.WriteThenNoop { - onFatal = zapcore.WriteThenFatal - } - ce = ce.After(ent, onFatal) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal)) case zapcore.DPanicLevel: if log.development { - ce = ce.After(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) } } @@ -430,3 +416,20 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { return ce } + +func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook { + // A nil or WriteThenNoop hook will lead to continued execution after + // a Panic or Fatal log entry, which is unexpected. For example, + // + // f, err := os.Open(..) + // if err != nil { + // log.Fatal("cannot open", zap.Error(err)) + // } + // fmt.Println(f.Name()) + // + // The f.Name() will panic if we continue execution after the log.Fatal. 
+ if override == nil || override == zapcore.WriteThenNoop { + return defaultHook + } + return override +} diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index c4f3bca3..43d357ac 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -132,6 +132,21 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option { }) } +// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs. +// Zap will call this hook after writing a log statement with a Panic/DPanic level. +// +// For example, the following builds a logger that will exit the current +// goroutine after writing a Panic/DPanic log message, but it will not start a panic. +// +// zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit)) +// +// This is useful for testing Panic/DPanic log output. +func WithPanicHook(hook zapcore.CheckWriteHook) Option { + return optionFunc(func(log *Logger) { + log.onPanic = hook + }) +} + // OnFatal sets the action to take on fatal logs. // // Deprecated: Use [WithFatalHook] instead. diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go index 00ac5fe3..8904cd08 100644 --- a/vendor/go.uber.org/zap/sugar.go +++ b/vendor/go.uber.org/zap/sugar.go @@ -115,6 +115,21 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} } +// WithLazy adds a variadic number of fields to the logging context lazily. +// The fields are evaluated only if the logger is further chained with [With] +// or is written to with any of the log level methods. +// Until that occurs, the logger may retain references to objects inside the fields, +// and logging will reflect the state of an object at the time of logging, +// not the time of WithLazy(). +// +// Similar to [With], fields added to the child don't affect the parent, +// and vice versa. Also, the keys in key-value pairs should be strings. In development, +// passing a non-string key panics, while in production it logs an error and skips the pair. +// Passing an orphaned key has the same behavior. +func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)} +} + // Level reports the minimum enabled level for this logger. // // For NopLoggers, this is [zapcore.InvalidLevel]. @@ -122,6 +137,12 @@ func (s *SugaredLogger) Level() zapcore.Level { return zapcore.LevelOf(s.base.core) } +// Log logs the provided arguments at provided level. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) { + s.log(lvl, "", args, nil) +} + // Debug logs the provided arguments at [DebugLevel]. // Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Debug(args ...interface{}) { @@ -165,6 +186,12 @@ func (s *SugaredLogger) Fatal(args ...interface{}) { s.log(FatalLevel, "", args, nil) } +// Logf formats the message according to the format specifier +// and logs it at provided level. +func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) { + s.log(lvl, template, args, nil) +} + // Debugf formats the message according to the format specifier // and logs it at [DebugLevel]. 
func (s *SugaredLogger) Debugf(template string, args ...interface{}) { @@ -208,6 +235,12 @@ func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { s.log(FatalLevel, template, args, nil) } +// Logw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) { + s.log(lvl, msg, nil, keysAndValues) +} + // Debugw logs a message with some additional context. The variadic key-value // pairs are treated as they are in With. // @@ -255,6 +288,12 @@ func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { s.log(FatalLevel, msg, nil, keysAndValues) } +// Logln logs a message at provided level. +// Spaces are always added between arguments. +func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) { + s.logln(lvl, args, nil) +} + // Debugln logs a message at [DebugLevel]. // Spaces are always added between arguments. func (s *SugaredLogger) Debugln(args ...interface{}) { diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go index 8ca0bfaf..cc2b4e07 100644 --- a/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -77,7 +77,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, // If this ever becomes a performance bottleneck, we can implement // ArrayEncoder for our plain-text format. arr := getSliceEncoder() - if c.TimeKey != "" && c.EncodeTime != nil { + if c.TimeKey != "" && c.EncodeTime != nil && !ent.Time.IsZero() { c.EncodeTime(ent.Time, arr) } if c.LevelKey != "" && c.EncodeLevel != nil { diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go index 5769ff3e..04462541 100644 --- a/vendor/go.uber.org/zap/zapcore/encoder.go +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -37,6 +37,9 @@ const DefaultLineEnding = "\n" const OmitKey = "" // A LevelEncoder serializes a Level to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type LevelEncoder func(Level, PrimitiveArrayEncoder) // LowercaseLevelEncoder serializes a Level to a lowercase string. For example, @@ -90,6 +93,9 @@ func (e *LevelEncoder) UnmarshalText(text []byte) error { } // A TimeEncoder serializes a time.Time to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type TimeEncoder func(time.Time, PrimitiveArrayEncoder) // EpochTimeEncoder serializes a time.Time to a floating-point number of seconds @@ -219,6 +225,9 @@ func (e *TimeEncoder) UnmarshalJSON(data []byte) error { } // A DurationEncoder serializes a time.Duration to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) // SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. @@ -262,6 +271,9 @@ func (e *DurationEncoder) UnmarshalText(text []byte) error { } // A CallerEncoder serializes an EntryCaller to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. 
type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) // FullCallerEncoder serializes a caller in /full/path/to/package/file:line @@ -292,6 +304,9 @@ func (e *CallerEncoder) UnmarshalText(text []byte) error { // A NameEncoder serializes a period-separated logger name to a primitive // type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type NameEncoder func(string, PrimitiveArrayEncoder) // FullNameEncoder serializes the logger name as-is. diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go index 95bdb0a1..308c9781 100644 --- a/vendor/go.uber.org/zap/zapcore/field.go +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -47,7 +47,7 @@ const ( ByteStringType // Complex128Type indicates that the field carries a complex128. Complex128Type - // Complex64Type indicates that the field carries a complex128. + // Complex64Type indicates that the field carries a complex64. Complex64Type // DurationType indicates that the field carries a time.Duration. DurationType diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go index c8ab8697..9685169b 100644 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -372,7 +372,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, final.AppendString(ent.Level.String()) } } - if final.TimeKey != "" { + if final.TimeKey != "" && !ent.Time.IsZero() { final.AddTime(final.TimeKey, ent.Time) } if ent.LoggerName != "" && final.NameKey != "" { diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go index 4269ed11..bf225953 100644 --- a/vendor/golang.org/x/crypto/ocsp/ocsp.go +++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go @@ -279,21 +279,22 @@ func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier { // This is the exposed reflection of the internal OCSP structures. -// The status values that can be expressed in OCSP. See RFC 6960. +// The status values that can be expressed in OCSP. See RFC 6960. +// These are used for the Response.Status field. const ( // Good means that the certificate is valid. - Good = iota + Good = 0 // Revoked means that the certificate has been deliberately revoked. - Revoked + Revoked = 1 // Unknown means that the OCSP responder doesn't know about the certificate. - Unknown + Unknown = 2 // ServerFailed is unused and was never used (see // https://go-review.googlesource.com/#/c/18944). ParseResponse will // return a ResponseError when an error response is parsed. - ServerFailed + ServerFailed = 3 ) -// The enumerated reasons for revoking a certificate. See RFC 5280. +// The enumerated reasons for revoking a certificate. See RFC 5280. 
const ( Unspecified = 0 KeyCompromise = 1 diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go index de67f938..3c57880d 100644 --- a/vendor/golang.org/x/net/html/token.go +++ b/vendor/golang.org/x/net/html/token.go @@ -910,9 +910,6 @@ func (z *Tokenizer) readTagAttrKey() { return } switch c { - case ' ', '\n', '\r', '\t', '\f', '/': - z.pendingAttr[0].end = z.raw.end - 1 - return case '=': if z.pendingAttr[0].start+1 == z.raw.end { // WHATWG 13.2.5.32, if we see an equals sign before the attribute name @@ -920,7 +917,9 @@ func (z *Tokenizer) readTagAttrKey() { continue } fallthrough - case '>': + case ' ', '\n', '\r', '\t', '\f', '/', '>': + // WHATWG 13.2.5.33 Attribute name state + // We need to reconsume the char in the after attribute name state to support the / character z.raw.end-- z.pendingAttr[0].end = z.raw.end return @@ -939,6 +938,11 @@ func (z *Tokenizer) readTagAttrVal() { if z.err != nil { return } + if c == '/' { + // WHATWG 13.2.5.34 After attribute name state + // U+002F SOLIDUS (/) - Switch to the self-closing start tag state. + return + } if c != '=' { z.raw.end-- return diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index c1f6b90d..e2b298d8 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1510,13 +1510,12 @@ func (mh *MetaHeadersFrame) checkPseudos() error { } func (fr *Framer) maxHeaderStringLen() int { - v := fr.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) + v := int(fr.maxHeaderListSize()) + if v < 0 { + // If maxHeaderListSize overflows an int, use no limit (0). + return 0 } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 + return v } // readMetaFrame returns 0 or more CONTINUATION frames from fr and diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index c6492020..fdcaa974 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -584,7 +584,7 @@ ccflags="$@" $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEYCTL_/ || $2 ~ /^PERF_/ || - $2 ~ /^SECCOMP_MODE_/ || + $2 ~ /^SECCOMP_/ || $2 ~ /^SEEK_/ || $2 ~ /^SCHED_/ || $2 ~ /^SPLICE_/ || diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index a5d3ff8d..36bf8399 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1785,6 +1785,8 @@ const ( LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 + LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 + LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef @@ -2465,6 +2467,7 @@ const ( PR_MCE_KILL_GET = 0x22 PR_MCE_KILL_LATE = 0x0 PR_MCE_KILL_SET = 0x1 + PR_MDWE_NO_INHERIT = 0x2 PR_MDWE_REFUSE_EXEC_GAIN = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b @@ -2669,8 +2672,9 @@ const ( RTAX_FEATURES = 0xc RTAX_FEATURE_ALLFRAG = 0x8 RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_MASK = 0x1f RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TCP_USEC_TS = 0x10 RTAX_FEATURE_TIMESTAMP = 0x4 RTAX_HOPLIMIT = 0xa RTAX_INITCWND = 0xb @@ -2913,9 +2917,38 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 + SECCOMP_ADDFD_FLAG_SEND = 0x2 + SECCOMP_ADDFD_FLAG_SETFD = 0x1 + SECCOMP_FILTER_FLAG_LOG 
= 0x2 + SECCOMP_FILTER_FLAG_NEW_LISTENER = 0x8 + SECCOMP_FILTER_FLAG_SPEC_ALLOW = 0x4 + SECCOMP_FILTER_FLAG_TSYNC = 0x1 + SECCOMP_FILTER_FLAG_TSYNC_ESRCH = 0x10 + SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV = 0x20 + SECCOMP_GET_ACTION_AVAIL = 0x2 + SECCOMP_GET_NOTIF_SIZES = 0x3 + SECCOMP_IOCTL_NOTIF_RECV = 0xc0502100 + SECCOMP_IOCTL_NOTIF_SEND = 0xc0182101 + SECCOMP_IOC_MAGIC = '!' SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECCOMP_RET_ACTION = 0x7fff0000 + SECCOMP_RET_ACTION_FULL = 0xffff0000 + SECCOMP_RET_ALLOW = 0x7fff0000 + SECCOMP_RET_DATA = 0xffff + SECCOMP_RET_ERRNO = 0x50000 + SECCOMP_RET_KILL = 0x0 + SECCOMP_RET_KILL_PROCESS = 0x80000000 + SECCOMP_RET_KILL_THREAD = 0x0 + SECCOMP_RET_LOG = 0x7ffc0000 + SECCOMP_RET_TRACE = 0x7ff00000 + SECCOMP_RET_TRAP = 0x30000 + SECCOMP_RET_USER_NOTIF = 0x7fc00000 + SECCOMP_SET_MODE_FILTER = 0x1 + SECCOMP_SET_MODE_STRICT = 0x0 + SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP = 0x1 + SECCOMP_USER_NOTIF_FLAG_CONTINUE = 0x1 SECRETMEM_MAGIC = 0x5345434d SECURITYFS_MAGIC = 0x73636673 SEEK_CUR = 0x1 @@ -3075,6 +3108,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_UDP = 0x11 + SOL_VSOCK = 0x11f SOL_X25 = 0x106 SOL_XDP = 0x11b SOMAXCONN = 0x1000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 4920821c..42ff8c3c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index a0c1e411..dca43600 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -282,6 +282,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index c6398556..5cca668a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -288,6 +288,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 47cc62e2..d8cae6d1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -278,6 +278,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 
27ac4a09..28e39afd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -275,6 +275,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 54694642..cd66e92c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 3adb81d7..c1595eba 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 2dfe98f0..ee9456b0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index f5398f84..8cfca81e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index c54f152d..60b0deb3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -336,6 +336,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 76057dc7..f90aa728 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d 
SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e0c3725e..ba9e0150 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 18f2813e..07cdfd6e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -272,6 +272,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 11619d4e..2f1dd214 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -344,6 +344,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 396d994d..f40519d9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -335,6 +335,9 @@ const ( SCM_TIMESTAMPNS = 0x21 SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x400000 SFD_NONBLOCK = 0x4000 SF_FP = 0x38 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index fcf3ecbd..0cc3ce49 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -448,4 +448,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f56dc250..856d92d6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -371,4 +371,7 @@ const ( SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 974bf246..8d467094 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -412,4 +412,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 39a2739e..edc17324 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -315,4 +315,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index cf9c9d77..445eba20 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -309,4 +309,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 10b7362e..adba01bc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index cd4d8b4f..014c4e9c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 2c0efca8..ccc97d74 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index a72e31d3..ec2b64a9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index c7d1e374..21a839e3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -439,4 +439,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 
+ SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index f4d4838c..c11121ec 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index b64f0e59..909b631f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 95711195..e49bed16 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -316,4 +316,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index f94e943b..66017d2d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -377,4 +377,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index ba0c2bc5..47bab18d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -390,4 +390,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index bbf8399f..dc0c955e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -174,7 +174,8 @@ type FscryptPolicyV2 struct { Contents_encryption_mode uint8 Filenames_encryption_mode uint8 Flags uint8 - _ [4]uint8 + Log2_data_unit_size uint8 + _ [3]uint8 Master_key_identifier [16]uint8 } @@ -455,60 +456,63 @@ type Ucred struct { } type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 - Pacing_rate uint64 - Max_pacing_rate uint64 - Bytes_acked uint64 - 
Bytes_received uint64 - Segs_out uint32 - Segs_in uint32 - Notsent_bytes uint32 - Min_rtt uint32 - Data_segs_in uint32 - Data_segs_out uint32 - Delivery_rate uint64 - Busy_time uint64 - Rwnd_limited uint64 - Sndbuf_limited uint64 - Delivered uint32 - Delivered_ce uint32 - Bytes_sent uint64 - Bytes_retrans uint64 - Dsack_dups uint32 - Reord_seen uint32 - Rcv_ooopack uint32 - Snd_wnd uint32 - Rcv_wnd uint32 - Rehash uint32 + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 + Bytes_received uint64 + Segs_out uint32 + Segs_in uint32 + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 + Data_segs_out uint32 + Delivery_rate uint64 + Busy_time uint64 + Rwnd_limited uint64 + Sndbuf_limited uint64 + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 + Bytes_retrans uint64 + Dsack_dups uint32 + Reord_seen uint32 + Rcv_ooopack uint32 + Snd_wnd uint32 + Rcv_wnd uint32 + Rehash uint32 + Total_rto uint16 + Total_rto_recoveries uint16 + Total_rto_time uint32 } type CanFilter struct { @@ -551,7 +555,7 @@ const ( SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc - SizeofTCPInfo = 0xf0 + SizeofTCPInfo = 0xf8 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -3399,7 +3403,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 ) type FsverityDigest struct { @@ -4183,7 +4187,8 @@ const ( ) type LandlockRulesetAttr struct { - Access_fs uint64 + Access_fs uint64 + Access_net uint64 } type LandlockPathBeneathAttr struct { @@ -5134,7 +5139,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1b + NL80211_FREQUENCY_ATTR_MAX = 0x1c NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5547,7 +5552,7 @@ const ( NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2 NL80211_REGDOM_TYPE_INTERSECTION = 0x3 NL80211_REGDOM_TYPE_WORLD = 0x1 - NL80211_REG_RULE_ATTR_MAX = 0x7 + NL80211_REG_RULE_ATTR_MAX = 0x8 NL80211_REKEY_DATA_AKM = 0x4 NL80211_REKEY_DATA_KCK = 0x2 NL80211_REKEY_DATA_KEK = 0x1 diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go index b8ad1925..d4577a42 100644 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -37,14 +37,17 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { return nil, err } defer DestroyEnvironmentBlock(block) - blockp := unsafe.Pointer(block) - for { - entry := UTF16PtrToString((*uint16)(blockp)) - if len(entry) == 0 { - break + size := unsafe.Sizeof(*block) + for *block != 0 { + // find NUL terminator + end := unsafe.Pointer(block) + for *(*uint16)(end) != 0 { + end = unsafe.Add(end, size) } - env = append(env, entry) - blockp = unsafe.Add(blockp, 2*(len(entry)+1)) + 
+ entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size) + env = append(env, UTF16ToString(entry)) + block = (*uint16)(unsafe.Add(end, size)) } return env, nil } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index ffb8708c..6395a031 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -125,8 +125,7 @@ func UTF16PtrToString(p *uint16) string { for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) } - - return string(utf16.Decode(unsafe.Slice(p, n))) + return UTF16ToString(unsafe.Slice(p, n)) } func Getpagesize() int { return 4096 } diff --git a/vendor/golang.org/x/tools/go/buildutil/tags.go b/vendor/golang.org/x/tools/go/buildutil/tags.go index 7cf523bc..32c8d142 100644 --- a/vendor/golang.org/x/tools/go/buildutil/tags.go +++ b/vendor/golang.org/x/tools/go/buildutil/tags.go @@ -4,17 +4,22 @@ package buildutil -// This logic was copied from stringsFlag from $GOROOT/src/cmd/go/build.go. +// This duplicated logic must be kept in sync with that from go build: +// $GOROOT/src/cmd/go/internal/work/build.go (tagsFlag.Set) +// $GOROOT/src/cmd/go/internal/base/flag.go (StringsFlag.Set) +// $GOROOT/src/cmd/internal/quoted/quoted.go (isSpaceByte, Split) -import "fmt" +import ( + "fmt" + "strings" +) const TagsFlagDoc = "a list of `build tags` to consider satisfied during the build. " + "For more information about build tags, see the description of " + "build constraints in the documentation for the go/build package" // TagsFlag is an implementation of the flag.Value and flag.Getter interfaces that parses -// a flag value in the same manner as go build's -tags flag and -// populates a []string slice. +// a flag value the same as go build's -tags flag and populates a []string slice. // // See $GOROOT/src/go/build/doc.go for description of build tags. // See $GOROOT/src/cmd/go/doc.go for description of 'go build -tags' flag. @@ -25,19 +30,32 @@ const TagsFlagDoc = "a list of `build tags` to consider satisfied during the bui type TagsFlag []string func (v *TagsFlag) Set(s string) error { - var err error - *v, err = splitQuotedFields(s) - if *v == nil { - *v = []string{} + // See $GOROOT/src/cmd/go/internal/work/build.go (tagsFlag.Set) + // For compatibility with Go 1.12 and earlier, allow "-tags='a b c'" or even just "-tags='a'". + if strings.Contains(s, " ") || strings.Contains(s, "'") { + var err error + *v, err = splitQuotedFields(s) + if *v == nil { + *v = []string{} + } + return err } - return err + + // Starting in Go 1.13, the -tags flag is a comma-separated list of build tags. + *v = []string{} + for _, s := range strings.Split(s, ",") { + if s != "" { + *v = append(*v, s) + } + } + return nil } func (v *TagsFlag) Get() interface{} { return *v } func splitQuotedFields(s string) ([]string, error) { - // Split fields allowing '' or "" around elements. - // Quotes further inside the string do not count. + // See $GOROOT/src/cmd/internal/quoted/quoted.go (Split) + // This must remain in sync with that logic. var f []string for len(s) > 0 { for len(s) > 0 && isSpaceByte(s[0]) { @@ -76,5 +94,7 @@ func (v *TagsFlag) String() string { } func isSpaceByte(c byte) bool { + // See $GOROOT/src/cmd/internal/quoted/quoted.go (isSpaceByte, Split) + // This list must remain in sync with that. 
return c == ' ' || c == '\t' || c == '\n' || c == '\r' } diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index b2a0b7c6..a8d7b06a 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -15,22 +15,10 @@ Load passes most patterns directly to the underlying build tool. The default build tool is the go command. Its supported patterns are described at https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns. +Other build systems may be supported by providing a "driver"; +see [The driver protocol]. -Load may be used in Go projects that use alternative build systems, by -installing an appropriate "driver" program for the build system and -specifying its location in the GOPACKAGESDRIVER environment variable. -For example, -https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration -explains how to use the driver for Bazel. -The driver program is responsible for interpreting patterns in its -preferred notation and reporting information about the packages that -they identify. -(See driverRequest and driverResponse types for the JSON -schema used by the protocol. -Though the protocol is supported, these types are currently unexported; -see #64608 for a proposal to publish them.) - -Regardless of driver, all patterns with the prefix "query=", where query is a +All patterns with the prefix "query=", where query is a non-empty string of letters from [a-z], are reserved and may be interpreted as query operators. @@ -86,7 +74,29 @@ for details. Most tools should pass their command-line arguments (after any flags) uninterpreted to [Load], so that it can interpret them according to the conventions of the underlying build system. + See the Example function for typical usage. + +# The driver protocol + +[Load] may be used to load Go packages even in Go projects that use +alternative build systems, by installing an appropriate "driver" +program for the build system and specifying its location in the +GOPACKAGESDRIVER environment variable. +For example, +https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration +explains how to use the driver for Bazel. + +The driver program is responsible for interpreting patterns in its +preferred notation and reporting information about the packages that +those patterns identify. Drivers must also support the special "file=" +and "pattern=" patterns described above. + +The patterns are provided as positional command-line arguments. A +JSON-encoded [DriverRequest] message providing additional information +is written to the driver's standard input. The driver must write a +JSON-encoded [DriverResponse] message to its standard output. (This +message differs from the JSON schema produced by 'go list'.) */ package packages // import "golang.org/x/tools/go/packages" diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 7db1d129..4335c1eb 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -2,12 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file enables an external tool to intercept package requests. -// If the tool is present then its results are used in preference to -// the go list command. 
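The buildutil tags.go hunks above switch TagsFlag.Set to the comma-separated -tags syntax that go build has used since Go 1.13, while still accepting the older space- and quote-delimited form. A short sketch of both forms, assuming the golang.org/x/tools version vendored here; the flag-set name and tag values are invented for illustration:

package main

import (
	"flag"
	"fmt"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	var tags buildutil.TagsFlag
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	fs.Var(&tags, "tags", buildutil.TagsFlagDoc)

	// Comma-separated list, matching 'go build -tags' on Go 1.13 and later.
	_ = fs.Parse([]string{"-tags=integration,debug"})
	fmt.Println([]string(tags)) // [integration debug]

	// The pre-1.13 quoted/space-separated form still parses for compatibility.
	tags = nil
	_ = tags.Set("'integration' 'debug'")
	fmt.Println([]string(tags)) // [integration debug]
}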
- package packages +// This file defines the protocol that enables an external "driver" +// tool to supply package metadata in place of 'go list'. + import ( "bytes" "encoding/json" @@ -17,31 +16,71 @@ import ( "strings" ) -// The Driver Protocol +// DriverRequest defines the schema of a request for package metadata +// from an external driver program. The JSON-encoded DriverRequest +// message is provided to the driver program's standard input. The +// query patterns are provided as command-line arguments. // -// The driver, given the inputs to a call to Load, returns metadata about the packages specified. -// This allows for different build systems to support go/packages by telling go/packages how the -// packages' source is organized. -// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in -// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package -// documentation in doc.go for the full description of the patterns that need to be supported. -// A driver receives as a JSON-serialized driverRequest struct in standard input and will -// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output. - -// driverRequest is used to provide the portion of Load's Config that is needed by a driver. -type driverRequest struct { +// See the package documentation for an overview. +type DriverRequest struct { Mode LoadMode `json:"mode"` + // Env specifies the environment the underlying build system should be run in. Env []string `json:"env"` + // BuildFlags are flags that should be passed to the underlying build system. BuildFlags []string `json:"build_flags"` + // Tests specifies whether the patterns should also return test packages. Tests bool `json:"tests"` + // Overlay maps file paths (relative to the driver's working directory) to the byte contents // of overlay files. Overlay map[string][]byte `json:"overlay"` } +// DriverResponse defines the schema of a response from an external +// driver program, providing the results of a query for package +// metadata. The driver program must write a JSON-encoded +// DriverResponse message to its standard output. +// +// See the package documentation for an overview. +type DriverResponse struct { + // NotHandled is returned if the request can't be handled by the current + // driver. If an external driver returns a response with NotHandled, the + // rest of the DriverResponse is ignored, and go/packages will fallback + // to the next driver. If go/packages is extended in the future to support + // lists of multiple drivers, go/packages will fall back to the next driver. + NotHandled bool + + // Compiler and Arch are the arguments pass of types.SizesFor + // to get a types.Sizes to use when type checking. + Compiler string + Arch string + + // Roots is the set of package IDs that make up the root packages. + // We have to encode this separately because when we encode a single package + // we cannot know if it is one of the roots as that requires knowledge of the + // graph it is part of. + Roots []string `json:",omitempty"` + + // Packages is the full set of packages in the graph. + // The packages are not connected into a graph. + // The Imports if populated will be stubs that only have their ID set. + // Imports will be connected and then type and syntax information added in a + // later pass (see refine). + Packages []*Package + + // GoVersion is the minor version number used by the driver + // (e.g. 
the go command on the PATH) when selecting .go files. + // Zero means unknown. + GoVersion int +} + +// driver is the type for functions that query the build system for the +// packages named by the patterns. +type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) + // findExternalDriver returns the file path of a tool that supplies // the build system package structure, or "" if not found. // If GOPACKAGESDRIVER is set in the environment findExternalTool returns its @@ -64,8 +103,8 @@ func findExternalDriver(cfg *Config) driver { return nil } } - return func(cfg *Config, words ...string) (*driverResponse, error) { - req, err := json.Marshal(driverRequest{ + return func(cfg *Config, words ...string) (*DriverResponse, error) { + req, err := json.Marshal(DriverRequest{ Mode: cfg.Mode, Env: cfg.Env, BuildFlags: cfg.BuildFlags, @@ -92,7 +131,7 @@ func findExternalDriver(cfg *Config) driver { fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr) } - var response driverResponse + var response DriverResponse if err := json.Unmarshal(buf.Bytes(), &response); err != nil { return nil, err } diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index cd375fbc..22305d9c 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -35,23 +35,23 @@ type goTooOldError struct { error } -// responseDeduper wraps a driverResponse, deduplicating its contents. +// responseDeduper wraps a DriverResponse, deduplicating its contents. type responseDeduper struct { seenRoots map[string]bool seenPackages map[string]*Package - dr *driverResponse + dr *DriverResponse } func newDeduper() *responseDeduper { return &responseDeduper{ - dr: &driverResponse{}, + dr: &DriverResponse{}, seenRoots: map[string]bool{}, seenPackages: map[string]*Package{}, } } -// addAll fills in r with a driverResponse. -func (r *responseDeduper) addAll(dr *driverResponse) { +// addAll fills in r with a DriverResponse. +func (r *responseDeduper) addAll(dr *DriverResponse) { for _, pkg := range dr.Packages { r.addPackage(pkg) } @@ -128,7 +128,7 @@ func (state *golistState) mustGetEnv() map[string]string { // goListDriver uses the go list command to interpret the patterns and produce // the build system package structure. // See driver for more details. -func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { +func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) { // Make sure that any asynchronous go commands are killed when we return. parentCtx := cfg.Context if parentCtx == nil { @@ -146,16 +146,18 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { } // Fill in response.Sizes asynchronously if necessary. - var sizeserr error - var sizeswg sync.WaitGroup if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { - sizeswg.Add(1) + errCh := make(chan error) go func() { compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) - sizeserr = err response.dr.Compiler = compiler response.dr.Arch = arch - sizeswg.Done() + errCh <- err + }() + defer func() { + if sizesErr := <-errCh; sizesErr != nil { + err = sizesErr + } }() } @@ -208,10 +210,7 @@ extractQueries: } } - sizeswg.Wait() - if sizeserr != nil { - return nil, sizeserr - } + // (We may yet return an error due to defer.)
return response.dr, nil } @@ -266,7 +265,7 @@ func (state *golistState) runContainsQueries(response *responseDeduper, queries // adhocPackage attempts to load or construct an ad-hoc package for a given // query, if the original call to the driver produced inadequate results. -func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) { +func (state *golistState) adhocPackage(pattern, query string) (*DriverResponse, error) { response, err := state.createDriverResponse(query) if err != nil { return nil, err @@ -357,7 +356,7 @@ func otherFiles(p *jsonPackage) [][]string { // createDriverResponse uses the "go list" command to expand the pattern // words and return a response for the specified packages. -func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) { +func (state *golistState) createDriverResponse(words ...string) (*DriverResponse, error) { // go list uses the following identifiers in ImportPath and Imports: // // "p" -- importable package or main (command) @@ -384,7 +383,7 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse pkgs := make(map[string]*Package) additionalErrors := make(map[string][]Error) // Decode the JSON and convert it to Package form. - response := &driverResponse{ + response := &DriverResponse{ GoVersion: goVersion, } for dec := json.NewDecoder(buf); dec.More(); { diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 81e9e6a7..f33b0afc 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -206,43 +206,6 @@ type Config struct { Overlay map[string][]byte } -// driver is the type for functions that query the build system for the -// packages named by the patterns. -type driver func(cfg *Config, patterns ...string) (*driverResponse, error) - -// driverResponse contains the results for a driver query. -type driverResponse struct { - // NotHandled is returned if the request can't be handled by the current - // driver. If an external driver returns a response with NotHandled, the - // rest of the driverResponse is ignored, and go/packages will fallback - // to the next driver. If go/packages is extended in the future to support - // lists of multiple drivers, go/packages will fall back to the next driver. - NotHandled bool - - // Compiler and Arch are the arguments pass of types.SizesFor - // to get a types.Sizes to use when type checking. - Compiler string - Arch string - - // Roots is the set of package IDs that make up the root packages. - // We have to encode this separately because when we encode a single package - // we cannot know if it is one of the roots as that requires knowledge of the - // graph it is part of. - Roots []string `json:",omitempty"` - - // Packages is the full set of packages in the graph. - // The packages are not connected into a graph. - // The Imports if populated will be stubs that only have their ID set. - // Imports will be connected and then type and syntax information added in a - // later pass (see refine). - Packages []*Package - - // GoVersion is the minor version number used by the driver - // (e.g. the go command on the PATH) when selecting .go files. - // Zero means unknown. - GoVersion int -} - // Load loads and returns the Go packages named by the given patterns. 
// // Config specifies loading options; @@ -291,7 +254,7 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) { // no external driver, or the driver returns a response with NotHandled set, // defaultDriver will fall back to the go list driver. // The boolean result indicates that an external driver handled the request. -func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, error) { +func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, error) { if driver := findExternalDriver(cfg); driver != nil { response, err := driver(cfg, patterns...) if err != nil { @@ -303,7 +266,10 @@ func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, erro } response, err := goListDriver(cfg, patterns...) - return response, false, err + if err != nil { + return nil, false, err + } + return response, false, nil } // A Package describes a loaded Go package. @@ -648,7 +614,7 @@ func newLoader(cfg *Config) *loader { // refine connects the supplied packages into a graph and then adds type // and syntax information as requested by the LoadMode. -func (ld *loader) refine(response *driverResponse) ([]*Package, error) { +func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { roots := response.Roots rootMap := make(map[string]int, len(roots)) for i, root := range roots { diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 9bde15e3..9fffa9ad 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -224,6 +224,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte // Gather the relevant packages from the manifest. items := make([]GetPackagesItem, r.uint64()) + uniquePkgPaths := make(map[string]bool) for i := range items { pkgPathOff := r.uint64() pkgPath := p.stringAt(pkgPathOff) @@ -248,6 +249,12 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte } items[i].nameIndex = nameIndex + + uniquePkgPaths[pkgPath] = true + } + // Debugging #63822; hypothesis: there are duplicate PkgPaths. + if len(uniquePkgPaths) != len(items) { + reportf("found duplicate PkgPaths while reading export data manifest: %v", items) } // Request packages all at once from the client, diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index 52f74e64..83615155 100644 --- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -9,11 +9,13 @@ package gopathwalk import ( "bufio" "bytes" + "io" "io/fs" - "log" "os" "path/filepath" + "runtime" "strings" + "sync" "time" ) @@ -21,8 +23,13 @@ import ( type Options struct { // If Logf is non-nil, debug logging is enabled through this function. Logf func(format string, args ...interface{}) + // Search module caches. Also disables legacy goimports ignore rules. ModulesEnabled bool + + // Maximum number of concurrent calls to user-provided callbacks, + // or 0 for GOMAXPROCS. + Concurrency int } // RootType indicates the type of a Root. @@ -43,19 +50,28 @@ type Root struct { Type RootType } -// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// Walk concurrently walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. 
+// // For each package found, add will be called with the absolute // paths of the containing source directory and the package directory. +// +// Unlike filepath.WalkDir, Walk follows symbolic links +// (while guarding against cycles). func Walk(roots []Root, add func(root Root, dir string), opts Options) { WalkSkip(roots, add, func(Root, string) bool { return false }, opts) } -// WalkSkip walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// WalkSkip concurrently walks Go source directories ($GOROOT, $GOPATH, etc) to +// find packages. +// // For each package found, add will be called with the absolute // paths of the containing source directory and the package directory. // For each directory that will be scanned, skip will be called // with the absolute paths of the containing source directory and the directory. // If skip returns false on a directory it will be processed. +// +// Unlike filepath.WalkDir, WalkSkip follows symbolic links +// (while guarding against cycles). func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) { for _, root := range roots { walkDir(root, add, skip, opts) @@ -64,45 +80,51 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root // walkDir creates a walker and starts fastwalk with this walker. func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) { + if opts.Logf == nil { + opts.Logf = func(format string, args ...interface{}) {} + } if _, err := os.Stat(root.Path); os.IsNotExist(err) { - if opts.Logf != nil { - opts.Logf("skipping nonexistent directory: %v", root.Path) - } + opts.Logf("skipping nonexistent directory: %v", root.Path) return } start := time.Now() - if opts.Logf != nil { - opts.Logf("scanning %s", root.Path) - } + opts.Logf("scanning %s", root.Path) + concurrency := opts.Concurrency + if concurrency == 0 { + // The walk may be either CPU-bound or I/O-bound, depending on what the + // caller-supplied add function does and the details of the user's platform + // and machine. Rather than trying to fine-tune the concurrency level for a + // specific environment, we default to GOMAXPROCS: it is likely to be a good + // choice for a CPU-bound add function, and if it is instead I/O-bound, then + // dealing with I/O saturation is arguably the job of the kernel and/or + // runtime. (Oversaturating I/O seems unlikely to harm performance as badly + // as failing to saturate would.) + concurrency = runtime.GOMAXPROCS(0) + } w := &walker{ - root: root, - add: add, - skip: skip, - opts: opts, - added: make(map[string]bool), + root: root, + add: add, + skip: skip, + opts: opts, + sem: make(chan struct{}, concurrency), } w.init() - // Add a trailing path separator to cause filepath.WalkDir to traverse symlinks. + w.sem <- struct{}{} path := root.Path - if len(path) == 0 { - path = "." + string(filepath.Separator) - } else if !os.IsPathSeparator(path[len(path)-1]) { - path = path + string(filepath.Separator) + if path == "" { + path = "."
} + if fi, err := os.Lstat(path); err == nil { + w.walk(path, nil, fs.FileInfoToDirEntry(fi)) + } else { + w.opts.Logf("scanning directory %v: %v", root.Path, err) + } + <-w.sem + w.walking.Wait() - if err := filepath.WalkDir(path, w.walk); err != nil { - logf := opts.Logf - if logf == nil { - logf = log.Printf - } - logf("scanning directory %v: %v", root.Path, err) - } - - if opts.Logf != nil { - opts.Logf("scanned %s in %v", root.Path, time.Since(start)) - } + opts.Logf("scanned %s in %v", root.Path, time.Since(start)) } // walker is the callback for fastwalk.Walk. @@ -112,10 +134,18 @@ type walker struct { skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true. opts Options // Options passed to Walk by the user. - pathSymlinks []os.FileInfo - ignoredDirs []string + walking sync.WaitGroup + sem chan struct{} // Channel of semaphore tokens; send to acquire, receive to release. + ignoredDirs []string - added map[string]bool + added sync.Map // map[string]bool +} + +// A symlinkList is a linked list of os.FileInfos for parent directories +// reached via symlinks. +type symlinkList struct { + info os.FileInfo + prev *symlinkList } // init initializes the walker based on its Options @@ -132,9 +162,7 @@ func (w *walker) init() { for _, p := range ignoredPaths { full := filepath.Join(w.root.Path, p) w.ignoredDirs = append(w.ignoredDirs, full) - if w.opts.Logf != nil { - w.opts.Logf("Directory added to ignore list: %s", full) - } + w.opts.Logf("Directory added to ignore list: %s", full) } } @@ -144,12 +172,10 @@ func (w *walker) init() { func (w *walker) getIgnoredDirs(path string) []string { file := filepath.Join(path, ".goimportsignore") slurp, err := os.ReadFile(file) - if w.opts.Logf != nil { - if err != nil { - w.opts.Logf("%v", err) - } else { - w.opts.Logf("Read %s", file) - } + if err != nil { + w.opts.Logf("%v", err) + } else { + w.opts.Logf("Read %s", file) } if err != nil { return nil @@ -183,63 +209,22 @@ func (w *walker) shouldSkipDir(dir string) bool { // walk walks through the given path. // -// Errors are logged if w.opts.Logf is non-nil, but otherwise ignored: -// walk returns only nil or fs.SkipDir. -func (w *walker) walk(path string, d fs.DirEntry, err error) error { - if err != nil { - // We have no way to report errors back through Walk or WalkSkip, - // so just log and ignore them. - if w.opts.Logf != nil { - w.opts.Logf("%v", err) - } - if d == nil { - // Nothing more to do: the error prevents us from knowing - // what path even represents. - return nil - } - } - - if d.Type().IsRegular() { - if !strings.HasSuffix(path, ".go") { - return nil - } - - dir := filepath.Dir(path) - if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) { - // Doesn't make sense to have regular files - // directly in your $GOPATH/src or $GOROOT/src. - return nil - } - - if !w.added[dir] { - w.add(w.root, dir) - w.added[dir] = true - } - return nil - } - - if d.IsDir() { - base := filepath.Base(path) - if base == "" || base[0] == '.' || base[0] == '_' || - base == "testdata" || - (w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") || - (!w.opts.ModulesEnabled && base == "node_modules") { - return fs.SkipDir - } - if w.shouldSkipDir(path) { - return fs.SkipDir - } - return nil - } - +// Errors are logged if w.opts.Logf is non-nil, but otherwise ignored. 
+func (w *walker) walk(path string, pathSymlinks *symlinkList, d fs.DirEntry) { if d.Type()&os.ModeSymlink != 0 { + // Walk the symlink's target rather than the symlink itself. + // + // (Note that os.Stat, unlike the lower-level os.Readlink, + // follows arbitrarily many layers of symlinks, so it will eventually + // reach either a non-symlink or a nonexistent target.) + // // TODO(bcmills): 'go list all' itself ignores symlinks within GOROOT/src // and GOPATH/src. Do we really need to traverse them here? If so, why? fi, err := os.Stat(path) - if err != nil || !fi.IsDir() { - // Not a directory. Just walk the file (or broken link) and be done. - return w.walk(path, fs.FileInfoToDirEntry(fi), err) + if err != nil { + w.opts.Logf("%v", err) + return } // Avoid walking symlink cycles: if we have already followed a symlink to @@ -249,83 +234,104 @@ func (w *walker) walk(path string, d fs.DirEntry, err error) error { // the number of extra stat calls we make if we *don't* encounter a cycle. // Since we don't actually expect to encounter symlink cycles in practice, // this seems like the right tradeoff. - for _, parent := range w.pathSymlinks { - if os.SameFile(fi, parent) { - return nil + for parent := pathSymlinks; parent != nil; parent = parent.prev { + if os.SameFile(fi, parent.info) { + return } } - w.pathSymlinks = append(w.pathSymlinks, fi) - defer func() { - w.pathSymlinks = w.pathSymlinks[:len(w.pathSymlinks)-1] - }() + pathSymlinks = &symlinkList{ + info: fi, + prev: pathSymlinks, + } + d = fs.FileInfoToDirEntry(fi) + } - // On some platforms the OS (or the Go os package) sometimes fails to - // resolve directory symlinks before a trailing slash - // (even though POSIX requires it to do so). - // - // On macOS that failure may be caused by a known libc/kernel bug; - // see https://go.dev/issue/59586. - // - // On Windows before Go 1.21, it may be caused by a bug in - // os.Lstat (fixed in https://go.dev/cl/463177). - // - // Since we need to handle this explicitly on broken platforms anyway, - // it is simplest to just always do that and not rely on POSIX pathname - // resolution to walk the directory (such as by calling WalkDir with - // a trailing slash appended to the path). - // - // Instead, we make a sequence of walk calls — directly and through - // recursive calls to filepath.WalkDir — simulating what WalkDir would do - // if the symlink were a regular directory. - - // First we call walk on the path as a directory - // (instead of a symlink). - err = w.walk(path, fs.FileInfoToDirEntry(fi), nil) - if err == fs.SkipDir { - return nil - } else if err != nil { - // This should be impossible, but handle it anyway in case - // walk is changed to return other errors. - return err + if d.Type().IsRegular() { + if !strings.HasSuffix(path, ".go") { + return } - // Now read the directory and walk its entries. - ents, err := os.ReadDir(path) + dir := filepath.Dir(path) + if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) { + // Doesn't make sense to have regular files + // directly in your $GOPATH/src or $GOROOT/src. + // + // TODO(bcmills): there are many levels of directory within + // RootModuleCache where this also wouldn't make sense. + // Can we generalize this to any directory without a corresponding + // import path? + return + } + + if _, dup := w.added.LoadOrStore(dir, true); !dup { + w.add(w.root, dir) + } + } + + if !d.IsDir() { + return + } + + base := filepath.Base(path) + if base == "" || base[0] == '.'
|| base[0] == '_' || + base == "testdata" || + (w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") || + (!w.opts.ModulesEnabled && base == "node_modules") || + w.shouldSkipDir(path) { + return + } + + // Read the directory and walk its entries. + + f, err := os.Open(path) + if err != nil { + w.opts.Logf("%v", err) + return + } + defer f.Close() + + for { + // We impose an arbitrary limit on the number of ReadDir results per + // directory to limit the amount of memory consumed for stale or upcoming + // directory entries. The limit trades off CPU (number of syscalls to read + // the whole directory) against RAM (reachable directory entries other than + // the one currently being processed). + // + // Since we process the directories recursively, we will end up maintaining + // a slice of entries for each level of the directory tree. + // (Compare https://go.dev/issue/36197.) + ents, err := f.ReadDir(1024) if err != nil { - // Report the ReadDir error, as filepath.WalkDir would do. - err = w.walk(path, fs.FileInfoToDirEntry(fi), err) - if err == fs.SkipDir { - return nil - } else if err != nil { - return err // Again, should be impossible. + if err != io.EOF { + w.opts.Logf("%v", err) } - // Fall through and iterate over whatever entries we did manage to get. + break } for _, d := range ents { nextPath := filepath.Join(path, d.Name()) if d.IsDir() { - // We want to walk the whole directory tree rooted at nextPath, - // not just the single entry for the directory. - err := filepath.WalkDir(nextPath, w.walk) - if err != nil && w.opts.Logf != nil { - w.opts.Logf("%v", err) - } - } else { - err := w.walk(nextPath, d, nil) - if err == fs.SkipDir { - // Skip the rest of the entries in the parent directory of nextPath - // (that is, path itself). - break - } else if err != nil { - return err // Again, should be impossible. + select { + case w.sem <- struct{}{}: + // Got a new semaphore token, so we can traverse the directory concurrently. + d := d + w.walking.Add(1) + go func() { + defer func() { + <-w.sem + w.walking.Done() + }() + w.walk(nextPath, pathSymlinks, d) + }() + continue + + default: + // No tokens available, so traverse serially. } } - } - return nil - } - // Not a file, regular directory, or symlink; skip. - return nil + w.walk(nextPath, pathSymlinks, d) + } + } } diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index dd369c07..6a18f63a 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -13,6 +13,7 @@ import ( "go/build" "go/parser" "go/token" + "go/types" "io/fs" "io/ioutil" "os" @@ -700,20 +701,21 @@ func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) (map return result, nil } -func PrimeCache(ctx context.Context, env *ProcessEnv) error { +func PrimeCache(ctx context.Context, resolver Resolver) error { // Fully scan the disk for directories, but don't actually read any Go files. callback := &scanCallback{ - rootFound: func(gopathwalk.Root) bool { - return true + rootFound: func(root gopathwalk.Root) bool { + // See getCandidatePkgs: walking GOROOT is apparently expensive and + // unnecessary. + return root.Type != gopathwalk.RootGOROOT }, dirFound: func(pkg *pkg) bool { return false }, - packageNameLoaded: func(pkg *pkg) bool { - return false - }, + // packageNameLoaded and exportsLoaded must never be called. 
} - return getCandidatePkgs(ctx, callback, "", "", env) + + return resolver.scan(ctx, callback) } func candidateImportName(pkg *pkg) string { @@ -827,16 +829,45 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP return getCandidatePkgs(ctx, callback, filename, filePkg, env) } -var requiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB", "GOWORK"} +// TODO(rfindley): we should depend on GOOS and GOARCH, to provide accurate +// imports when doing cross-platform development. +var requiredGoEnvVars = []string{ + "GO111MODULE", + "GOFLAGS", + "GOINSECURE", + "GOMOD", + "GOMODCACHE", + "GONOPROXY", + "GONOSUMDB", + "GOPATH", + "GOPROXY", + "GOROOT", + "GOSUMDB", + "GOWORK", +} // ProcessEnv contains environment variables and settings that affect the use of // the go command, the go/build package, etc. +// +// ...a ProcessEnv *also* overwrites its Env along with derived state in the +// form of the resolver. And because it is lazily initialized, an env may just +// be broken and unusable, but there is no way for the caller to detect that: +// all queries will just fail. +// +// TODO(rfindley): refactor this package so that this type (perhaps renamed to +// just Env or Config) is an immutable configuration struct, to be exchanged +// for an initialized object via a constructor that returns an error. Perhaps +// the signature should be `func NewResolver(*Env) (*Resolver, error)`, where +// resolver is a concrete type used for resolving imports. Via this +// refactoring, we can avoid the need to call ProcessEnv.init and +// ProcessEnv.GoEnv everywhere, and implicitly fix all the places where +// these are misused. Also, we'd delegate to the caller the decision of how to +// handle a broken environment. type ProcessEnv struct { GocmdRunner *gocommand.Runner BuildFlags []string ModFlag string - ModFile string // SkipPathInScan returns true if the path should be skipped from scans of // the RootCurrentModule root type. The function argument is a clean, @@ -846,7 +877,7 @@ type ProcessEnv struct { // Env overrides the OS environment, and can be used to specify // GOPROXY, GO111MODULE, etc. PATH cannot be set here, because // exec.Command will not honor it. - // Specifying all of RequiredGoEnvVars avoids a call to `go env`. + // Specifying all of requiredGoEnvVars avoids a call to `go env`. Env map[string]string WorkingDir string @@ -854,9 +885,17 @@ type ProcessEnv struct { // If Logf is non-nil, debug logging is enabled through this function. Logf func(format string, args ...interface{}) - initialized bool + // If set, ModCache holds a shared cache of directory info to use across + // multiple ProcessEnvs. + ModCache *DirInfoCache - resolver Resolver + initialized bool // see TODO above + + // resolver and resolverErr are lazily evaluated (see GetResolver). + // This is unclean, but see the big TODO in the docstring for ProcessEnv + // above: for now, we can't be sure that the ProcessEnv is fully initialized.
+ resolver Resolver + resolverErr error } func (e *ProcessEnv) goEnv() (map[string]string, error) { @@ -936,20 +975,31 @@ func (e *ProcessEnv) env() []string { } func (e *ProcessEnv) GetResolver() (Resolver, error) { - if e.resolver != nil { - return e.resolver, nil - } if err := e.init(); err != nil { return nil, err } - if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 { - e.resolver = newGopathResolver(e) - return e.resolver, nil + + if e.resolver == nil && e.resolverErr == nil { + // TODO(rfindley): we should only use a gopathResolver here if the working + // directory is actually *in* GOPATH. (I seem to recall an open gopls issue + // for this behavior, but I can't find it). + // + // For gopls, we can optionally explicitly choose a resolver type, since we + // already know the view type. + if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 { + e.resolver = newGopathResolver(e) + } else { + e.resolver, e.resolverErr = newModuleResolver(e, e.ModCache) + } } - e.resolver = newModuleResolver(e) - return e.resolver, nil + + return e.resolver, e.resolverErr } +// buildContext returns the build.Context to use for matching files. +// +// TODO(rfindley): support dynamic GOOS, GOARCH here, when doing cross-platform +// development. func (e *ProcessEnv) buildContext() (*build.Context, error) { ctx := build.Default goenv, err := e.goEnv() @@ -1029,15 +1079,23 @@ func addStdlibCandidates(pass *pass, refs references) error { type Resolver interface { // loadPackageNames loads the package names in importPaths. loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) + // scan works with callback to search for packages. See scanCallback for details. scan(ctx context.Context, callback *scanCallback) error + // loadExports returns the set of exported symbols in the package at dir. // loadExports may be called concurrently. loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) + // scoreImportPath returns the relevance for an import path. scoreImportPath(ctx context.Context, path string) float64 - ClearForNewScan() + // ClearForNewScan returns a new Resolver based on the receiver that has + // cleared its internal caches of directory contents. + // + // The new resolver should be primed and then set via + // [ProcessEnv.UpdateResolver]. + ClearForNewScan() Resolver } // A scanCallback controls a call to scan and receives its results. @@ -1120,7 +1178,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil go func(pkgName string, symbols map[string]bool) { defer wg.Done() - found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols, filename) + found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols) if err != nil { firstErrOnce.Do(func() { @@ -1151,6 +1209,17 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil }() for result := range results { + // Don't offer completions that would shadow predeclared + // names, such as github.com/coreos/etcd/error. + if types.Universe.Lookup(result.pkg.name) != nil { // predeclared + // Ideally we would skip this candidate only + // if the predeclared name is actually + // referenced by the file, but that's a lot + // trickier to compute and would still create + // an import that is likely to surprise the + // user before long. 
+ continue + } pass.addCandidate(result.imp, result.pkg) } return firstErr @@ -1193,31 +1262,22 @@ func ImportPathToAssumedName(importPath string) string { type gopathResolver struct { env *ProcessEnv walked bool - cache *dirInfoCache + cache *DirInfoCache scanSema chan struct{} // scanSema prevents concurrent scans. } func newGopathResolver(env *ProcessEnv) *gopathResolver { r := &gopathResolver{ - env: env, - cache: &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - listeners: map[*int]cacheListener{}, - }, + env: env, + cache: NewDirInfoCache(), scanSema: make(chan struct{}, 1), } r.scanSema <- struct{}{} return r } -func (r *gopathResolver) ClearForNewScan() { - <-r.scanSema - r.cache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - listeners: map[*int]cacheListener{}, - } - r.walked = false - r.scanSema <- struct{}{} +func (r *gopathResolver) ClearForNewScan() Resolver { + return newGopathResolver(r.env) } func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { @@ -1538,7 +1598,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl // findImport searches for a package with the given symbols. // If no package is found, findImport returns ("", false, nil) -func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool, filename string) (*pkg, error) { +func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) { // Sort the candidates by their import package length, // assuming that shorter package names are better than long // ones. Note that this sorts by the de-vendored name, so diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index 58e637b9..66040754 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -236,7 +236,7 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast src = src[:len(src)-len("}\n")] // Gofmt has also indented the function body one level. // Remove that indent. - src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1) + src = bytes.ReplaceAll(src, []byte("\n\t"), []byte("\n")) return matchSpace(orig, src) } return file, adjust, nil diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 5f4d435d..3d0f38f6 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -23,49 +23,88 @@ import ( "golang.org/x/tools/internal/gopathwalk" ) -// ModuleResolver implements resolver for modules using the go command as little -// as feasible. +// Notes(rfindley): ModuleResolver appears to be heavily optimized for scanning +// as fast as possible, which is desirable for a call to goimports from the +// command line, but it doesn't work as well for gopls, where it suffers from +// slow startup (golang/go#44863) and intermittent hanging (golang/go#59216), +// both caused by populating the cache, albeit in slightly different ways. +// +// A high level list of TODOs: +// - Optimize the scan itself, as there is some redundancy statting and +// reading go.mod files. +// - Invert the relationship between ProcessEnv and Resolver (see the +// docstring of ProcessEnv). +// - Make it easier to use an external resolver implementation. 
+// +// Smaller TODOs are annotated in the code below. + +// ModuleResolver implements the Resolver interface for a workspace using +// modules. +// +// A goal of the ModuleResolver is to invoke the Go command as little as +// possible. To this end, it runs the Go command only for listing module +// information (i.e. `go list -m -e -json ...`). Package scanning, the process +// of loading package information for the modules, is implemented internally +// via the scan method. +// +// It has two types of state: the state derived from the go command, which +// is populated by init, and the state derived from scans, which is populated +// via scan. A root is considered scanned if it has been walked to discover +// directories. However, if the scan did not require additional information +// from the directory (such as package name or exports), the directory +// information itself may be partially populated. It will be lazily filled in +// as needed by scans, using the scanCallback. type ModuleResolver struct { - env *ProcessEnv - moduleCacheDir string - dummyVendorMod *gocommand.ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory. - roots []gopathwalk.Root - scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots. - scannedRoots map[gopathwalk.Root]bool + env *ProcessEnv - initialized bool - mains []*gocommand.ModuleJSON - mainByDir map[string]*gocommand.ModuleJSON - modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path... - modsByDir []*gocommand.ModuleJSON // ...or number of path components in their Dir. + // Module state, populated during construction + dummyVendorMod *gocommand.ModuleJSON // if vendoring is enabled, a pseudo-module to represent the /vendor directory + moduleCacheDir string // GOMODCACHE, inferred from GOPATH if unset + roots []gopathwalk.Root // roots to scan, in approximate order of importance + mains []*gocommand.ModuleJSON // main modules + mainByDir map[string]*gocommand.ModuleJSON // module information by dir, to join with roots + modsByModPath []*gocommand.ModuleJSON // all modules, ordered by # of path components in their module path + modsByDir []*gocommand.ModuleJSON // ...or by the number of path components in their Dir. - // moduleCacheCache stores information about the module cache. - moduleCacheCache *dirInfoCache - otherCache *dirInfoCache + // Scanning state, populated by scan + + // scanSema prevents concurrent scans, and guards scannedRoots and the cache + // fields below (though the caches themselves are concurrency safe). + // Receive to acquire, send to release. + scanSema chan struct{} + scannedRoots map[gopathwalk.Root]bool // if true, root has been walked + + // Caches of directory info, populated by scans and scan callbacks + // + // moduleCacheCache stores cached information about roots in the module + // cache, which are immutable and therefore do not need to be invalidated. + // + // otherCache stores information about all other roots (even GOROOT), which + // may change. + moduleCacheCache *DirInfoCache + otherCache *DirInfoCache } -func newModuleResolver(e *ProcessEnv) *ModuleResolver { +// newModuleResolver returns a new module-aware goimports resolver. +// +// Note: use caution when modifying this constructor: changes must also be +// reflected in ModuleResolver.ClearForNewScan. 
+func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleResolver, error) { r := &ModuleResolver{ env: e, scanSema: make(chan struct{}, 1), } - r.scanSema <- struct{}{} - return r -} - -func (r *ModuleResolver) init() error { - if r.initialized { - return nil - } + r.scanSema <- struct{}{} // release goenv, err := r.env.goEnv() if err != nil { - return err + return nil, err } + + // TODO(rfindley): can we refactor to share logic with r.env.invokeGo? inv := gocommand.Invocation{ BuildFlags: r.env.BuildFlags, ModFlag: r.env.ModFlag, - ModFile: r.env.ModFile, Env: r.env.env(), Logf: r.env.Logf, WorkingDir: r.env.WorkingDir, @@ -77,9 +116,12 @@ func (r *ModuleResolver) init() error { // Module vendor directories are ignored in workspace mode: // https://go.googlesource.com/proposal/+/master/design/45713-workspace.md if len(r.env.Env["GOWORK"]) == 0 { + // TODO(rfindley): VendorEnabled runs the go command to get GOFLAGS, but + // they should be available from the ProcessEnv. Can we avoid the redundant + // invocation? vendorEnabled, mainModVendor, err = gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner) if err != nil { - return err + return nil, err } } @@ -100,19 +142,14 @@ func (r *ModuleResolver) init() error { // GO111MODULE=on. Other errors are fatal. if err != nil { if errMsg := err.Error(); !strings.Contains(errMsg, "working directory is not part of a module") && !strings.Contains(errMsg, "go.mod file not found") { - return err + return nil, err } } } - if gmc := r.env.Env["GOMODCACHE"]; gmc != "" { - r.moduleCacheDir = gmc - } else { - gopaths := filepath.SplitList(goenv["GOPATH"]) - if len(gopaths) == 0 { - return fmt.Errorf("empty GOPATH") - } - r.moduleCacheDir = filepath.Join(gopaths[0], "/pkg/mod") + r.moduleCacheDir = gomodcacheForEnv(goenv) + if r.moduleCacheDir == "" { + return nil, fmt.Errorf("cannot resolve GOMODCACHE") } sort.Slice(r.modsByModPath, func(i, j int) bool { @@ -141,7 +178,11 @@ func (r *ModuleResolver) init() error { } else { addDep := func(mod *gocommand.ModuleJSON) { if mod.Replace == nil { - // This is redundant with the cache, but we'll skip it cheaply enough. + // This is redundant with the cache, but we'll skip it cheaply enough + // when we encounter it in the module cache scan. + // + // Including it at a lower index in r.roots than the module cache dir + // helps prioritize matches from within existing dependencies. r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootModuleCache}) } else { r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootOther}) @@ -158,24 +199,40 @@ func (r *ModuleResolver) init() error { addDep(mod) } } + // If provided, share the moduleCacheCache. + // + // TODO(rfindley): The module cache is immutable. However, the loaded + // exports do depend on GOOS and GOARCH. Fortunately, the + // ProcessEnv.buildContext does not adjust these from build.DefaultContext + // (even though it should). So for now, this is OK to share, but we need to + // add logic for handling GOOS/GOARCH. 
+ r.moduleCacheCache = moduleCacheCache r.roots = append(r.roots, gopathwalk.Root{Path: r.moduleCacheDir, Type: gopathwalk.RootModuleCache}) } r.scannedRoots = map[gopathwalk.Root]bool{} if r.moduleCacheCache == nil { - r.moduleCacheCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - listeners: map[*int]cacheListener{}, - } + r.moduleCacheCache = NewDirInfoCache() } - if r.otherCache == nil { - r.otherCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - listeners: map[*int]cacheListener{}, - } + r.otherCache = NewDirInfoCache() + return r, nil +} + +// gomodcacheForEnv returns the GOMODCACHE value to use based on the given env +// map, which must have GOMODCACHE and GOPATH populated. +// +// TODO(rfindley): this is defensive refactoring. +// 1. Is this even relevant anymore? Can't we just read GOMODCACHE. +// 2. Use this to separate module cache scanning from other scanning. +func gomodcacheForEnv(goenv map[string]string) string { + if gmc := goenv["GOMODCACHE"]; gmc != "" { + return gmc } - r.initialized = true - return nil + gopaths := filepath.SplitList(goenv["GOPATH"]) + if len(gopaths) == 0 { + return "" + } + return filepath.Join(gopaths[0], "/pkg/mod") } func (r *ModuleResolver) initAllMods() error { @@ -206,30 +263,82 @@ func (r *ModuleResolver) initAllMods() error { return nil } -func (r *ModuleResolver) ClearForNewScan() { - <-r.scanSema - r.scannedRoots = map[gopathwalk.Root]bool{} - r.otherCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - listeners: map[*int]cacheListener{}, - } - r.scanSema <- struct{}{} -} +// ClearForNewScan invalidates the last scan. +// +// It preserves the set of roots, but forgets about the set of directories. +// Though it forgets the set of module cache directories, it remembers their +// contents, since they are assumed to be immutable. +func (r *ModuleResolver) ClearForNewScan() Resolver { + <-r.scanSema // acquire r, to guard scannedRoots + r2 := &ModuleResolver{ + env: r.env, + dummyVendorMod: r.dummyVendorMod, + moduleCacheDir: r.moduleCacheDir, + roots: r.roots, + mains: r.mains, + mainByDir: r.mainByDir, + modsByModPath: r.modsByModPath, -func (r *ModuleResolver) ClearForNewMod() { - <-r.scanSema - *r = ModuleResolver{ - env: r.env, + scanSema: make(chan struct{}, 1), + scannedRoots: make(map[gopathwalk.Root]bool), + otherCache: NewDirInfoCache(), moduleCacheCache: r.moduleCacheCache, - otherCache: r.otherCache, - scanSema: r.scanSema, } - r.init() - r.scanSema <- struct{}{} + r2.scanSema <- struct{}{} // r2 must start released + // Invalidate root scans. We don't need to invalidate module cache roots, + // because they are immutable. + // (We don't support a use case where GOMODCACHE is cleaned in the middle of + // e.g. a gopls session: the user must restart gopls to get accurate + // imports.) + // + // Scanning for new directories in GOMODCACHE should be handled elsewhere, + // via a call to ScanModuleCache. + for _, root := range r.roots { + if root.Type == gopathwalk.RootModuleCache && r.scannedRoots[root] { + r2.scannedRoots[root] = true + } + } + r.scanSema <- struct{}{} // release r + return r2 } -// findPackage returns the module and directory that contains the package at -// the given import path, or returns nil, "" if no module is in scope. +// ClearModuleInfo invalidates resolver state that depends on go.mod file +// contents (essentially, the output of go list -m -json ...). 
+// +// Notably, it does not forget directory contents, which are reset +// asynchronously via ClearForNewScan. +// +// If the ProcessEnv is a GOPATH environment, ClearModuleInfo is a no op. +// +// TODO(rfindley): move this to a new env.go, consolidating ProcessEnv methods. +func (e *ProcessEnv) ClearModuleInfo() { + if r, ok := e.resolver.(*ModuleResolver); ok { + resolver, resolverErr := newModuleResolver(e, e.ModCache) + if resolverErr == nil { + <-r.scanSema // acquire (guards caches) + resolver.moduleCacheCache = r.moduleCacheCache + resolver.otherCache = r.otherCache + r.scanSema <- struct{}{} // release + } + e.resolver = resolver + e.resolverErr = resolverErr + } +} + +// UpdateResolver sets the resolver for the ProcessEnv to use in imports +// operations. Only for use with the result of [Resolver.ClearForNewScan]. +// +// TODO(rfindley): this awkward API is a result of the (arguably) inverted +// relationship between configuration and state described in the doc comment +// for [ProcessEnv]. +func (e *ProcessEnv) UpdateResolver(r Resolver) { + e.resolver = r + e.resolverErr = nil +} + +// findPackage returns the module and directory from within the main modules +// and their dependencies that contains the package at the given import path, +// or returns nil, "" if no module is in scope. func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, string) { // This can't find packages in the stdlib, but that's harmless for all // the existing code paths. @@ -295,10 +404,6 @@ func (r *ModuleResolver) cacheStore(info directoryPackageInfo) { } } -func (r *ModuleResolver) cacheKeys() []string { - return append(r.moduleCacheCache.Keys(), r.otherCache.Keys()...) -} - // cachePackageName caches the package name for a dir already in the cache. func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) { if info.rootType == gopathwalk.RootModuleCache { @@ -367,15 +472,15 @@ func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON return modDir != mod.Dir } -func (r *ModuleResolver) modInfo(dir string) (modDir string, modName string) { - readModName := func(modFile string) string { - modBytes, err := os.ReadFile(modFile) - if err != nil { - return "" - } - return modulePath(modBytes) +func readModName(modFile string) string { + modBytes, err := os.ReadFile(modFile) + if err != nil { + return "" } + return modulePath(modBytes) +} +func (r *ModuleResolver) modInfo(dir string) (modDir, modName string) { if r.dirInModuleCache(dir) { if matches := modCacheRegexp.FindStringSubmatch(dir); len(matches) == 3 { index := strings.Index(dir, matches[1]+"@"+matches[2]) @@ -409,11 +514,9 @@ func (r *ModuleResolver) dirInModuleCache(dir string) bool { } func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { - if err := r.init(); err != nil { - return nil, err - } names := map[string]string{} for _, path := range importPaths { + // TODO(rfindley): shouldn't this use the dirInfoCache? _, packageDir := r.findPackage(path) if packageDir == "" { continue @@ -431,10 +534,6 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error ctx, done := event.Start(ctx, "imports.ModuleResolver.scan") defer done() - if err := r.init(); err != nil { - return err - } - processDir := func(info directoryPackageInfo) { // Skip this directory if we were not able to get the package information successfully. 
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { @@ -444,18 +543,18 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error if err != nil { return } - if !callback.dirFound(pkg) { return } + pkg.packageName, err = r.cachePackageName(info) if err != nil { return } - if !callback.packageNameLoaded(pkg) { return } + _, exports, err := r.loadExports(ctx, pkg, false) if err != nil { return @@ -494,7 +593,6 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error return packageScanned } - // Add anything new to the cache, and process it if we're still listening. add := func(root gopathwalk.Root, dir string) { r.cacheStore(r.scanDirForPackage(root, dir)) } @@ -509,9 +607,9 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error select { case <-ctx.Done(): return - case <-r.scanSema: + case <-r.scanSema: // acquire } - defer func() { r.scanSema <- struct{}{} }() + defer func() { r.scanSema <- struct{}{} }() // release // We have the lock on r.scannedRoots, and no other scans can run. for _, root := range roots { if ctx.Err() != nil { @@ -613,9 +711,6 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { } func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { - if err := r.init(); err != nil { - return "", nil, err - } if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest { return r.cacheExports(ctx, r.env, info) } diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go index 45690abb..cfc54657 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod_cache.go +++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -7,8 +7,12 @@ package imports import ( "context" "fmt" + "path" + "path/filepath" + "strings" "sync" + "golang.org/x/mod/module" "golang.org/x/tools/internal/gopathwalk" ) @@ -39,6 +43,8 @@ const ( exportsLoaded ) +// directoryPackageInfo holds (possibly incomplete) information about packages +// contained in a given directory. type directoryPackageInfo struct { // status indicates the extent to which this struct has been filled in. status directoryPackageStatus @@ -63,7 +69,10 @@ type directoryPackageInfo struct { packageName string // the package name, as declared in the source. // Set when status >= exportsLoaded. - + // TODO(rfindley): it's hard to see this, but exports depend implicitly on + // the default build context GOOS and GOARCH. + // + // We can make this explicit, and key exports by GOOS, GOARCH. exports []string } @@ -79,7 +88,7 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) ( return true, nil } -// dirInfoCache is a concurrency safe map for storing information about +// DirInfoCache is a concurrency-safe map for storing information about // directories that may contain packages. // // The information in this cache is built incrementally. Entries are initialized in scan. @@ -92,21 +101,26 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) ( // The information in the cache is not expected to change for the cache's // lifetime, so there is no protection against competing writes. Users should // take care not to hold the cache across changes to the underlying files. 
-// -// TODO(suzmue): consider other concurrency strategies and data structures (RWLocks, sync.Map, etc) -type dirInfoCache struct { +type DirInfoCache struct { mu sync.Mutex // dirs stores information about packages in directories, keyed by absolute path. dirs map[string]*directoryPackageInfo listeners map[*int]cacheListener } +func NewDirInfoCache() *DirInfoCache { + return &DirInfoCache{ + dirs: make(map[string]*directoryPackageInfo), + listeners: make(map[*int]cacheListener), + } +} + type cacheListener func(directoryPackageInfo) // ScanAndListen calls listener on all the items in the cache, and on anything // newly added. The returned stop function waits for all in-flight callbacks to // finish and blocks new ones. -func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() { +func (d *DirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() { ctx, cancel := context.WithCancel(ctx) // Flushing out all the callbacks is tricky without knowing how many there @@ -162,8 +176,10 @@ func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener } // Store stores the package info for dir. -func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) { +func (d *DirInfoCache) Store(dir string, info directoryPackageInfo) { d.mu.Lock() + // TODO(rfindley, golang/go#59216): should we overwrite an existing entry? + // That seems incorrect as the cache should be idempotent. _, old := d.dirs[dir] d.dirs[dir] = &info var listeners []cacheListener @@ -180,7 +196,7 @@ func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) { } // Load returns a copy of the directoryPackageInfo for absolute directory dir. -func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) { +func (d *DirInfoCache) Load(dir string) (directoryPackageInfo, bool) { d.mu.Lock() defer d.mu.Unlock() info, ok := d.dirs[dir] @@ -191,7 +207,7 @@ func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) { } // Keys returns the keys currently present in d. -func (d *dirInfoCache) Keys() (keys []string) { +func (d *DirInfoCache) Keys() (keys []string) { d.mu.Lock() defer d.mu.Unlock() for key := range d.dirs { @@ -200,7 +216,7 @@ func (d *dirInfoCache) Keys() (keys []string) { return keys } -func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) { +func (d *DirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) { if loaded, err := info.reachedStatus(nameLoaded); loaded { return info.packageName, err } @@ -213,7 +229,7 @@ func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, erro return info.packageName, info.err } -func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { +func (d *DirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { if reached, _ := info.reachedStatus(exportsLoaded); reached { return info.packageName, info.exports, info.err } @@ -234,3 +250,81 @@ func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info d d.Store(info.dir, info) return info.packageName, info.exports, info.err } + +// ScanModuleCache walks the given directory, which must be a GOMODCACHE value, +// for directory package information, storing the results in cache. 
+func ScanModuleCache(dir string, cache *DirInfoCache, logf func(string, ...any)) { + // Note(rfindley): it's hard to see, but this function attempts to implement + // just the side effects on cache of calling PrimeCache with a ProcessEnv + // that has the given dir as its GOMODCACHE. + // + // Teasing out the control flow, we see that we can avoid any handling of + // vendor/ and can infer module info entirely from the path, simplifying the + // logic here. + + root := gopathwalk.Root{ + Path: filepath.Clean(dir), + Type: gopathwalk.RootModuleCache, + } + + directoryInfo := func(root gopathwalk.Root, dir string) directoryPackageInfo { + // This is a copy of ModuleResolver.scanDirForPackage, trimmed down to + // logic that applies to a module cache directory. + + subdir := "" + if dir != root.Path { + subdir = dir[len(root.Path)+len("/"):] + } + + matches := modCacheRegexp.FindStringSubmatch(subdir) + if len(matches) == 0 { + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("invalid module cache path: %v", subdir), + } + } + modPath, err := module.UnescapePath(filepath.ToSlash(matches[1])) + if err != nil { + if logf != nil { + logf("decoding module cache path %q: %v", subdir, err) + } + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("decoding module cache path %q: %v", subdir, err), + } + } + importPath := path.Join(modPath, filepath.ToSlash(matches[3])) + index := strings.Index(dir, matches[1]+"@"+matches[2]) + modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2]) + modName := readModName(filepath.Join(modDir, "go.mod")) + return directoryPackageInfo{ + status: directoryScanned, + dir: dir, + rootType: root.Type, + nonCanonicalImportPath: importPath, + moduleDir: modDir, + moduleName: modName, + } + } + + add := func(root gopathwalk.Root, dir string) { + info := directoryInfo(root, dir) + cache.Store(info.dir, info) + } + + skip := func(_ gopathwalk.Root, dir string) bool { + // Skip directories that have already been scanned. + // + // Note that gopathwalk only adds "package" directories, which must contain + // a .go file, and all such package directories in the module cache are + // immutable. So if we can load a dir, it can be skipped. 
+ info, ok := cache.Load(dir) + if !ok { + return false + } + packageScanned, _ := info.reachedStatus(directoryScanned) + return packageScanned + } + + gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: logf, ModulesEnabled: true}) +} diff --git a/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/vendor/golang.org/x/tools/internal/imports/zstdlib.go index 9f992c2b..8db24df2 100644 --- a/vendor/golang.org/x/tools/internal/imports/zstdlib.go +++ b/vendor/golang.org/x/tools/internal/imports/zstdlib.go @@ -151,6 +151,7 @@ var stdlib = map[string][]string{ "cmp": { "Compare", "Less", + "Or", "Ordered", }, "compress/bzip2": { @@ -632,6 +633,8 @@ var stdlib = map[string][]string{ "NameMismatch", "NewCertPool", "NotAuthorizedToSign", + "OID", + "OIDFromInts", "PEMCipher", "PEMCipher3DES", "PEMCipherAES128", @@ -706,6 +709,7 @@ var stdlib = map[string][]string{ "LevelWriteCommitted", "Named", "NamedArg", + "Null", "NullBool", "NullByte", "NullFloat64", @@ -1921,6 +1925,7 @@ var stdlib = map[string][]string{ "R_LARCH_32", "R_LARCH_32_PCREL", "R_LARCH_64", + "R_LARCH_64_PCREL", "R_LARCH_ABS64_HI12", "R_LARCH_ABS64_LO20", "R_LARCH_ABS_HI20", @@ -1928,12 +1933,17 @@ var stdlib = map[string][]string{ "R_LARCH_ADD16", "R_LARCH_ADD24", "R_LARCH_ADD32", + "R_LARCH_ADD6", "R_LARCH_ADD64", "R_LARCH_ADD8", + "R_LARCH_ADD_ULEB128", + "R_LARCH_ALIGN", "R_LARCH_B16", "R_LARCH_B21", "R_LARCH_B26", + "R_LARCH_CFA", "R_LARCH_COPY", + "R_LARCH_DELETE", "R_LARCH_GNU_VTENTRY", "R_LARCH_GNU_VTINHERIT", "R_LARCH_GOT64_HI12", @@ -1953,6 +1963,7 @@ var stdlib = map[string][]string{ "R_LARCH_PCALA64_LO20", "R_LARCH_PCALA_HI20", "R_LARCH_PCALA_LO12", + "R_LARCH_PCREL20_S2", "R_LARCH_RELATIVE", "R_LARCH_RELAX", "R_LARCH_SOP_ADD", @@ -1983,8 +1994,10 @@ var stdlib = map[string][]string{ "R_LARCH_SUB16", "R_LARCH_SUB24", "R_LARCH_SUB32", + "R_LARCH_SUB6", "R_LARCH_SUB64", "R_LARCH_SUB8", + "R_LARCH_SUB_ULEB128", "R_LARCH_TLS_DTPMOD32", "R_LARCH_TLS_DTPMOD64", "R_LARCH_TLS_DTPREL32", @@ -2035,6 +2048,7 @@ var stdlib = map[string][]string{ "R_MIPS_LO16", "R_MIPS_NONE", "R_MIPS_PC16", + "R_MIPS_PC32", "R_MIPS_PJUMP", "R_MIPS_REL16", "R_MIPS_REL32", @@ -2952,6 +2966,8 @@ var stdlib = map[string][]string{ "RegisterName", }, "encoding/hex": { + "AppendDecode", + "AppendEncode", "Decode", "DecodeString", "DecodedLen", @@ -3233,6 +3249,7 @@ var stdlib = map[string][]string{ "TypeSpec", "TypeSwitchStmt", "UnaryExpr", + "Unparen", "ValueSpec", "Var", "Visitor", @@ -3492,6 +3509,7 @@ var stdlib = map[string][]string{ "XOR_ASSIGN", }, "go/types": { + "Alias", "ArgumentError", "Array", "AssertableTo", @@ -3559,6 +3577,7 @@ var stdlib = map[string][]string{ "MethodVal", "MissingMethod", "Named", + "NewAlias", "NewArray", "NewChan", "NewChecker", @@ -3627,6 +3646,7 @@ var stdlib = map[string][]string{ "Uint64", "Uint8", "Uintptr", + "Unalias", "Union", "Universe", "Unsafe", @@ -3643,6 +3663,11 @@ var stdlib = map[string][]string{ "WriteSignature", "WriteType", }, + "go/version": { + "Compare", + "IsValid", + "Lang", + }, "hash": { "Hash", "Hash32", @@ -4078,6 +4103,7 @@ var stdlib = map[string][]string{ "NewTextHandler", "Record", "SetDefault", + "SetLogLoggerLevel", "Source", "SourceKey", "String", @@ -4367,6 +4393,35 @@ var stdlib = map[string][]string{ "Uint64", "Zipf", }, + "math/rand/v2": { + "ChaCha8", + "ExpFloat64", + "Float32", + "Float64", + "Int", + "Int32", + "Int32N", + "Int64", + "Int64N", + "IntN", + "N", + "New", + "NewChaCha8", + "NewPCG", + "NewZipf", + "NormFloat64", + "PCG", + "Perm", + 
"Rand", + "Shuffle", + "Source", + "Uint32", + "Uint32N", + "Uint64", + "Uint64N", + "UintN", + "Zipf", + }, "mime": { "AddExtensionType", "BEncoding", @@ -4540,6 +4595,7 @@ var stdlib = map[string][]string{ "FS", "File", "FileServer", + "FileServerFS", "FileSystem", "Flusher", "Get", @@ -4566,6 +4622,7 @@ var stdlib = map[string][]string{ "MethodPut", "MethodTrace", "NewFileTransport", + "NewFileTransportFS", "NewRequest", "NewRequestWithContext", "NewResponseController", @@ -4599,6 +4656,7 @@ var stdlib = map[string][]string{ "Serve", "ServeContent", "ServeFile", + "ServeFileFS", "ServeMux", "ServeTLS", "Server", @@ -5106,6 +5164,7 @@ var stdlib = map[string][]string{ "StructTag", "Swapper", "Type", + "TypeFor", "TypeOf", "Uint", "Uint16", @@ -5342,6 +5401,7 @@ var stdlib = map[string][]string{ "CompactFunc", "Compare", "CompareFunc", + "Concat", "Contains", "ContainsFunc", "Delete", @@ -10824,6 +10884,7 @@ var stdlib = map[string][]string{ "Value", }, "testing/slogtest": { + "Run", "TestHandler", }, "text/scanner": { diff --git a/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go new file mode 100644 index 00000000..2ef36bbc --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go @@ -0,0 +1,160 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protodelim marshals and unmarshals varint size-delimited messages. +package protodelim + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/proto" +) + +// MarshalOptions is a configurable varint size-delimited marshaler. +type MarshalOptions struct{ proto.MarshalOptions } + +// MarshalTo writes a varint size-delimited wire-format message to w. +// If w returns an error, MarshalTo returns it unchanged. +func (o MarshalOptions) MarshalTo(w io.Writer, m proto.Message) (int, error) { + msgBytes, err := o.MarshalOptions.Marshal(m) + if err != nil { + return 0, err + } + + sizeBytes := protowire.AppendVarint(nil, uint64(len(msgBytes))) + sizeWritten, err := w.Write(sizeBytes) + if err != nil { + return sizeWritten, err + } + msgWritten, err := w.Write(msgBytes) + if err != nil { + return sizeWritten + msgWritten, err + } + return sizeWritten + msgWritten, nil +} + +// MarshalTo writes a varint size-delimited wire-format message to w +// with the default options. +// +// See the documentation for [MarshalOptions.MarshalTo]. +func MarshalTo(w io.Writer, m proto.Message) (int, error) { + return MarshalOptions{}.MarshalTo(w, m) +} + +// UnmarshalOptions is a configurable varint size-delimited unmarshaler. +type UnmarshalOptions struct { + proto.UnmarshalOptions + + // MaxSize is the maximum size in wire-format bytes of a single message. + // Unmarshaling a message larger than MaxSize will return an error. + // A zero MaxSize will default to 4 MiB. + // Setting MaxSize to -1 disables the limit. + MaxSize int64 +} + +const defaultMaxSize = 4 << 20 // 4 MiB, corresponds to the default gRPC max request/response size + +// SizeTooLargeError is an error that is returned when the unmarshaler encounters a message size +// that is larger than its configured [UnmarshalOptions.MaxSize]. 
+type SizeTooLargeError struct { + // Size is the varint size of the message encountered + // that was larger than the provided MaxSize. + Size uint64 + + // MaxSize is the MaxSize limit configured in UnmarshalOptions, which Size exceeded. + MaxSize uint64 +} + +func (e *SizeTooLargeError) Error() string { + return fmt.Sprintf("message size %d exceeded unmarshaler's maximum configured size %d", e.Size, e.MaxSize) +} + +// Reader is the interface expected by [UnmarshalFrom]. +// It is implemented by *[bufio.Reader]. +type Reader interface { + io.Reader + io.ByteReader +} + +// UnmarshalFrom parses and consumes a varint size-delimited wire-format message +// from r. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// The error is [io.EOF] error only if no bytes are read. +// If an EOF happens after reading some but not all the bytes, +// UnmarshalFrom returns a non-io.EOF error. +// In particular if r returns a non-io.EOF error, UnmarshalFrom returns it unchanged, +// and if only a size is read with no subsequent message, [io.ErrUnexpectedEOF] is returned. +func (o UnmarshalOptions) UnmarshalFrom(r Reader, m proto.Message) error { + var sizeArr [binary.MaxVarintLen64]byte + sizeBuf := sizeArr[:0] + for i := range sizeArr { + b, err := r.ReadByte() + if err != nil { + // Immediate EOF is unexpected. + if err == io.EOF && i != 0 { + break + } + return err + } + sizeBuf = append(sizeBuf, b) + if b < 0x80 { + break + } + } + size, n := protowire.ConsumeVarint(sizeBuf) + if n < 0 { + return protowire.ParseError(n) + } + + maxSize := o.MaxSize + if maxSize == 0 { + maxSize = defaultMaxSize + } + if maxSize != -1 && size > uint64(maxSize) { + return errors.Wrap(&SizeTooLargeError{Size: size, MaxSize: uint64(maxSize)}, "") + } + + var b []byte + var err error + if br, ok := r.(*bufio.Reader); ok { + // Use the []byte from the bufio.Reader instead of having to allocate one. + // This reduces CPU usage and allocated bytes. + b, err = br.Peek(int(size)) + if err == nil { + defer br.Discard(int(size)) + } else { + b = nil + } + } + if b == nil { + b = make([]byte, size) + _, err = io.ReadFull(r, b) + } + + if err == io.EOF { + return io.ErrUnexpectedEOF + } + if err != nil { + return err + } + if err := o.Unmarshal(b, m); err != nil { + return err + } + return nil +} + +// UnmarshalFrom parses and consumes a varint size-delimited wire-format message +// from r with the default options. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// See the documentation for [UnmarshalOptions.UnmarshalFrom]. 
+func UnmarshalFrom(r Reader, m proto.Message) error { + return UnmarshalOptions{}.UnmarshalFrom(r, m) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index c5658f49..c8482aa9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,5 +1,5 @@ -# github.com/99designs/gqlgen v0.17.42 -## explicit; go 1.18 +# github.com/99designs/gqlgen v0.17.44 +## explicit; go 1.20 github.com/99designs/gqlgen github.com/99designs/gqlgen/api github.com/99designs/gqlgen/codegen @@ -51,8 +51,8 @@ github.com/cespare/xxhash/v2 # github.com/cpuguy83/go-md2man/v2 v2.0.2 ## explicit; go 1.11 github.com/cpuguy83/go-md2man/v2/md2man -# github.com/datarhei/gosrt v0.5.5 -## explicit; go 1.18 +# github.com/datarhei/gosrt v0.5.7 +## explicit; go 1.20 github.com/datarhei/gosrt github.com/datarhei/gosrt/internal/circular github.com/datarhei/gosrt/internal/congestion @@ -60,7 +60,7 @@ github.com/datarhei/gosrt/internal/crypto github.com/datarhei/gosrt/internal/net github.com/datarhei/gosrt/internal/packet github.com/datarhei/gosrt/internal/rand -# github.com/datarhei/joy4 v0.0.0-20240103155326-704dff2c27fb +# github.com/datarhei/joy4 v0.0.0-20240229100136-43bcaf8ef5e7 ## explicit; go 1.14 github.com/datarhei/joy4/av github.com/datarhei/joy4/av/avutil @@ -115,7 +115,7 @@ github.com/go-openapi/jsonreference/internal # github.com/go-openapi/spec v0.20.14 ## explicit; go 1.19 github.com/go-openapi/spec -# github.com/go-openapi/swag v0.22.7 +# github.com/go-openapi/swag v0.22.9 ## explicit; go 1.19 github.com/go-openapi/swag # github.com/go-playground/locales v0.14.1 @@ -125,7 +125,7 @@ github.com/go-playground/locales/currency # github.com/go-playground/universal-translator v0.18.1 ## explicit; go 1.18 github.com/go-playground/universal-translator -# github.com/go-playground/validator/v10 v10.16.0 +# github.com/go-playground/validator/v10 v10.18.0 ## explicit; go 1.18 github.com/go-playground/validator/v10 # github.com/gobwas/glob v0.2.3 @@ -147,7 +147,7 @@ github.com/golang-jwt/jwt/v4 # github.com/golang-jwt/jwt/v5 v5.2.0 ## explicit; go 1.18 github.com/golang-jwt/jwt/v5 -# github.com/google/uuid v1.5.0 +# github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid # github.com/gorilla/websocket v1.5.1 @@ -174,10 +174,11 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/klauspost/compress v1.17.4 -## explicit; go 1.19 +# github.com/klauspost/compress v1.17.7 +## explicit; go 1.20 +github.com/klauspost/compress/internal/race github.com/klauspost/compress/s2 -# github.com/klauspost/cpuid/v2 v2.2.6 +# github.com/klauspost/cpuid/v2 v2.2.7 ## explicit; go 1.15 github.com/klauspost/cpuid/v2 # github.com/labstack/echo-jwt v0.0.0-20221127215225-c84d41a71003 @@ -192,16 +193,17 @@ github.com/labstack/echo/v4/middleware github.com/labstack/gommon/bytes github.com/labstack/gommon/color github.com/labstack/gommon/log -# github.com/leodido/go-urn v1.2.4 -## explicit; go 1.16 +# github.com/leodido/go-urn v1.4.0 +## explicit; go 1.18 github.com/leodido/go-urn +github.com/leodido/go-urn/scim/schema # github.com/libdns/libdns v0.2.1 ## explicit; go 1.14 github.com/libdns/libdns # github.com/lithammer/shortuuid/v4 v4.0.0 ## explicit; go 1.13 github.com/lithammer/shortuuid/v4 -# github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed +# github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a ## explicit; go 1.16 github.com/lufia/plan9stats # github.com/mailru/easyjson v0.7.7 @@ -215,20 +217,17 @@ github.com/mattn/go-colorable # 
github.com/mattn/go-isatty v0.0.20 ## explicit; go 1.15 github.com/mattn/go-isatty -# github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 -## explicit; go 1.19 -github.com/matttproud/golang_protobuf_extensions/v2/pbutil # github.com/mholt/acmez v1.2.0 ## explicit; go 1.20 github.com/mholt/acmez github.com/mholt/acmez/acme -# github.com/miekg/dns v1.1.57 +# github.com/miekg/dns v1.1.58 ## explicit; go 1.19 github.com/miekg/dns # github.com/minio/md5-simd v1.1.2 ## explicit; go 1.14 github.com/minio/md5-simd -# github.com/minio/minio-go/v7 v7.0.66 +# github.com/minio/minio-go/v7 v7.0.67 ## explicit; go 1.17 github.com/minio/minio-go/v7 github.com/minio/minio-go/v7/pkg/credentials @@ -256,21 +255,21 @@ github.com/modern-go/reflect2 # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b +# github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 ## explicit; go 1.14 github.com/power-devops/perfstat # github.com/prep/average v0.0.0-20200506183628-d26c465f48c3 ## explicit github.com/prep/average -# github.com/prometheus/client_golang v1.18.0 -## explicit; go 1.19 +# github.com/prometheus/client_golang v1.19.0 +## explicit; go 1.20 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp -# github.com/prometheus/client_model v0.5.0 +# github.com/prometheus/client_model v0.6.0 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.45.0 +# github.com/prometheus/common v0.48.0 ## explicit; go 1.20 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg @@ -286,7 +285,7 @@ github.com/rs/xid # github.com/russross/blackfriday/v2 v2.1.0 ## explicit github.com/russross/blackfriday/v2 -# github.com/shirou/gopsutil/v3 v3.23.12 +# github.com/shirou/gopsutil/v3 v3.24.1 ## explicit; go 1.15 github.com/shirou/gopsutil/v3/common github.com/shirou/gopsutil/v3/cpu @@ -314,7 +313,7 @@ github.com/swaggo/echo-swagger # github.com/swaggo/files/v2 v2.0.0 ## explicit; go 1.16 github.com/swaggo/files/v2 -# github.com/swaggo/swag v1.16.2 +# github.com/swaggo/swag v1.16.3 ## explicit; go 1.18 github.com/swaggo/swag # github.com/tklauser/go-sysconf v0.3.13 @@ -323,7 +322,7 @@ github.com/tklauser/go-sysconf # github.com/tklauser/numcpus v0.7.0 ## explicit; go 1.18 github.com/tklauser/numcpus -# github.com/urfave/cli/v2 v2.25.5 +# github.com/urfave/cli/v2 v2.27.1 ## explicit; go 1.18 github.com/urfave/cli/v2 # github.com/valyala/bytebufferpool v1.0.0 @@ -332,7 +331,7 @@ github.com/valyala/bytebufferpool # github.com/valyala/fasttemplate v1.2.2 ## explicit; go 1.12 github.com/valyala/fasttemplate -# github.com/vektah/gqlparser/v2 v2.5.10 +# github.com/vektah/gqlparser/v2 v2.5.11 ## explicit; go 1.19 github.com/vektah/gqlparser/v2 github.com/vektah/gqlparser/v2/ast @@ -353,7 +352,7 @@ github.com/xeipuuv/gojsonschema # github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 ## explicit github.com/xrash/smetrics -# github.com/yusufpapurcu/wmi v1.2.3 +# github.com/yusufpapurcu/wmi v1.2.4 ## explicit; go 1.16 github.com/yusufpapurcu/wmi # github.com/zeebo/blake3 v0.2.3 @@ -371,7 +370,7 @@ github.com/zeebo/blake3/internal/utils # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr -# go.uber.org/zap v1.26.0 +# go.uber.org/zap v1.27.0 ## explicit; go 1.19 go.uber.org/zap 
go.uber.org/zap/buffer @@ -382,7 +381,7 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore -# golang.org/x/crypto v0.18.0 +# golang.org/x/crypto v0.20.0 ## explicit; go 1.18 golang.org/x/crypto/acme golang.org/x/crypto/acme/autocert @@ -393,12 +392,12 @@ golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/ocsp golang.org/x/crypto/pbkdf2 golang.org/x/crypto/sha3 -# golang.org/x/mod v0.14.0 +# golang.org/x/mod v0.15.0 ## explicit; go 1.18 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.20.0 +# golang.org/x/net v0.21.0 ## explicit; go 1.18 golang.org/x/net/bpf golang.org/x/net/html @@ -415,7 +414,7 @@ golang.org/x/net/ipv4 golang.org/x/net/ipv6 golang.org/x/net/proxy golang.org/x/net/publicsuffix -# golang.org/x/sys v0.16.0 +# golang.org/x/sys v0.17.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/unix @@ -436,7 +435,7 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.5.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.17.0 +# golang.org/x/tools v0.18.0 ## explicit; go 1.18 golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/buildutil @@ -464,6 +463,7 @@ golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions # google.golang.org/protobuf v1.32.0 ## explicit; go 1.17 +google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire google.golang.org/protobuf/internal/descfmt
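Usage note on the newly vendored google.golang.org/protobuf/encoding/protodelim package above: its exported surface is MarshalTo / UnmarshalFrom (plus the MarshalOptions / UnmarshalOptions wrappers), writing and reading varint size-delimited messages, with UnmarshalOptions.MaxSize bounding the length prefix (4 MiB by default, -1 to disable). The sketch below is illustrative only and is not part of this diff; it assumes the well-known durationpb message type purely as a stand-in for whatever generated message a caller would actually stream, while the protodelim calls mirror the signatures in the vendored file.

package main

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
	"time"

	"google.golang.org/protobuf/encoding/protodelim"
	"google.golang.org/protobuf/types/known/durationpb" // hypothetical choice of message type for the sketch
)

func main() {
	var buf bytes.Buffer

	// Write two varint size-delimited messages back to back.
	for _, d := range []time.Duration{time.Second, 250 * time.Millisecond} {
		if _, err := protodelim.MarshalTo(&buf, durationpb.New(d)); err != nil {
			panic(err)
		}
	}

	// Read them back. UnmarshalFrom wants an io.Reader that is also an
	// io.ByteReader; *bufio.Reader satisfies that and lets the package
	// borrow its internal buffer instead of allocating per message.
	r := bufio.NewReader(&buf)
	opts := protodelim.UnmarshalOptions{MaxSize: 1 << 20} // refuse messages over 1 MiB
	for {
		msg := &durationpb.Duration{}
		err := opts.UnmarshalFrom(r, msg)
		if errors.Is(err, io.EOF) { // io.EOF only when no bytes were read: clean end of stream
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(msg.AsDuration())
	}
}

Per the doc comments in the vendored file, io.EOF is returned only when nothing was read, so the loop above can treat it as a clean end-of-stream signal; a size prefix with a truncated message body surfaces as io.ErrUnexpectedEOF, and an oversized prefix as a wrapped SizeTooLargeError.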